From d7f42d54ad68b735db03210c1940a8a133aa93b5 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 10 Jun 2019 20:46:29 +0700 Subject: [PATCH 001/172] Added initial support for per pool algo option (mining code is broken). --- src/Summary.cpp | 6 +- src/base/kernel/config/BaseConfig.cpp | 23 +- src/base/kernel/config/BaseConfig.h | 14 +- src/base/kernel/interfaces/IConfig.h | 1 - src/base/net/stratum/Client.cpp | 33 +-- src/base/net/stratum/Job.cpp | 38 ---- src/base/net/stratum/Job.h | 10 +- src/base/net/stratum/Pool.cpp | 203 +---------------- src/base/net/stratum/Pool.h | 10 +- src/base/net/stratum/Pools.cpp | 21 +- src/base/net/stratum/Pools.h | 7 +- src/core/config/Config.cpp | 56 +++-- src/crypto/common/Algorithm.cpp | 304 +++++++------------------- src/crypto/common/Algorithm.h | 84 ++++--- src/net/Network.cpp | 3 +- src/net/strategies/DonateStrategy.cpp | 6 +- src/workers/MultiWorker.cpp | 3 +- src/workers/Workers.cpp | 2 +- 18 files changed, 187 insertions(+), 637 deletions(-) diff --git a/src/Summary.cpp b/src/Summary.cpp index 2b28f98d..d780d64f 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -90,10 +90,9 @@ static void print_threads(xmrig::Config *config) snprintf(buf, sizeof buf, ", affinity=0x%" PRIX64, config->affinity()); } - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%d") WHITE_BOLD(", %s, av=%d, %sdonate=%d%%") WHITE_BOLD("%s"), + xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%d") WHITE_BOLD(", av=%d, %sdonate=%d%%") WHITE_BOLD("%s"), "THREADS", config->threadsCount(), - config->algorithm().shortName(), config->algoVariant(), config->pools().donateLevel() == 0 ? 
RED_BOLD_S : "", config->pools().donateLevel(), @@ -101,10 +100,9 @@ static void print_threads(xmrig::Config *config) ); } else { - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%d") WHITE_BOLD(", %s, %sdonate=%d%%"), + xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%d") WHITE_BOLD(", %sdonate=%d%%"), "THREADS", config->threadsCount(), - config->algorithm().shortName(), config->pools().donateLevel() == 0 ? RED_BOLD_S : "", config->pools().donateLevel() ); diff --git a/src/base/kernel/config/BaseConfig.cpp b/src/base/kernel/config/BaseConfig.cpp index af2418aa..489849a3 100644 --- a/src/base/kernel/config/BaseConfig.cpp +++ b/src/base/kernel/config/BaseConfig.cpp @@ -60,14 +60,7 @@ #include "version.h" -xmrig::BaseConfig::BaseConfig() : - m_algorithm(CRYPTONIGHT, VARIANT_AUTO), - m_autoSave(true), - m_background(false), - m_dryRun(false), - m_syslog(false), - m_upgrade(false), - m_watch(true) +xmrig::BaseConfig::BaseConfig() { } @@ -160,19 +153,7 @@ bool xmrig::BaseConfig::read(const IJsonReader &reader, const char *fileName) m_http.load(chain.getObject("http")); # endif - m_algorithm.parseAlgorithm(reader.getString("algo", "cn")); - - m_pools.load(reader.getArray("pools")); - m_pools.setDonateLevel(reader.getInt("donate-level", kDefaultDonateLevel)); - m_pools.setProxyDonate(reader.getInt("donate-over-proxy", Pools::PROXY_DONATE_AUTO)); - m_pools.setRetries(reader.getInt("retries")); - m_pools.setRetryPause(reader.getInt("retry-pause")); - - if (!m_algorithm.isValid()) { - return false; - } - - m_pools.adjust(m_algorithm); + m_pools.load(reader); return m_pools.active() > 0; } diff --git a/src/base/kernel/config/BaseConfig.h b/src/base/kernel/config/BaseConfig.h index f0c52536..48d7c2cf 100644 --- a/src/base/kernel/config/BaseConfig.h +++ b/src/base/kernel/config/BaseConfig.h @@ -59,7 +59,6 @@ public: inline uint32_t printTime() const { return m_printTime; } inline bool isWatch() const override { return m_watch && 
!m_fileName.isNull(); } - inline const Algorithm &algorithm() const override { return m_algorithm; } inline const String &fileName() const override { return m_fileName; } inline void setFileName(const char *fileName) override { m_fileName = fileName; } @@ -69,13 +68,12 @@ public: void printVersions(); protected: - Algorithm m_algorithm; - bool m_autoSave; - bool m_background; - bool m_dryRun; - bool m_syslog; - bool m_upgrade; - bool m_watch; + bool m_autoSave = true; + bool m_background = false; + bool m_dryRun = false; + bool m_syslog = false; + bool m_upgrade = false; + bool m_watch = true; Http m_http; Pools m_pools; String m_apiId; diff --git a/src/base/kernel/interfaces/IConfig.h b/src/base/kernel/interfaces/IConfig.h index 3d0407e6..c8189ba5 100644 --- a/src/base/kernel/interfaces/IConfig.h +++ b/src/base/kernel/interfaces/IConfig.h @@ -144,7 +144,6 @@ public: virtual bool isWatch() const = 0; virtual bool read(const IJsonReader &reader, const char *fileName) = 0; virtual bool save() = 0; - virtual const Algorithm &algorithm() const = 0; virtual const String &fileName() const = 0; virtual void getJSON(rapidjson::Document &doc) const = 0; virtual void setFileName(const char *fileName) = 0; diff --git a/src/base/net/stratum/Client.cpp b/src/base/net/stratum/Client.cpp index 1d448ddf..05e53c78 100644 --- a/src/base/net/stratum/Client.cpp +++ b/src/base/net/stratum/Client.cpp @@ -333,17 +333,6 @@ bool xmrig::Client::parseJob(const rapidjson::Value ¶ms, int *code) job.setAlgorithm(params["algo"].GetString()); } - if (params.HasMember("variant")) { - const rapidjson::Value &variant = params["variant"]; - - if (variant.IsInt()) { - job.setVariant(variant.GetInt()); - } - else if (variant.IsString()){ - job.setVariant(variant.GetString()); - } - } - if (params.HasMember("height")) { const rapidjson::Value &variant = params["height"]; @@ -438,7 +427,7 @@ bool xmrig::Client::verifyAlgorithm(const Algorithm &algorithm) const } # endif - if 
(m_pool.isCompatible(algorithm)) { + if (m_pool.algorithm() == algorithm) { // FIXME return true; } @@ -590,18 +579,18 @@ void xmrig::Client::login() params.AddMember("rigid", m_pool.rigId().toJSON(), allocator); } -# ifdef XMRIG_PROXY_PROJECT - if (m_pool.algorithm().variant() != xmrig::VARIANT_AUTO) -# endif - { - Value algo(kArrayType); +//# ifdef XMRIG_PROXY_PROJECT FIXME +// if (m_pool.algorithm().variant() != xmrig::VARIANT_AUTO) +//# endif +// { +// Value algo(kArrayType); - for (const auto &a : m_pool.algorithms()) { - algo.PushBack(StringRef(a.shortName()), allocator); - } +// for (const auto &a : m_pool.algorithms()) { +// algo.PushBack(StringRef(a.shortName()), allocator); +// } - params.AddMember("algo", algo, allocator); - } +// params.AddMember("algo", algo, allocator); +// } m_listener->onLogin(this, doc, params); diff --git a/src/base/net/stratum/Job.cpp b/src/base/net/stratum/Job.cpp index 1f1cd413..293d0f46 100644 --- a/src/base/net/stratum/Job.cpp +++ b/src/base/net/stratum/Job.cpp @@ -34,10 +34,8 @@ xmrig::Job::Job() : - m_autoVariant(false), m_nicehash(false), m_poolId(-2), - m_threadId(-1), m_size(0), m_diff(0), m_height(0), @@ -49,10 +47,8 @@ xmrig::Job::Job() : xmrig::Job::Job(int poolId, bool nicehash, const Algorithm &algorithm, const String &clientId) : m_algorithm(algorithm), - m_autoVariant(algorithm.variant() == VARIANT_AUTO), m_nicehash(nicehash), m_poolId(poolId), - m_threadId(-1), m_size(0), m_clientId(clientId), m_diff(0), @@ -98,10 +94,6 @@ bool xmrig::Job::setBlob(const char *blob) m_nicehash = true; } - if (m_autoVariant) { - m_algorithm.setVariant(variant()); - } - # ifdef XMRIG_PROXY_PROJECT memset(m_rawBlob, 0, sizeof(m_rawBlob)); memcpy(m_rawBlob, blob, m_size * 2); @@ -153,16 +145,6 @@ bool xmrig::Job::setTarget(const char *target) } -void xmrig::Job::setAlgorithm(const char *algo) -{ - m_algorithm.parseAlgorithm(algo); - - if (m_algorithm.variant() == xmrig::VARIANT_AUTO) { - m_algorithm.setVariant(variant()); - } -} - - 
void xmrig::Job::setDiff(uint64_t diff) { m_diff = diff; @@ -173,23 +155,3 @@ void xmrig::Job::setDiff(uint64_t diff) m_rawTarget[16] = '\0'; # endif } - - -xmrig::Variant xmrig::Job::variant() const -{ - switch (m_algorithm.algo()) { - case CRYPTONIGHT: - return (m_blob[0] >= 10) ? VARIANT_4 : ((m_blob[0] >= 8) ? VARIANT_2 : VARIANT_1); - - case CRYPTONIGHT_LITE: - return VARIANT_1; - - case CRYPTONIGHT_HEAVY: - return VARIANT_0; - - default: - break; - } - - return m_algorithm.variant(); -} diff --git a/src/base/net/stratum/Job.h b/src/base/net/stratum/Job.h index 5052040a..518a337e 100644 --- a/src/base/net/stratum/Job.h +++ b/src/base/net/stratum/Job.h @@ -53,7 +53,6 @@ public: bool isEqual(const Job &other) const; bool setBlob(const char *blob); bool setTarget(const char *target); - void setAlgorithm(const char *algo); void setDiff(uint64_t diff); inline bool isNicehash() const { return m_nicehash; } @@ -65,7 +64,6 @@ public: inline const uint32_t *nonce() const { return reinterpret_cast(m_blob + 39); } inline const uint8_t *blob() const { return m_blob; } inline int poolId() const { return m_poolId; } - inline int threadId() const { return m_threadId; } inline size_t size() const { return m_size; } inline uint32_t *nonce() { return reinterpret_cast(m_blob + 39); } inline uint64_t diff() const { return m_diff; } @@ -73,12 +71,10 @@ public: inline uint64_t target() const { return m_target; } inline uint8_t fixedByte() const { return *(m_blob + 42); } inline void reset() { m_size = 0; m_diff = 0; } + inline void setAlgorithm(const char *algo) { m_algorithm = algo; } inline void setClientId(const String &id) { m_clientId = id; } inline void setHeight(uint64_t height) { m_height = height; } inline void setPoolId(int poolId) { m_poolId = poolId; } - inline void setThreadId(int threadId) { m_threadId = threadId; } - inline void setVariant(const char *variant) { m_algorithm.parseVariant(variant); } - inline void setVariant(int variant) { 
m_algorithm.parseVariant(variant); } # ifdef XMRIG_PROXY_PROJECT inline char *rawBlob() { return m_rawBlob; } @@ -93,13 +89,9 @@ public: inline bool operator!=(const Job &other) const { return !isEqual(other); } private: - Variant variant() const; - Algorithm m_algorithm; - bool m_autoVariant; bool m_nicehash; int m_poolId; - int m_threadId; size_t m_size; String m_clientId; String m_id; diff --git a/src/base/net/stratum/Pool.cpp b/src/base/net/stratum/Pool.cpp index f441ba63..bb3fab72 100644 --- a/src/base/net/stratum/Pool.cpp +++ b/src/base/net/stratum/Pool.cpp @@ -47,6 +47,7 @@ namespace xmrig { +static const char *kAlgo = "algo"; static const char *kDaemon = "daemon"; static const char *kDaemonPollInterval = "daemon-poll-interval"; static const char *kEnabled = "enabled"; @@ -58,7 +59,6 @@ static const char *kRigId = "rig-id"; static const char *kTls = "tls"; static const char *kUrl = "url"; static const char *kUser = "user"; -static const char *kVariant = "variant"; const String Pool::kDefaultPassword = "x"; const String Pool::kDefaultUser = "x"; @@ -119,6 +119,7 @@ xmrig::Pool::Pool(const rapidjson::Value &object) : m_rigId = Json::getString(object, kRigId); m_fingerprint = Json::getString(object, kFingerprint); m_pollInterval = Json::getUint64(object, kDaemonPollInterval, kDefaultPollInterval); + m_algorithm = Json::getString(object, kAlgo); m_flags.set(FLAG_ENABLED, Json::getBool(object, kEnabled, true)); m_flags.set(FLAG_NICEHASH, Json::getBool(object, kNicehash)); @@ -132,15 +133,6 @@ xmrig::Pool::Pool(const rapidjson::Value &object) : else if (keepalive.IsBool()) { setKeepAlive(keepalive.GetBool()); } - - const rapidjson::Value &variant = Json::getValue(object, kVariant); - if (variant.IsString()) { - algorithm().parseVariant(variant.GetString()); - } - else if (variant.IsInt()) { - algorithm().parseVariant(variant.GetInt()); - } - } @@ -166,28 +158,6 @@ xmrig::Pool::Pool(const char *host, uint16_t port, const char *user, const char } -bool 
xmrig::Pool::isCompatible(const Algorithm &algorithm) const -{ - if (m_algorithms.empty()) { - return true; - } - - for (const auto &a : m_algorithms) { - if (algorithm == a) { - return true; - } - } - -# ifdef XMRIG_PROXY_PROJECT - if (m_algorithm.algo() == xmrig::CRYPTONIGHT && algorithm.algo() == xmrig::CRYPTONIGHT) { - return m_algorithm.variant() == xmrig::VARIANT_RWZ || m_algorithm.variant() == xmrig::VARIANT_ZLS; - } -# endif - - return false; -} - - bool xmrig::Pool::isEnabled() const { # ifndef XMRIG_FEATURE_TLS @@ -289,6 +259,7 @@ rapidjson::Value xmrig::Pool::toJSON(rapidjson::Document &doc) const Value obj(kObjectType); + obj.AddMember(StringRef(kAlgo), StringRef(m_algorithm.shortName()), allocator); obj.AddMember(StringRef(kUrl), m_url.toJSON(), allocator); obj.AddMember(StringRef(kUser), m_user.toJSON(), allocator); obj.AddMember(StringRef(kPass), m_password.toJSON(), allocator); @@ -305,22 +276,6 @@ rapidjson::Value xmrig::Pool::toJSON(rapidjson::Document &doc) const obj.AddMember(StringRef(kKeepalive), m_keepAlive, allocator); } - switch (m_algorithm.variant()) { - case VARIANT_AUTO: - case VARIANT_0: - case VARIANT_1: - obj.AddMember(StringRef(kVariant), m_algorithm.variant(), allocator); - break; - - case VARIANT_2: - obj.AddMember(StringRef(kVariant), 2, allocator); - break; - - default: - obj.AddMember(StringRef(kVariant), StringRef(m_algorithm.variantName()), allocator); - break; - } - obj.AddMember(StringRef(kEnabled), m_flags.test(FLAG_ENABLED), allocator); obj.AddMember(StringRef(kTls), isTLS(), allocator); obj.AddMember(StringRef(kFingerprint), m_fingerprint.toJSON(), allocator); @@ -331,29 +286,6 @@ rapidjson::Value xmrig::Pool::toJSON(rapidjson::Document &doc) const } -void xmrig::Pool::adjust(const Algorithm &algorithm) -{ - if (!isValid()) { - return; - } - - if (!m_algorithm.isValid()) { - m_algorithm.setAlgo(algorithm.algo()); - adjustVariant(algorithm.variant()); - } - - rebuild(); -} - - -void xmrig::Pool::setAlgo(const 
xmrig::Algorithm &algorithm) -{ - m_algorithm = algorithm; - - rebuild(); -} - - #ifdef APP_DEBUG void xmrig::Pool::print() const { @@ -391,132 +323,3 @@ bool xmrig::Pool::parseIPv6(const char *addr) return true; } - - -void xmrig::Pool::addVariant(xmrig::Variant variant) -{ - const xmrig::Algorithm algorithm(m_algorithm.algo(), variant); - if (!algorithm.isValid() || m_algorithm == algorithm) { - return; - } - - m_algorithms.push_back(algorithm); -} - - -void xmrig::Pool::adjustVariant(const xmrig::Variant variantHint) -{ -# ifndef XMRIG_PROXY_PROJECT - using namespace xmrig; - - if (m_host.contains(".nicehash.com")) { - m_flags.set(FLAG_NICEHASH, true); - m_keepAlive = false; - bool valid = true; - - switch (m_port) { - case 3355: - case 33355: - valid = m_algorithm.algo() == CRYPTONIGHT && m_host.contains("cryptonight."); - m_algorithm.setVariant(VARIANT_0); - break; - - case 3363: - case 33363: - valid = m_algorithm.algo() == CRYPTONIGHT && m_host.contains("cryptonightv7."); - m_algorithm.setVariant(VARIANT_1); - break; - - case 3364: - valid = m_algorithm.algo() == CRYPTONIGHT_HEAVY && m_host.contains("cryptonightheavy."); - m_algorithm.setVariant(VARIANT_0); - break; - - case 3367: - case 33367: - valid = m_algorithm.algo() == CRYPTONIGHT && m_host.contains("cryptonightv8."); - m_algorithm.setVariant(VARIANT_2); - break; - - default: - break; - } - - if (!valid) { - m_algorithm.setAlgo(INVALID_ALGO); - } - - m_flags.set(FLAG_TLS, m_port > 33000); - return; - } - - if (m_host.contains(".minergate.com")) { - m_keepAlive = false; - bool valid = true; - m_algorithm.setVariant(VARIANT_1); - - if (m_host.contains("xmr.pool.")) { - valid = m_algorithm.algo() == CRYPTONIGHT; - m_algorithm.setVariant(m_port == 45700 ? 
VARIANT_AUTO : VARIANT_0); - } - else if (m_host.contains("aeon.pool.") && m_port == 45690) { - valid = m_algorithm.algo() == CRYPTONIGHT_LITE; - m_algorithm.setVariant(VARIANT_1); - } - - if (!valid) { - m_algorithm.setAlgo(INVALID_ALGO); - } - - return; - } - - if (variantHint != VARIANT_AUTO) { - m_algorithm.setVariant(variantHint); - return; - } - - if (m_algorithm.variant() != VARIANT_AUTO) { - return; - } - - if (m_algorithm.algo() == CRYPTONIGHT_HEAVY) { - m_algorithm.setVariant(VARIANT_0); - } - else if (m_algorithm.algo() == CRYPTONIGHT_LITE) { - m_algorithm.setVariant(VARIANT_1); - } -# endif -} - - -void xmrig::Pool::rebuild() -{ - m_algorithms.clear(); - - if (!m_algorithm.isValid()) { - return; - } - - m_algorithms.push_back(m_algorithm); - -# ifndef XMRIG_PROXY_PROJECT - addVariant(VARIANT_4); - addVariant(VARIANT_WOW); - addVariant(VARIANT_2); - addVariant(VARIANT_1); - addVariant(VARIANT_0); - addVariant(VARIANT_HALF); - addVariant(VARIANT_XTL); - addVariant(VARIANT_TUBE); - addVariant(VARIANT_MSR); - addVariant(VARIANT_XHV); - addVariant(VARIANT_XAO); - addVariant(VARIANT_RTO); - addVariant(VARIANT_GPU); - addVariant(VARIANT_RWZ); - addVariant(VARIANT_ZLS); - addVariant(VARIANT_DOUBLE); - addVariant(VARIANT_AUTO); -# endif -} diff --git a/src/base/net/stratum/Pool.h b/src/base/net/stratum/Pool.h index 5348271a..36c3ed1b 100644 --- a/src/base/net/stratum/Pool.h +++ b/src/base/net/stratum/Pool.h @@ -69,13 +69,11 @@ public: bool tls = false ); - inline Algorithm &algorithm() { return m_algorithm; } inline bool isDaemon() const { return m_flags.test(FLAG_DAEMON); } inline bool isNicehash() const { return m_flags.test(FLAG_NICEHASH); } inline bool isTLS() const { return m_flags.test(FLAG_TLS); } inline bool isValid() const { return !m_host.isNull() && m_port > 0; } inline const Algorithm &algorithm() const { return m_algorithm; } - inline const Algorithms &algorithms() const { return m_algorithms; } inline const String &fingerprint() const { return 
m_fingerprint; } inline const String &host() const { return m_host; } inline const String &password() const { return !m_password.isNull() ? m_password : kDefaultPassword; } @@ -85,6 +83,7 @@ public: inline int keepAlive() const { return m_keepAlive; } inline uint16_t port() const { return m_port; } inline uint64_t pollInterval() const { return m_pollInterval; } + inline void setAlgo(const Algorithm &algorithm) { m_algorithm = algorithm; } inline void setPassword(const String &password) { m_password = password; } inline void setRigId(const String &rigId) { m_rigId = rigId; } inline void setUser(const String &user) { m_user = user; } @@ -92,13 +91,10 @@ public: inline bool operator!=(const Pool &other) const { return !isEqual(other); } inline bool operator==(const Pool &other) const { return isEqual(other); } - bool isCompatible(const Algorithm &algorithm) const; bool isEnabled() const; bool isEqual(const Pool &other) const; bool parse(const char *url); rapidjson::Value toJSON(rapidjson::Document &doc) const; - void adjust(const Algorithm &algorithm); - void setAlgo(const Algorithm &algorithm); # ifdef APP_DEBUG void print() const; @@ -109,12 +105,8 @@ private: inline void setKeepAlive(int keepAlive) { m_keepAlive = keepAlive >= 0 ? 
keepAlive : 0; } bool parseIPv6(const char *addr); - void addVariant(Variant variant); - void adjustVariant(const Variant variantHint); - void rebuild(); Algorithm m_algorithm; - Algorithms m_algorithms; int m_keepAlive; std::bitset m_flags; String m_fingerprint; diff --git a/src/base/net/stratum/Pools.cpp b/src/base/net/stratum/Pools.cpp index 638ba5ea..985e5d4e 100644 --- a/src/base/net/stratum/Pools.cpp +++ b/src/base/net/stratum/Pools.cpp @@ -24,6 +24,7 @@ #include "base/io/log/Log.h" +#include "base/kernel/interfaces/IJsonReader.h" #include "base/net/stratum/Pools.h" #include "base/net/stratum/strategies/FailoverStrategy.h" #include "base/net/stratum/strategies/SinglePoolStrategy.h" @@ -103,18 +104,11 @@ size_t xmrig::Pools::active() const } -void xmrig::Pools::adjust(const Algorithm &algorithm) -{ - for (Pool &pool : m_data) { - pool.adjust(algorithm); - } -} - - -void xmrig::Pools::load(const rapidjson::Value &pools) +void xmrig::Pools::load(const IJsonReader &reader) { m_data.clear(); + const rapidjson::Value &pools = reader.getArray("pools"); if (!pools.IsArray()) { return; } @@ -129,6 +123,11 @@ void xmrig::Pools::load(const rapidjson::Value &pools) m_data.push_back(std::move(pool)); } } + + setDonateLevel(reader.getInt("donate-level", kDefaultDonateLevel)); + setProxyDonate(reader.getInt("donate-over-proxy", PROXY_DONATE_AUTO)); + setRetries(reader.getInt("retries")); + setRetryPause(reader.getInt("retry-pause")); } @@ -136,11 +135,11 @@ void xmrig::Pools::print() const { size_t i = 1; for (const Pool &pool : m_data) { - Log::print(GREEN_BOLD(" * ") WHITE_BOLD("POOL #%-7zu") CSI "1;%dm%s" CLEAR " variant " WHITE_BOLD("%s"), + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("POOL #%-7zu") CSI "1;%dm%s" CLEAR " algo " WHITE_BOLD("%s"), i, (pool.isEnabled() ? (pool.isTLS() ? 
32 : 36) : 31), pool.url().data(), - pool.algorithm().variantName() + pool.algorithm().shortName() ); i++; diff --git a/src/base/net/stratum/Pools.h b/src/base/net/stratum/Pools.h index 6a63f166..70e17225 100644 --- a/src/base/net/stratum/Pools.h +++ b/src/base/net/stratum/Pools.h @@ -35,6 +35,7 @@ namespace xmrig { +class IJsonReader; class IStrategy; class IStrategyListener; @@ -63,15 +64,15 @@ public: IStrategy *createStrategy(IStrategyListener *listener) const; rapidjson::Value toJSON(rapidjson::Document &doc) const; size_t active() const; - void adjust(const Algorithm &algorithm); - void load(const rapidjson::Value &pools); + void load(const IJsonReader &reader); void print() const; + +private: void setDonateLevel(int level); void setProxyDonate(int value); void setRetries(int retries); void setRetryPause(int retryPause); -private: int m_donateLevel; int m_retries; int m_retryPause; diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index c1430e4d..fbb12ffa 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -87,8 +87,6 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const auto &allocator = doc.GetAllocator(); - doc.AddMember("algo", StringRef(algorithm().name()), allocator); - Value api(kObjectType); api.AddMember("id", m_apiId.toJSON(), allocator); api.AddMember("worker-id", m_apiWorkerId.toJSON(), allocator); @@ -146,37 +144,37 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const bool xmrig::Config::finalize() { - if (!m_threads.cpu.empty()) { - m_threads.mode = Advanced; - const bool softAES = (m_aesMode == AES_AUTO ? (Cpu::info()->hasAES() ? AES_HW : AES_SOFT) : m_aesMode) == AES_SOFT; +// if (!m_threads.cpu.empty()) { // FIXME +// m_threads.mode = Advanced; +// const bool softAES = (m_aesMode == AES_AUTO ? (Cpu::info()->hasAES() ? 
AES_HW : AES_SOFT) : m_aesMode) == AES_SOFT; - for (size_t i = 0; i < m_threads.cpu.size(); ++i) { - m_threads.list.push_back(CpuThread::createFromData(i, m_algorithm.algo(), m_threads.cpu[i], m_priority, softAES)); - } +// for (size_t i = 0; i < m_threads.cpu.size(); ++i) { +//// m_threads.list.push_back(CpuThread::createFromData(i, m_algorithm.algo(), m_threads.cpu[i], m_priority, softAES)); +// } - return true; - } +// return true; +// } - const AlgoVariant av = getAlgoVariant(); - m_threads.mode = m_threads.count ? Simple : Automatic; +// const AlgoVariant av = getAlgoVariant(); +// m_threads.mode = m_threads.count ? Simple : Automatic; - const size_t size = CpuThread::multiway(av) * cn_select_memory(m_algorithm.algo()) / 1024; +//// const size_t size = CpuThread::multiway(av) * cn_select_memory(m_algorithm.algo()) / 1024; - if (!m_threads.count) { - m_threads.count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); - } - else if (m_safe) { - const size_t count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); - if (m_threads.count > count) { - m_threads.count = count; - } - } +// if (!m_threads.count) { +// m_threads.count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); +// } +// else if (m_safe) { +// const size_t count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); +// if (m_threads.count > count) { +// m_threads.count = count; +// } +// } - for (size_t i = 0; i < m_threads.count; ++i) { - m_threads.list.push_back(CpuThread::createFromAV(i, m_algorithm.algo(), av, m_threads.mask, m_priority, m_assembly)); - } +// for (size_t i = 0; i < m_threads.count; ++i) { +// m_threads.list.push_back(CpuThread::createFromAV(i, m_algorithm.algo(), av, m_threads.mask, m_priority, m_assembly)); +// } - m_shouldSave = m_threads.mode == Automatic; +// m_shouldSave = m_threads.mode == Automatic; return true; } @@ -245,9 +243,9 @@ void xmrig::Config::setThreads(const rapidjson::Value &threads) xmrig::AlgoVariant xmrig::Config::getAlgoVariant() 
const { # ifdef XMRIG_ALGO_CN_LITE - if (m_algorithm.algo() == xmrig::CRYPTONIGHT_LITE) { - return getAlgoVariantLite(); - } +// if (m_algorithm.algo() == xmrig::CRYPTONIGHT_LITE) { // FIXME +// return getAlgoVariantLite(); +// } # endif if (m_algoVariant <= AV_AUTO || m_algoVariant >= AV_MAX) { diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index 29ca9ecf..f85b0a6f 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -44,254 +44,96 @@ #endif -struct AlgoData +namespace xmrig { + + +struct AlgoName { const char *name; const char *shortName; - xmrig::Algo algo; - xmrig::Variant variant; + const Algorithm::Id id; }; -static AlgoData const algorithms[] = { - { "cryptonight", "cn", xmrig::CRYPTONIGHT, xmrig::VARIANT_AUTO }, - { "cryptonight/0", "cn/0", xmrig::CRYPTONIGHT, xmrig::VARIANT_0 }, - { "cryptonight/1", "cn/1", xmrig::CRYPTONIGHT, xmrig::VARIANT_1 }, - { "cryptonight/xtl", "cn/xtl", xmrig::CRYPTONIGHT, xmrig::VARIANT_XTL }, - { "cryptonight/msr", "cn/msr", xmrig::CRYPTONIGHT, xmrig::VARIANT_MSR }, - { "cryptonight/xao", "cn/xao", xmrig::CRYPTONIGHT, xmrig::VARIANT_XAO }, - { "cryptonight/rto", "cn/rto", xmrig::CRYPTONIGHT, xmrig::VARIANT_RTO }, - { "cryptonight/2", "cn/2", xmrig::CRYPTONIGHT, xmrig::VARIANT_2 }, - { "cryptonight/half", "cn/half", xmrig::CRYPTONIGHT, xmrig::VARIANT_HALF }, - { "cryptonight/xtlv9", "cn/xtlv9", xmrig::CRYPTONIGHT, xmrig::VARIANT_HALF }, - { "cryptonight/wow", "cn/wow", xmrig::CRYPTONIGHT, xmrig::VARIANT_WOW }, - { "cryptonight/r", "cn/r", xmrig::CRYPTONIGHT, xmrig::VARIANT_4 }, - { "cryptonight/rwz", "cn/rwz", xmrig::CRYPTONIGHT, xmrig::VARIANT_RWZ }, - { "cryptonight/zls", "cn/zls", xmrig::CRYPTONIGHT, xmrig::VARIANT_ZLS }, - { "cryptonight/double", "cn/double", xmrig::CRYPTONIGHT, xmrig::VARIANT_DOUBLE }, - -# ifdef XMRIG_ALGO_CN_LITE - { "cryptonight-lite", "cn-lite", xmrig::CRYPTONIGHT_LITE, xmrig::VARIANT_AUTO }, - { "cryptonight-light", "cn-light", 
xmrig::CRYPTONIGHT_LITE, xmrig::VARIANT_AUTO }, - { "cryptonight-lite/0", "cn-lite/0", xmrig::CRYPTONIGHT_LITE, xmrig::VARIANT_0 }, - { "cryptonight-lite/1", "cn-lite/1", xmrig::CRYPTONIGHT_LITE, xmrig::VARIANT_1 }, -# endif - -# ifdef XMRIG_ALGO_CN_HEAVY - { "cryptonight-heavy", "cn-heavy", xmrig::CRYPTONIGHT_HEAVY, xmrig::VARIANT_AUTO }, - { "cryptonight-heavy/0", "cn-heavy/0", xmrig::CRYPTONIGHT_HEAVY, xmrig::VARIANT_0 }, - { "cryptonight-heavy/xhv", "cn-heavy/xhv", xmrig::CRYPTONIGHT_HEAVY, xmrig::VARIANT_XHV }, - { "cryptonight-heavy/tube", "cn-heavy/tube", xmrig::CRYPTONIGHT_HEAVY, xmrig::VARIANT_TUBE }, -# endif - -# ifdef XMRIG_ALGO_CN_PICO - { "cryptonight-pico/trtl", "cn-pico/trtl", xmrig::CRYPTONIGHT_PICO, xmrig::VARIANT_TRTL }, - { "cryptonight-pico", "cn-pico", xmrig::CRYPTONIGHT_PICO, xmrig::VARIANT_TRTL }, - { "cryptonight-turtle", "cn-trtl", xmrig::CRYPTONIGHT_PICO, xmrig::VARIANT_TRTL }, - { "cryptonight-ultralite", "cn-ultralite", xmrig::CRYPTONIGHT_PICO, xmrig::VARIANT_TRTL }, - { "cryptonight_turtle", "cn_turtle", xmrig::CRYPTONIGHT_PICO, xmrig::VARIANT_TRTL }, -# endif - +static AlgoName const algorithm_names[] = { + { "cryptonight/0", "cn/0", Algorithm::CN_0 }, + { "cryptonight", "cn", Algorithm::CN_0 }, + { "cryptonight/1", "cn/1", Algorithm::CN_1 }, + { "cryptonight-monerov7", nullptr, Algorithm::CN_1 }, + { "cryptonight_v7", nullptr, Algorithm::CN_1 }, + { "cryptonight/2", "cn/2", Algorithm::CN_2 }, + { "cryptonight-monerov8", nullptr, Algorithm::CN_2 }, + { "cryptonight_v8", nullptr, Algorithm::CN_2 }, + { "cryptonight/r", "cn/r", Algorithm::CN_R }, + { "cryptonight_r", nullptr, Algorithm::CN_R }, + { "cryptonight/wow", "cn/wow", Algorithm::CN_WOW }, + { "cryptonight/fast", "cn/fast", Algorithm::CN_FAST }, + { "cryptonight/msr", "cn/msr", Algorithm::CN_FAST }, + { "cryptonight/half", "cn/half", Algorithm::CN_HALF }, + { "cryptonight/xao", "cn/xao", Algorithm::CN_XAO }, + { "cryptonight_alloy", nullptr, Algorithm::CN_XAO }, + { 
"cryptonight/rto", "cn/rto", Algorithm::CN_RTO }, + { "cryptonight/rwz", "cn/rwz", Algorithm::CN_RWZ }, + { "cryptonight/zls", "cn/zls", Algorithm::CN_ZLS }, + { "cryptonight/double", "cn/double", Algorithm::CN_ZLS }, # ifdef XMRIG_ALGO_CN_GPU - { "cryptonight/gpu", "cn/gpu", xmrig::CRYPTONIGHT, xmrig::VARIANT_GPU }, + { "cryptonight/gpu", "cn/gpu", Algorithm::CN_GPU }, + { "cryptonight_gpu", nullptr, Algorithm::CN_GPU }, +# endif +# ifdef XMRIG_ALGO_CN_LITE + { "cryptonight-lite/0", "cn-lite/0", Algorithm::CN_LITE_0 }, + { "cryptonight-lite/1", "cn-lite/1", Algorithm::CN_LITE_1 }, + { "cryptonight-lite", "cn-lite", Algorithm::CN_LITE_1 }, + { "cryptonight-light", "cn-light", Algorithm::CN_LITE_1 }, + { "cryptonight_lite", nullptr, Algorithm::CN_LITE_1 }, + { "cryptonight-aeonv7", nullptr, Algorithm::CN_LITE_1 }, + { "cryptonight_lite_v7", nullptr, Algorithm::CN_LITE_1 }, +# endif +# ifdef XMRIG_ALGO_CN_HEAVY + { "cryptonight-heavy/0", "cn-heavy/0", Algorithm::CN_HEAVY_0 }, + { "cryptonight-heavy", "cn-heavy", Algorithm::CN_HEAVY_0 }, + { "cryptonight_heavy", nullptr, Algorithm::CN_HEAVY_0 }, + { "cryptonight-heavy/xhv", "cn-heavy/xhv", Algorithm::CN_HEAVY_XHV }, + { "cryptonight_haven", nullptr, Algorithm::CN_HEAVY_XHV }, + { "cryptonight-heavy/tube", "cn-heavy/tube", Algorithm::CN_HEAVY_TUBE }, + { "cryptonight-bittube2", nullptr, Algorithm::CN_HEAVY_TUBE }, +# endif +# ifdef XMRIG_ALGO_CN_PICO + { "cryptonight-pico", "cn-pico", Algorithm::CN_PICO }, + { "cryptonight-pico/trtl", "cn-pico/trtl", Algorithm::CN_PICO }, + { "cryptonight-turtle", "cn-trtl", Algorithm::CN_PICO }, + { "cryptonight-ultralite", "cn-ultralite", Algorithm::CN_PICO }, + { "cryptonight_turtle", "cn_turtle", Algorithm::CN_PICO }, # endif }; -#ifdef XMRIG_PROXY_PROJECT -static AlgoData const xmrStakAlgorithms[] = { - { "cryptonight-monerov7", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_1 }, - { "cryptonight_v7", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_1 }, - { "cryptonight-monerov8", 
nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_2 }, - { "cryptonight_v8", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_2 }, - { "cryptonight_v7_stellite", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_XTL }, - { "cryptonight_lite", nullptr, xmrig::CRYPTONIGHT_LITE, xmrig::VARIANT_0 }, - { "cryptonight-aeonv7", nullptr, xmrig::CRYPTONIGHT_LITE, xmrig::VARIANT_1 }, - { "cryptonight_lite_v7", nullptr, xmrig::CRYPTONIGHT_LITE, xmrig::VARIANT_1 }, - { "cryptonight_heavy", nullptr, xmrig::CRYPTONIGHT_HEAVY, xmrig::VARIANT_0 }, - { "cryptonight_haven", nullptr, xmrig::CRYPTONIGHT_HEAVY, xmrig::VARIANT_XHV }, - { "cryptonight_masari", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_MSR }, - { "cryptonight_masari", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_MSR }, - { "cryptonight-bittube2", nullptr, xmrig::CRYPTONIGHT_HEAVY, xmrig::VARIANT_TUBE }, // bittube-miner - { "cryptonight_alloy", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_XAO }, // xmr-stak-alloy - { "cryptonight_turtle", nullptr, xmrig::CRYPTONIGHT_PICO, xmrig::VARIANT_TRTL }, - { "cryptonight_gpu", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_GPU }, - { "cryptonight_r", nullptr, xmrig::CRYPTONIGHT, xmrig::VARIANT_4 }, -}; -#endif - - -static const char *variants[] = { - "0", - "1", - "tube", - "xtl", - "msr", - "xhv", - "xao", - "rto", - "2", - "half", - "trtl", - "gpu", - "wow", - "r", - "rwz", - "zls", - "double" -}; - - -static_assert(xmrig::VARIANT_MAX == ARRAY_SIZE(variants), "variants size mismatch"); - - -bool xmrig::Algorithm::isValid() const -{ - if (m_algo == INVALID_ALGO) { - return false; - } - - for (size_t i = 0; i < ARRAY_SIZE(algorithms); i++) { - if (algorithms[i].algo == m_algo && algorithms[i].variant == m_variant) { - return true; - } - } - - return false; -} - - -const char *xmrig::Algorithm::variantName() const -{ - if (m_variant == VARIANT_AUTO) { - return "auto"; - } - - return variants[m_variant]; -} - - -void xmrig::Algorithm::parseAlgorithm(const char *algo) -{ - m_algo = INVALID_ALGO; - 
m_variant = VARIANT_AUTO; - -// assert(algo != nullptr); - if (algo == nullptr || strlen(algo) < 1) { - return; - } - - if (*algo == '!') { - m_flags |= Forced; - - return parseAlgorithm(algo + 1); - } - - for (size_t i = 0; i < ARRAY_SIZE(algorithms); i++) { - if ((strcasecmp(algo, algorithms[i].name) == 0) || (strcasecmp(algo, algorithms[i].shortName) == 0)) { - m_algo = algorithms[i].algo; - m_variant = algorithms[i].variant; - break; - } - } - - if (m_algo == INVALID_ALGO) { - assert(false); - } -} - - -void xmrig::Algorithm::parseVariant(const char *variant) -{ - m_variant = VARIANT_AUTO; - - if (variant == nullptr || strlen(variant) < 1) { - return; - } - - if (*variant == '!') { - m_flags |= Forced; - - return parseVariant(variant + 1); - } - - for (size_t i = 0; i < ARRAY_SIZE(variants); i++) { - if (strcasecmp(variant, variants[i]) == 0) { - m_variant = static_cast(i); - return; - } - } - - if (strcasecmp(variant, "xtlv9") == 0) { - m_variant = VARIANT_HALF; - } -} - - -void xmrig::Algorithm::parseVariant(int variant) -{ - assert(variant >= -1 && variant <= 2); - - switch (variant) { - case -1: - case 0: - case 1: - m_variant = static_cast(variant); - break; - - case 2: - m_variant = VARIANT_2; - break; - - default: - break; - } -} - - -void xmrig::Algorithm::setAlgo(Algo algo) -{ - m_algo = algo; - - if (m_algo == CRYPTONIGHT_PICO && m_variant == VARIANT_AUTO) { - m_variant = xmrig::VARIANT_TRTL; - } -} - - -#ifdef XMRIG_PROXY_PROJECT -void xmrig::Algorithm::parseXmrStakAlgorithm(const char *algo) -{ - m_algo = INVALID_ALGO; - m_variant = VARIANT_AUTO; - - assert(algo != nullptr); - if (algo == nullptr) { - return; - } - - for (size_t i = 0; i < ARRAY_SIZE(xmrStakAlgorithms); i++) { - if (strcasecmp(algo, xmrStakAlgorithms[i].name) == 0) { - m_algo = xmrStakAlgorithms[i].algo; - m_variant = xmrStakAlgorithms[i].variant; - break; - } - } - - if (m_algo == INVALID_ALGO) { - assert(false); - } -} -#endif +} /* namespace xmrig */ const char 
*xmrig::Algorithm::name(bool shortName) const { - for (size_t i = 0; i < ARRAY_SIZE(algorithms); i++) { - if (algorithms[i].algo == m_algo && algorithms[i].variant == m_variant) { - return shortName ? algorithms[i].shortName : algorithms[i].name; + for (size_t i = 0; i < ARRAY_SIZE(algorithm_names); i++) { + if (algorithm_names[i].id == m_id) { + return shortName ? algorithm_names[i].shortName : algorithm_names[i].name; } } return "invalid"; } + + +xmrig::Algorithm::Id xmrig::Algorithm::parse(const char *name) +{ + if (name == nullptr || strlen(name) < 1) { + return INVALID; + } + + for (size_t i = 0; i < ARRAY_SIZE(algorithm_names); i++) { + if ((strcasecmp(name, algorithm_names[i].name) == 0) || (algorithm_names[i].shortName != nullptr && strcasecmp(name, algorithm_names[i].shortName) == 0)) { + return algorithm_names[i].id; + } + } + + return INVALID; +} diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index 664552aa..c70e0caa 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -30,68 +30,63 @@ #include -#include "common/xmrig.h" - - namespace xmrig { class Algorithm { public: - enum Flags { - None = 0, - Forced = 1 + enum Id : int { + INVALID = -1, + CN_0, // "cn/0" Original CryptoNight + CN_1, // "cn/1" CryptoNight variant 1 also known as Monero7 and CryptoNightV7 + CN_2, // "cn/2" CryptoNight variant 2 + CN_R, // "cn/r" CryptoNightR (Monero's variant 4) + CN_WOW, // "cn/wow" CryptoNightR (Wownero) + CN_FAST, // "cn/fast" CryptoNight variant 1 with half iterations + CN_HALF, // "cn/half" CryptoNight variant 2 with half iterations (Masari/Stellite) + CN_XAO, // "cn/xao" Modified CryptoNight variant 0 (Alloy only) + CN_RTO, // "cn/rto" Modified CryptoNight variant 1 (Arto only) + CN_RWZ, // "cn/rwz" CryptoNight variant 2 with 3/4 iterations and reversed shuffle operation (Graft) + CN_ZLS, // "cn/zls" CryptoNight variant 2 with 3/4 iterations (Zelerius) + CN_DOUBLE, // "cn/double" CryptoNight variant 2 with 
double iterations (X-CASH) +# ifdef XMRIG_ALGO_CN_GPU + CN_GPU, // "cn/gpu" CryptoNight-GPU (Ryo) +# endif +# ifdef XMRIG_ALGO_CN_LITE + CN_LITE_0, // "cn-lite/0" CryptoNight-Lite (1 MB) variant 0 + CN_LITE_1, // "cn-lite/1" CryptoNight-Lite (1 MB) variant 1 +# endif +# ifdef XMRIG_ALGO_CN_HEAVY + CN_HEAVY_0, // "cn-heavy/0" CryptoNight-Heavy (4 MB) + CN_HEAVY_TUBE, // "cn-heavy/tube" Modified CryptoNight-Heavy (TUBE only) + CN_HEAVY_XHV, // "cn-heavy/xhv" Modified CryptoNight-Heavy (Haven Protocol only) +# endif +# ifdef XMRIG_ALGO_CN_PICO + CN_PICO, // "cn-pico" CryptoNight Turtle (TRTL) +# endif + MAX }; - inline Algorithm() : - m_algo(INVALID_ALGO), - m_flags(0), - m_variant(VARIANT_AUTO) - {} + inline Algorithm() {} + inline Algorithm(const char *algo) : m_id(parse(algo)) {} + inline Algorithm(Id id) : m_id(id) {} - inline Algorithm(Algo algo, Variant variant) : - m_flags(0), - m_variant(variant) - { - setAlgo(algo); - } - - inline Algorithm(const char *algo) : - m_flags(0) - { - parseAlgorithm(algo); - } - - inline Algo algo() const { return m_algo; } - inline bool isEqual(const Algorithm &other) const { return m_algo == other.m_algo && m_variant == other.m_variant; } - inline bool isForced() const { return m_flags & Forced; } + inline bool isEqual(const Algorithm &other) const { return m_id == other.m_id; } inline const char *name() const { return name(false); } inline const char *shortName() const { return name(true); } - inline int flags() const { return m_flags; } - inline Variant variant() const { return m_variant; } - inline void setVariant(Variant variant) { m_variant = variant; } + inline Id id() const { return m_id; } + inline bool isValid() const { return m_id != INVALID; } inline bool operator!=(const Algorithm &other) const { return !isEqual(other); } inline bool operator==(const Algorithm &other) const { return isEqual(other); } - bool isValid() const; - const char *variantName() const; - void parseAlgorithm(const char *algo); - void 
parseVariant(const char *variant); - void parseVariant(int variant); - void setAlgo(Algo algo); - -# ifdef XMRIG_PROXY_PROJECT - void parseXmrStakAlgorithm(const char *algo); -# endif + static Id parse(const char *name); private: const char *name(bool shortName) const; - Algo m_algo; - int m_flags; - Variant m_variant; + Id m_id = INVALID; }; @@ -100,4 +95,5 @@ typedef std::vector Algorithms; } /* namespace xmrig */ -#endif /* __ALGORITHM_H__ */ + +#endif /* XMRIG_ALGORITHM_H */ diff --git a/src/net/Network.cpp b/src/net/Network.cpp index 1ab42236..16669f52 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -231,8 +231,7 @@ void xmrig::Network::getConnection(rapidjson::Value &reply, rapidjson::Document using namespace rapidjson; auto &allocator = doc.GetAllocator(); - const Algorithm &algo = m_strategy->client()->job().algorithm(); - reply.AddMember("algo", StringRef((algo.isValid() ? algo : m_controller->config()->algorithm()).shortName()), allocator); + reply.AddMember("algo", StringRef(m_strategy->client()->job().algorithm().shortName()), allocator); Value connection(kObjectType); connection.AddMember("pool", StringRef(m_state.pool), allocator); diff --git a/src/net/strategies/DonateStrategy.cpp b/src/net/strategies/DonateStrategy.cpp index fb958a4c..3d913087 100644 --- a/src/net/strategies/DonateStrategy.cpp +++ b/src/net/strategies/DonateStrategy.cpp @@ -79,9 +79,9 @@ xmrig::DonateStrategy::DonateStrategy(Controller *controller, IStrategyListener # endif m_pools.push_back(Pool(kDonateHost, 3333, m_userId, nullptr, 0, true)); - for (Pool &pool : m_pools) { - pool.adjust(Algorithm(controller->config()->algorithm().algo(), VARIANT_AUTO)); - } +// for (Pool &pool : m_pools) { +// pool.adjust(Algorithm()); // FIXME +// } if (m_pools.size() > 1) { m_strategy = new FailoverStrategy(m_pools, 1, 2, this, true); diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index e4a5fb0c..30c43000 100644 --- a/src/workers/MultiWorker.cpp +++ 
b/src/workers/MultiWorker.cpp @@ -126,7 +126,8 @@ void MultiWorker::start() storeStats(); } - m_thread->fn(m_state.job.algorithm().variant())(m_state.blob, m_state.job.size(), m_hash, m_ctx, m_state.job.height()); + // FIXME +// m_thread->fn(m_state.job.algorithm().variant())(m_state.blob, m_state.job.size(), m_hash, m_ctx, m_state.job.height()); for (size_t i = 0; i < N; ++i) { if (*reinterpret_cast(m_hash + (i * 32) + 24) < m_state.job.target()) { diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index b95d8b85..62cbd1cf 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -176,7 +176,7 @@ void Workers::start(xmrig::Controller *controller) m_controller = controller; const std::vector &threads = controller->config()->threads(); - m_status.algo = controller->config()->algorithm().algo(); +// m_status.algo = controller->config()->algorithm().algo(); // FIXME m_status.threads = threads.size(); for (const xmrig::IThread *thread : threads) { From 1f0e3e501cece285744e700d0da95ede52a37fc5 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 13 Jun 2019 22:08:52 +0700 Subject: [PATCH 002/172] Implemented new style algorithm definitions (except ARM), removed Algo and Variant enums. 
--- CMakeLists.txt | 6 +- cmake/asm.cmake | 5 +- src/Mem.cpp | 13 +- src/Mem.h | 4 +- src/Mem_win.cpp | 1 - src/Summary.cpp | 4 +- src/common/cpu/BasicCpuInfo.cpp | 2 +- src/common/xmrig.h | 33 -- src/core/config/Config.cpp | 57 +- src/core/config/Config.h | 2 +- src/crypto/cn/CnAlgo.h | 207 +++++++ src/crypto/cn/CnHash.cpp | 269 +++++++++ src/crypto/cn/CnHash.h | 63 ++ src/crypto/cn/CryptoNight.h | 4 +- src/crypto/cn/CryptoNight_constants.h | 251 -------- src/crypto/cn/CryptoNight_monero.h | 36 +- src/crypto/cn/CryptoNight_test.h | 15 - src/crypto/cn/CryptoNight_x86.h | 784 ++++++++++++++----------- src/crypto/cn/gpu/cn_gpu_avx.cpp | 6 +- src/crypto/cn/gpu/cn_gpu_ssse3.cpp | 6 +- src/crypto/cn/r/variant4_random_math.h | 19 +- src/crypto/common/Algorithm.cpp | 81 ++- src/crypto/common/Algorithm.h | 15 +- src/interfaces/IThread.h | 4 +- src/workers/CpuThread.cpp | 544 +---------------- src/workers/CpuThread.h | 20 +- src/workers/MultiWorker.cpp | 88 +-- src/workers/MultiWorker.h | 17 +- src/workers/Workers.cpp | 21 +- src/workers/Workers.h | 5 +- 30 files changed, 1223 insertions(+), 1359 deletions(-) create mode 100644 src/crypto/cn/CnAlgo.h create mode 100644 src/crypto/cn/CnHash.cpp create mode 100644 src/crypto/cn/CnHash.h delete mode 100644 src/crypto/cn/CryptoNight_constants.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 30625d28..9c70a673 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -59,7 +59,8 @@ set(HEADERS_CRYPTO src/crypto/cn/c_groestl.h src/crypto/cn/c_jh.h src/crypto/cn/c_skein.h - src/crypto/cn/CryptoNight_constants.h + src/crypto/cn/CnAlgo.h + src/crypto/cn/CnHash.h src/crypto/cn/CryptoNight_monero.h src/crypto/cn/CryptoNight_test.h src/crypto/cn/CryptoNight.h @@ -102,10 +103,11 @@ set(SOURCES ) set(SOURCES_CRYPTO - src/crypto/cn/c_groestl.c src/crypto/cn/c_blake256.c + src/crypto/cn/c_groestl.c src/crypto/cn/c_jh.c src/crypto/cn/c_skein.c + src/crypto/cn/CnHash.cpp src/crypto/common/Algorithm.cpp ) diff --git a/cmake/asm.cmake 
b/cmake/asm.cmake index 25cccead..d3010e51 100644 --- a/cmake/asm.cmake +++ b/cmake/asm.cmake @@ -38,8 +38,11 @@ if (WITH_ASM AND NOT XMRIG_ARM AND CMAKE_SIZEOF_VOID_P EQUAL 8) add_library(${XMRIG_ASM_LIBRARY} STATIC ${XMRIG_ASM_FILES}) set(XMRIG_ASM_SOURCES src/crypto/cn/Asm.h src/crypto/cn/Asm.cpp src/crypto/cn/r/CryptonightR_gen.cpp) set_property(TARGET ${XMRIG_ASM_LIBRARY} PROPERTY LINKER_LANGUAGE C) + + add_definitions(/DXMRIG_FEATURE_ASM) else() set(XMRIG_ASM_SOURCES "") set(XMRIG_ASM_LIBRARY "") - add_definitions(/DXMRIG_NO_ASM) + + remove_definitions(/DXMRIG_FEATURE_ASM) endif() diff --git a/src/Mem.cpp b/src/Mem.cpp index b9e0fbf9..574c5ff2 100644 --- a/src/Mem.cpp +++ b/src/Mem.cpp @@ -27,7 +27,6 @@ #include -#include "crypto/cn/CryptoNight_constants.h" #include "crypto/cn/CryptoNight.h" #include "crypto/common/portable/mm_malloc.h" #include "crypto/common/VirtualMemory.h" @@ -38,12 +37,14 @@ bool Mem::m_enabled = true; int Mem::m_flags = 0; -MemInfo Mem::create(cryptonight_ctx **ctx, xmrig::Algo algorithm, size_t count) +MemInfo Mem::create(cryptonight_ctx **ctx, const xmrig::Algorithm &algorithm, size_t count) { using namespace xmrig; + constexpr CnAlgo props; + MemInfo info; - info.size = cn_select_memory(algorithm) * count; + info.size = props.memory(algorithm.id()) * count; constexpr const size_t align_size = 2 * 1024 * 1024; info.size = ((info.size + align_size - 1) / align_size) * align_size; @@ -53,10 +54,10 @@ MemInfo Mem::create(cryptonight_ctx **ctx, xmrig::Algo algorithm, size_t count) for (size_t i = 0; i < count; ++i) { cryptonight_ctx *c = static_cast(_mm_malloc(sizeof(cryptonight_ctx), 4096)); - c->memory = info.memory + (i * cn_select_memory(algorithm)); + c->memory = info.memory + (i * props.memory(algorithm.id())); - c->generated_code = reinterpret_cast(xmrig::VirtualMemory::allocateExecutableMemory(0x4000)); - c->generated_code_data.variant = xmrig::VARIANT_MAX; + c->generated_code = 
reinterpret_cast(VirtualMemory::allocateExecutableMemory(0x4000)); + c->generated_code_data.algo = Algorithm::INVALID; c->generated_code_data.height = std::numeric_limits::max(); ctx[i] = c; diff --git a/src/Mem.h b/src/Mem.h index bfb36b00..f43e005d 100644 --- a/src/Mem.h +++ b/src/Mem.h @@ -31,7 +31,7 @@ #include -#include "common/xmrig.h" +#include "crypto/cn/CnAlgo.h" struct cryptonight_ctx; @@ -56,7 +56,7 @@ public: Lock = 4 }; - static MemInfo create(cryptonight_ctx **ctx, xmrig::Algo algorithm, size_t count); + static MemInfo create(cryptonight_ctx **ctx, const xmrig::Algorithm &algorithm, size_t count); static void init(bool enabled); static void release(cryptonight_ctx **ctx, size_t count, MemInfo &info); diff --git a/src/Mem_win.cpp b/src/Mem_win.cpp index 76cbf434..34460e9d 100644 --- a/src/Mem_win.cpp +++ b/src/Mem_win.cpp @@ -34,7 +34,6 @@ #include "common/xmrig.h" #include "crypto/common/portable/mm_malloc.h" #include "crypto/common/VirtualMemory.h" -#include "crypto/cn/CryptoNight_constants.h" #include "crypto/cn/CryptoNight.h" #include "Mem.h" diff --git a/src/Summary.cpp b/src/Summary.cpp index d780d64f..2ba0fd57 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -39,7 +39,7 @@ #include "version.h" -#ifndef XMRIG_NO_ASM +#ifdef XMRIG_FEATURE_ASM static const char *coloredAsmNames[] = { RED_BOLD("none"), "auto", @@ -108,7 +108,7 @@ static void print_threads(xmrig::Config *config) ); } -# ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM if (config->assembly() == xmrig::ASM_AUTO) { const xmrig::Assembly assembly = xmrig::Cpu::info()->assembly(); diff --git a/src/common/cpu/BasicCpuInfo.cpp b/src/common/cpu/BasicCpuInfo.cpp index d7778bdd..c5b8ed0a 100644 --- a/src/common/cpu/BasicCpuInfo.cpp +++ b/src/common/cpu/BasicCpuInfo.cpp @@ -129,7 +129,7 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : { cpu_brand_string(m_brand); -# ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM if (hasAES()) { char vendor[13] = { 0 }; int32_t data[4] = { 0 }; diff --git 
a/src/common/xmrig.h b/src/common/xmrig.h index e8ca8857..5dd41845 100644 --- a/src/common/xmrig.h +++ b/src/common/xmrig.h @@ -30,16 +30,6 @@ namespace xmrig { -enum Algo { - INVALID_ALGO = -1, - CRYPTONIGHT, /* CryptoNight (2 MB) */ - CRYPTONIGHT_LITE, /* CryptoNight (1 MB) */ - CRYPTONIGHT_HEAVY, /* CryptoNight (4 MB) */ - CRYPTONIGHT_PICO, /* CryptoNight (256 KB) */ - ALGO_MAX -}; - - //--av=1 For CPUs with hardware AES. //--av=2 Lower power mode (double hash) of 1. //--av=3 Software AES implementation. @@ -60,29 +50,6 @@ enum AlgoVariant { }; -enum Variant { - VARIANT_AUTO = -1, // Autodetect - VARIANT_0 = 0, // Original CryptoNight or CryptoNight-Heavy - VARIANT_1 = 1, // CryptoNight variant 1 also known as Monero7 and CryptoNightV7 - VARIANT_TUBE = 2, // Modified CryptoNight-Heavy (TUBE only) - VARIANT_XTL = 3, // Modified CryptoNight variant 1 (Stellite only) - VARIANT_MSR = 4, // Modified CryptoNight variant 1 (Masari only) - VARIANT_XHV = 5, // Modified CryptoNight-Heavy (Haven Protocol only) - VARIANT_XAO = 6, // Modified CryptoNight variant 0 (Alloy only) - VARIANT_RTO = 7, // Modified CryptoNight variant 1 (Arto only) - VARIANT_2 = 8, // CryptoNight variant 2 - VARIANT_HALF = 9, // CryptoNight variant 2 with half iterations (Masari/Stellite) - VARIANT_TRTL = 10, // CryptoNight Turtle (TRTL) - VARIANT_GPU = 11, // CryptoNight-GPU (Ryo) - VARIANT_WOW = 12, // CryptoNightR (Wownero) - VARIANT_4 = 13, // CryptoNightR (Monero's variant 4) - VARIANT_RWZ = 14, // CryptoNight variant 2 with 3/4 iterations and reversed shuffle operation (Graft) - VARIANT_ZLS = 15, // CryptoNight variant 2 with 3/4 iterations (Zelerius) - VARIANT_DOUBLE = 16, // CryptoNight variant 2 with double iterations (X-CASH) - VARIANT_MAX -}; - - enum AlgoVerify { VERIFY_HW_AES = 1, VERIFY_SOFT_AES = 2 diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index fbb12ffa..d82c3225 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -33,7 +33,6 @@ 
#include "common/cpu/Cpu.h" #include "core/config/Config.h" #include "crypto/cn/Asm.h" -#include "crypto/cn/CryptoNight_constants.h" #include "rapidjson/document.h" #include "rapidjson/filewritestream.h" #include "rapidjson/prettywriter.h" @@ -71,7 +70,7 @@ bool xmrig::Config::read(const IJsonReader &reader, const char *fileName) setPriority(reader.getInt("cpu-priority", -1)); setThreads(reader.getValue("threads")); -# ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM setAssembly(reader.getValue("asm")); # endif @@ -93,7 +92,7 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const doc.AddMember("api", api, allocator); doc.AddMember("http", m_http.toJSON(doc), allocator); -# ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM doc.AddMember("asm", Asm::toJSON(m_assembly), allocator); # endif @@ -144,37 +143,39 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const bool xmrig::Config::finalize() { -// if (!m_threads.cpu.empty()) { // FIXME -// m_threads.mode = Advanced; -// const bool softAES = (m_aesMode == AES_AUTO ? (Cpu::info()->hasAES() ? AES_HW : AES_SOFT) : m_aesMode) == AES_SOFT; + Algorithm algorithm(Algorithm::CN_0); // FIXME algo -// for (size_t i = 0; i < m_threads.cpu.size(); ++i) { -//// m_threads.list.push_back(CpuThread::createFromData(i, m_algorithm.algo(), m_threads.cpu[i], m_priority, softAES)); -// } + if (!m_threads.cpu.empty()) { + m_threads.mode = Advanced; + const bool softAES = (m_aesMode == AES_AUTO ? (Cpu::info()->hasAES() ? AES_HW : AES_SOFT) : m_aesMode) == AES_SOFT; -// return true; -// } + for (size_t i = 0; i < m_threads.cpu.size(); ++i) { + m_threads.list.push_back(CpuThread::createFromData(i, algorithm, m_threads.cpu[i], m_priority, softAES)); + } -// const AlgoVariant av = getAlgoVariant(); -// m_threads.mode = m_threads.count ? 
Simple : Automatic; + return true; + } -//// const size_t size = CpuThread::multiway(av) * cn_select_memory(m_algorithm.algo()) / 1024; + const AlgoVariant av = getAlgoVariant(); + m_threads.mode = m_threads.count ? Simple : Automatic; -// if (!m_threads.count) { -// m_threads.count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); -// } -// else if (m_safe) { -// const size_t count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); -// if (m_threads.count > count) { -// m_threads.count = count; -// } -// } + const size_t size = CpuThread::multiway(av) * CnAlgo<>::memory(algorithm) / 1024; -// for (size_t i = 0; i < m_threads.count; ++i) { -// m_threads.list.push_back(CpuThread::createFromAV(i, m_algorithm.algo(), av, m_threads.mask, m_priority, m_assembly)); -// } + if (!m_threads.count) { + m_threads.count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); + } + else if (m_safe) { + const size_t count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); + if (m_threads.count > count) { + m_threads.count = count; + } + } -// m_shouldSave = m_threads.mode == Automatic; + for (size_t i = 0; i < m_threads.count; ++i) { + m_threads.list.push_back(CpuThread::createFromAV(i, algorithm, av, m_threads.mask, m_priority, m_assembly)); + } + + m_shouldSave = m_threads.mode == Automatic; return true; } @@ -276,7 +277,7 @@ xmrig::AlgoVariant xmrig::Config::getAlgoVariantLite() const #endif -#ifndef XMRIG_NO_ASM +#ifdef XMRIG_FEATURE_ASM void xmrig::Config::setAssembly(const rapidjson::Value &assembly) { m_assembly = Asm::parse(assembly); diff --git a/src/core/config/Config.h b/src/core/config/Config.h index 4bcb8bba..637f80df 100644 --- a/src/core/config/Config.h +++ b/src/core/config/Config.h @@ -94,7 +94,7 @@ private: AlgoVariant getAlgoVariantLite() const; # endif -# ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM void setAssembly(const rapidjson::Value &assembly); # endif diff --git a/src/crypto/cn/CnAlgo.h b/src/crypto/cn/CnAlgo.h new file mode 
100644 index 00000000..74ade22b --- /dev/null +++ b/src/crypto/cn/CnAlgo.h @@ -0,0 +1,207 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_CN_ALGO_H +#define XMRIG_CN_ALGO_H + + +#include +#include + + +#include "crypto/common/Algorithm.h" + + +namespace xmrig +{ + + +template +class CnAlgo +{ +public: + constexpr inline CnAlgo() + { + static_assert(ALGO != Algorithm::INVALID && m_memory[ALGO] > 0, "invalid CRYPTONIGHT algorithm"); + static_assert(sizeof(m_memory) / sizeof(m_memory)[0] == Algorithm::MAX, "memory table size mismatch"); + static_assert(sizeof(m_iterations) / sizeof(m_iterations)[0] == Algorithm::MAX, "iterations table size mismatch"); + static_assert(sizeof(m_base) / sizeof(m_base)[0] == Algorithm::MAX, "iterations table size mismatch"); + } + + constexpr inline Algorithm::Id base() const { return m_base[ALGO]; } + constexpr inline bool isHeavy() const { return memory() == CN_MEMORY * 2; } + constexpr inline bool isR() const { return ALGO == Algorithm::CN_R || ALGO == Algorithm::CN_WOW; } + constexpr inline size_t memory() const { return m_memory[ALGO]; } + constexpr inline uint32_t iterations() const { return m_iterations[ALGO]; } + constexpr inline uint32_t mask() const { return ((memory() - 1) / 16) * 16; } + + inline static size_t memory(Algorithm::Id algo) + { + switch (Algorithm::family(algo)) { + case Algorithm::CN: + return CN_MEMORY; + + case Algorithm::CN_LITE: + return CN_MEMORY / 2; + + case Algorithm::CN_HEAVY: + return CN_MEMORY * 2; + + case Algorithm::CN_PICO: + return CN_MEMORY / 8; + + default: + break; + } + + return 0; + } + + inline static uint32_t mask(Algorithm::Id algo) + { +# ifdef XMRIG_ALGO_CN_GPU + if (algo == Algorithm::CN_GPU) { + return 0x1FFFC0; + } +# endif + +# ifdef XMRIG_ALGO_CN_PICO + if (algo == Algorithm::CN_PICO_0) { + return 0x1FFF0; + } +# endif + + return ((memory(algo) - 1) / 16) * 16; + } + +private: + constexpr const static size_t CN_MEMORY = 0x200000; + constexpr const static uint32_t CN_ITER = 0x80000; + + constexpr const static size_t m_memory[] = { + CN_MEMORY, // CN_0 + CN_MEMORY, // CN_1 + CN_MEMORY, // CN_2 + 
CN_MEMORY, // CN_R + CN_MEMORY, // CN_WOW + CN_MEMORY, // CN_FAST + CN_MEMORY, // CN_HALF + CN_MEMORY, // CN_XAO + CN_MEMORY, // CN_RTO + CN_MEMORY, // CN_RWZ + CN_MEMORY, // CN_ZLS + CN_MEMORY, // CN_DOUBLE +# ifdef XMRIG_ALGO_CN_GPU + CN_MEMORY, // CN_GPU +# endif +# ifdef XMRIG_ALGO_CN_LITE + CN_MEMORY / 2, // CN_LITE_0 + CN_MEMORY / 2, // CN_LITE_1 +# endif +# ifdef XMRIG_ALGO_CN_HEAVY + CN_MEMORY * 2, // CN_HEAVY_0 + CN_MEMORY * 2, // CN_HEAVY_TUBE + CN_MEMORY * 2, // CN_HEAVY_XHV +# endif +# ifdef XMRIG_ALGO_CN_PICO + CN_MEMORY / 8, // CN_PICO_0 +# endif + }; + + constexpr const static uint32_t m_iterations[] = { + CN_ITER, // CN_0 + CN_ITER, // CN_1 + CN_ITER, // CN_2 + CN_ITER, // CN_R + CN_ITER, // CN_WOW + CN_ITER / 2, // CN_FAST + CN_ITER / 2, // CN_HALF + CN_ITER * 2, // CN_XAO + CN_ITER, // CN_RTO + 0x60000, // CN_RWZ + 0x60000, // CN_ZLS + CN_ITER * 2, // CN_DOUBLE +# ifdef XMRIG_ALGO_CN_GPU + 0xC000, // CN_GPU +# endif +# ifdef XMRIG_ALGO_CN_LITE + CN_ITER / 2, // CN_LITE_0 + CN_ITER / 2, // CN_LITE_1 +# endif +# ifdef XMRIG_ALGO_CN_HEAVY + CN_ITER / 2, // CN_HEAVY_0 + CN_ITER / 2, // CN_HEAVY_TUBE + CN_ITER / 2, // CN_HEAVY_XHV +# endif +# ifdef XMRIG_ALGO_CN_PICO + CN_ITER / 8, // CN_PICO_0 +# endif + }; + + constexpr const static Algorithm::Id m_base[] = { + Algorithm::CN_0, // CN_0 + Algorithm::CN_1, // CN_1 + Algorithm::CN_2, // CN_2 + Algorithm::CN_2, // CN_R + Algorithm::CN_2, // CN_WOW + Algorithm::CN_1, // CN_FAST + Algorithm::CN_2, // CN_HALF + Algorithm::CN_0, // CN_XAO + Algorithm::CN_1, // CN_RTO + Algorithm::CN_2, // CN_RWZ + Algorithm::CN_2, // CN_ZLS + Algorithm::CN_2, // CN_DOUBLE +# ifdef XMRIG_ALGO_CN_GPU + Algorithm::CN_GPU, // CN_GPU +# endif +# ifdef XMRIG_ALGO_CN_LITE + Algorithm::CN_0, // CN_LITE_0 + Algorithm::CN_1, // CN_LITE_1 +# endif +# ifdef XMRIG_ALGO_CN_HEAVY + Algorithm::CN_0, // CN_HEAVY_0 + Algorithm::CN_1, // CN_HEAVY_TUBE + Algorithm::CN_0, // CN_HEAVY_XHV +# endif +# ifdef XMRIG_ALGO_CN_PICO + Algorithm::CN_2, // 
CN_PICO_0 +# endif + }; +}; + + +#ifdef XMRIG_ALGO_CN_GPU +template<> constexpr inline uint32_t CnAlgo::mask() const { return 0x1FFFC0; } +#endif + +#ifdef XMRIG_ALGO_CN_PICO +template<> constexpr inline uint32_t CnAlgo::mask() const { return 0x1FFF0; } +#endif + + +} /* namespace xmrig */ + + +#endif /* XMRIG_CN_ALGO_H */ diff --git a/src/crypto/cn/CnHash.cpp b/src/crypto/cn/CnHash.cpp new file mode 100644 index 00000000..61d2ea69 --- /dev/null +++ b/src/crypto/cn/CnHash.cpp @@ -0,0 +1,269 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include + + +#include "common/cpu/Cpu.h" +#include "crypto/cn/CnHash.h" +#include "crypto/common/VirtualMemory.h" + + +#if defined(XMRIG_ARM) +# include "crypto/cn/CryptoNight_arm.h" +#else +# include "crypto/cn/CryptoNight_x86.h" +#endif + + +#define ADD_FN(algo) \ + m_map[algo][AV_SINGLE][ASM_NONE] = cryptonight_single_hash; \ + m_map[algo][AV_SINGLE_SOFT][ASM_NONE] = cryptonight_single_hash; \ + m_map[algo][AV_DOUBLE][ASM_NONE] = cryptonight_double_hash; \ + m_map[algo][AV_DOUBLE_SOFT][ASM_NONE] = cryptonight_double_hash; \ + m_map[algo][AV_TRIPLE][ASM_NONE] = cryptonight_triple_hash; \ + m_map[algo][AV_TRIPLE_SOFT][ASM_NONE] = cryptonight_triple_hash; \ + m_map[algo][AV_QUAD][ASM_NONE] = cryptonight_quad_hash; \ + m_map[algo][AV_QUAD_SOFT][ASM_NONE] = cryptonight_quad_hash; \ + m_map[algo][AV_PENTA][ASM_NONE] = cryptonight_penta_hash; \ + m_map[algo][AV_PENTA_SOFT][ASM_NONE] = cryptonight_penta_hash; + + +#ifdef XMRIG_FEATURE_ASM +# define ADD_FN_ASM(algo) \ + m_map[algo][AV_SINGLE][ASM_INTEL] = cryptonight_single_hash_asm; \ + m_map[algo][AV_SINGLE][ASM_RYZEN] = cryptonight_single_hash_asm; \ + m_map[algo][AV_SINGLE][ASM_BULLDOZER] = cryptonight_single_hash_asm; \ + m_map[algo][AV_DOUBLE][ASM_INTEL] = cryptonight_double_hash_asm; \ + m_map[algo][AV_DOUBLE][ASM_RYZEN] = cryptonight_double_hash_asm; \ + m_map[algo][AV_DOUBLE][ASM_BULLDOZER] = cryptonight_double_hash_asm; + + +extern "C" void cnv2_mainloop_ivybridge_asm(cryptonight_ctx **ctx); +extern "C" void cnv2_mainloop_ryzen_asm(cryptonight_ctx **ctx); +extern "C" void cnv2_mainloop_bulldozer_asm(cryptonight_ctx **ctx); +extern "C" void cnv2_double_mainloop_sandybridge_asm(cryptonight_ctx **ctx); + + +namespace xmrig { + + +cn_mainloop_fun cn_half_mainloop_ivybridge_asm = nullptr; +cn_mainloop_fun cn_half_mainloop_ryzen_asm = nullptr; +cn_mainloop_fun cn_half_mainloop_bulldozer_asm = nullptr; +cn_mainloop_fun cn_half_double_mainloop_sandybridge_asm = nullptr; + +cn_mainloop_fun 
cn_trtl_mainloop_ivybridge_asm = nullptr; +cn_mainloop_fun cn_trtl_mainloop_ryzen_asm = nullptr; +cn_mainloop_fun cn_trtl_mainloop_bulldozer_asm = nullptr; +cn_mainloop_fun cn_trtl_double_mainloop_sandybridge_asm = nullptr; + +cn_mainloop_fun cn_zls_mainloop_ivybridge_asm = nullptr; +cn_mainloop_fun cn_zls_mainloop_ryzen_asm = nullptr; +cn_mainloop_fun cn_zls_mainloop_bulldozer_asm = nullptr; +cn_mainloop_fun cn_zls_double_mainloop_sandybridge_asm = nullptr; + +cn_mainloop_fun cn_double_mainloop_ivybridge_asm = nullptr; +cn_mainloop_fun cn_double_mainloop_ryzen_asm = nullptr; +cn_mainloop_fun cn_double_mainloop_bulldozer_asm = nullptr; +cn_mainloop_fun cn_double_double_mainloop_sandybridge_asm = nullptr; + + +template +static void patchCode(T dst, U src, const uint32_t iterations, const uint32_t mask = CnAlgo().mask()) +{ + const uint8_t* p = reinterpret_cast(src); + + // Workaround for Visual Studio placing trampoline in debug builds. +# if defined(_MSC_VER) + if (p[0] == 0xE9) { + p += *(int32_t*)(p + 1) + 5; + } +# endif + + size_t size = 0; + while (*(uint32_t*)(p + size) != 0xDEADC0DE) { + ++size; + } + + size += sizeof(uint32_t); + + memcpy((void*) dst, (const void*) src, size); + + uint8_t* patched_data = reinterpret_cast(dst); + for (size_t i = 0; i + sizeof(uint32_t) <= size; ++i) { + switch (*(uint32_t*)(patched_data + i)) { + case CnAlgo().iterations(): + *(uint32_t*)(patched_data + i) = iterations; + break; + + case CnAlgo().mask(): + *(uint32_t*)(patched_data + i) = mask; + break; + } + } +} + + +static void patchAsmVariants() +{ + const int allocation_size = 65536; + uint8_t *base = static_cast(VirtualMemory::allocateExecutableMemory(allocation_size)); + + cn_half_mainloop_ivybridge_asm = reinterpret_cast (base + 0x0000); + cn_half_mainloop_ryzen_asm = reinterpret_cast (base + 0x1000); + cn_half_mainloop_bulldozer_asm = reinterpret_cast (base + 0x2000); + cn_half_double_mainloop_sandybridge_asm = reinterpret_cast (base + 0x3000); + +# ifdef 
XMRIG_ALGO_CN_PICO + cn_trtl_mainloop_ivybridge_asm = reinterpret_cast (base + 0x4000); + cn_trtl_mainloop_ryzen_asm = reinterpret_cast (base + 0x5000); + cn_trtl_mainloop_bulldozer_asm = reinterpret_cast (base + 0x6000); + cn_trtl_double_mainloop_sandybridge_asm = reinterpret_cast (base + 0x7000); +# endif + + cn_zls_mainloop_ivybridge_asm = reinterpret_cast (base + 0x8000); + cn_zls_mainloop_ryzen_asm = reinterpret_cast (base + 0x9000); + cn_zls_mainloop_bulldozer_asm = reinterpret_cast (base + 0xA000); + cn_zls_double_mainloop_sandybridge_asm = reinterpret_cast (base + 0xB000); + + cn_double_mainloop_ivybridge_asm = reinterpret_cast (base + 0xC000); + cn_double_mainloop_ryzen_asm = reinterpret_cast (base + 0xD000); + cn_double_mainloop_bulldozer_asm = reinterpret_cast (base + 0xE000); + cn_double_double_mainloop_sandybridge_asm = reinterpret_cast (base + 0xF000); + + { + constexpr uint32_t ITER = CnAlgo().iterations(); + + patchCode(cn_half_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, ITER); + patchCode(cn_half_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, ITER); + patchCode(cn_half_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, ITER); + patchCode(cn_half_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER); + } + +# ifdef XMRIG_ALGO_CN_PICO + { + constexpr uint32_t ITER = CnAlgo().iterations(); + constexpr uint32_t MASK = CnAlgo().mask(); + + patchCode(cn_trtl_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, ITER, MASK); + patchCode(cn_trtl_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, ITER, MASK); + patchCode(cn_trtl_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, ITER, MASK); + patchCode(cn_trtl_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER, MASK); + } +# endif + + { + constexpr uint32_t ITER = CnAlgo().iterations(); + + patchCode(cn_zls_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, ITER); + patchCode(cn_zls_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, ITER); + 
patchCode(cn_zls_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, ITER); + patchCode(cn_zls_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER); + } + + { + constexpr uint32_t ITER = CnAlgo().iterations(); + + patchCode(cn_double_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, ITER); + patchCode(cn_double_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, ITER); + patchCode(cn_double_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, ITER); + patchCode(cn_double_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, ITER); + } + + VirtualMemory::protectExecutableMemory(base, allocation_size); + VirtualMemory::flushInstructionCache(base, allocation_size); +} +} // namespace xmrig +#else +# define ADD_FN_ASM(algo) +#endif + + +xmrig::CnHash::CnHash() +{ + ADD_FN(Algorithm::CN_0); + ADD_FN(Algorithm::CN_1); + ADD_FN(Algorithm::CN_2); + ADD_FN(Algorithm::CN_R); + ADD_FN(Algorithm::CN_WOW); + ADD_FN(Algorithm::CN_FAST); + ADD_FN(Algorithm::CN_HALF); + ADD_FN(Algorithm::CN_XAO); + ADD_FN(Algorithm::CN_RTO); + ADD_FN(Algorithm::CN_RWZ); + ADD_FN(Algorithm::CN_ZLS); + ADD_FN(Algorithm::CN_DOUBLE); + + ADD_FN_ASM(Algorithm::CN_2); + ADD_FN_ASM(Algorithm::CN_HALF); + ADD_FN_ASM(Algorithm::CN_R); + ADD_FN_ASM(Algorithm::CN_WOW); + ADD_FN_ASM(Algorithm::CN_RWZ); + ADD_FN_ASM(Algorithm::CN_ZLS); + ADD_FN_ASM(Algorithm::CN_DOUBLE); + +# ifdef XMRIG_ALGO_CN_GPU + m_map[Algorithm::CN_GPU][AV_SINGLE][ASM_NONE] = cryptonight_single_hash_gpu; + m_map[Algorithm::CN_GPU][AV_SINGLE_SOFT][ASM_NONE] = cryptonight_single_hash_gpu; +# endif + +# ifdef XMRIG_ALGO_CN_LITE + ADD_FN(Algorithm::CN_LITE_0); + ADD_FN(Algorithm::CN_LITE_1); +# endif + +# ifdef XMRIG_ALGO_CN_HEAVY + ADD_FN(Algorithm::CN_HEAVY_0); + ADD_FN(Algorithm::CN_HEAVY_TUBE); + ADD_FN(Algorithm::CN_HEAVY_XHV); +# endif + +# ifdef XMRIG_ALGO_CN_PICO + ADD_FN(Algorithm::CN_PICO_0); + ADD_FN_ASM(Algorithm::CN_PICO_0); +# endif + +# ifdef XMRIG_FEATURE_ASM + patchAsmVariants(); +# 
endif +} + + +xmrig::cn_hash_fun xmrig::CnHash::fn(const Algorithm &algorithm, AlgoVariant av, Assembly assembly) const +{ + if (!algorithm.isValid()) { + return nullptr; + } + +# ifdef XMRIG_FEATURE_ASM + cn_hash_fun fun = m_map[algorithm][av][assembly == ASM_AUTO ? Cpu::info()->assembly() : assembly]; + if (fun) { + return fun; + } +# endif + + return m_map[algorithm][av][ASM_NONE]; +} diff --git a/src/crypto/cn/CnHash.h b/src/crypto/cn/CnHash.h new file mode 100644 index 00000000..5fbf5c8a --- /dev/null +++ b/src/crypto/cn/CnHash.h @@ -0,0 +1,63 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_CN_HASH_H +#define XMRIG_CN_HASH_H + + +#include +#include + + +#include "common/xmrig.h" +#include "crypto/cn/CnAlgo.h" + + +struct cryptonight_ctx; + + +namespace xmrig +{ + +typedef void (*cn_hash_fun)(const uint8_t *input, size_t size, uint8_t *output, cryptonight_ctx **ctx, uint64_t height); +typedef void (*cn_mainloop_fun)(cryptonight_ctx **ctx); + + +class CnHash +{ +public: + CnHash(); + + cn_hash_fun fn(const Algorithm &algorithm, AlgoVariant av, Assembly assembly) const; + +private: + cn_hash_fun m_map[Algorithm::MAX][AV_MAX][ASM_MAX] = {}; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_CN_HASH_H */ diff --git a/src/crypto/cn/CryptoNight.h b/src/crypto/cn/CryptoNight.h index f50966ed..434c34f8 100644 --- a/src/crypto/cn/CryptoNight.h +++ b/src/crypto/cn/CryptoNight.h @@ -42,10 +42,10 @@ typedef void(*cn_mainloop_fun_ms_abi)(cryptonight_ctx**) ABI_ATTRIBUTE; struct cryptonight_r_data { - int variant; + int algo; uint64_t height; - bool match(const int v, const uint64_t h) const { return (v == variant) && (h == height); } + bool match(const int a, const uint64_t h) const { return (a == algo) && (h == height); } }; diff --git a/src/crypto/cn/CryptoNight_constants.h b/src/crypto/cn/CryptoNight_constants.h deleted file mode 100644 index 1bc06a3b..00000000 --- a/src/crypto/cn/CryptoNight_constants.h +++ /dev/null @@ -1,251 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2019 XMR-Stak , - * Copyright 2018 Lee Clagett - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef XMRIG_CRYPTONIGHT_CONSTANTS_H -#define XMRIG_CRYPTONIGHT_CONSTANTS_H - - -#include -#include - - -#include "common/xmrig.h" - - -namespace xmrig -{ - -constexpr const size_t CRYPTONIGHT_MEMORY = 2 * 1024 * 1024; -constexpr const uint32_t CRYPTONIGHT_MASK = 0x1FFFF0; -constexpr const uint32_t CRYPTONIGHT_ITER = 0x80000; -constexpr const uint32_t CRYPTONIGHT_HALF_ITER = 0x40000; -constexpr const uint32_t CRYPTONIGHT_XAO_ITER = 0x100000; -constexpr const uint32_t CRYPTONIGHT_DOUBLE_ITER = 0x100000; -constexpr const uint32_t CRYPTONIGHT_WALTZ_ITER = 0x60000; -constexpr const uint32_t CRYPTONIGHT_ZLS_ITER = 0x60000; - -constexpr const uint32_t CRYPTONIGHT_GPU_ITER = 0xC000; -constexpr const uint32_t CRYPTONIGHT_GPU_MASK = 0x1FFFC0; - -constexpr const size_t CRYPTONIGHT_LITE_MEMORY = 1 * 1024 * 1024; -constexpr const uint32_t CRYPTONIGHT_LITE_MASK = 0xFFFF0; -constexpr const uint32_t CRYPTONIGHT_LITE_ITER = 0x40000; - -constexpr const size_t CRYPTONIGHT_HEAVY_MEMORY = 4 * 1024 * 1024; -constexpr const uint32_t CRYPTONIGHT_HEAVY_MASK = 0x3FFFF0; -constexpr const uint32_t CRYPTONIGHT_HEAVY_ITER = 0x40000; - -constexpr const size_t CRYPTONIGHT_PICO_MEMORY = 256 * 1024; -constexpr const uint32_t CRYPTONIGHT_PICO_MASK = 0x1FFF0; -constexpr const uint32_t CRYPTONIGHT_PICO_ITER = 0x40000; -constexpr const uint32_t CRYPTONIGHT_TRTL_ITER = 0x10000; - - -template inline constexpr size_t cn_select_memory() { return 0; } -template<> inline constexpr size_t cn_select_memory() { return CRYPTONIGHT_MEMORY; } -template<> inline constexpr size_t cn_select_memory() { return CRYPTONIGHT_LITE_MEMORY; } 
-template<> inline constexpr size_t cn_select_memory() { return CRYPTONIGHT_HEAVY_MEMORY; } -template<> inline constexpr size_t cn_select_memory() { return CRYPTONIGHT_PICO_MEMORY; } - - -inline size_t cn_select_memory(Algo algorithm) -{ - switch(algorithm) - { - case CRYPTONIGHT: - return CRYPTONIGHT_MEMORY; - - case CRYPTONIGHT_LITE: - return CRYPTONIGHT_LITE_MEMORY; - - case CRYPTONIGHT_HEAVY: - return CRYPTONIGHT_HEAVY_MEMORY; - - case CRYPTONIGHT_PICO: - return CRYPTONIGHT_PICO_MEMORY; - - default: - break; - } - - return 0; -} - - -template inline constexpr uint32_t cn_select_mask() { return 0; } -template<> inline constexpr uint32_t cn_select_mask() { return CRYPTONIGHT_MASK; } -template<> inline constexpr uint32_t cn_select_mask() { return CRYPTONIGHT_LITE_MASK; } -template<> inline constexpr uint32_t cn_select_mask() { return CRYPTONIGHT_HEAVY_MASK; } -template<> inline constexpr uint32_t cn_select_mask() { return CRYPTONIGHT_PICO_MASK; } - - -inline uint32_t cn_select_mask(Algo algorithm) -{ - switch(algorithm) - { - case CRYPTONIGHT: - return CRYPTONIGHT_MASK; - - case CRYPTONIGHT_LITE: - return CRYPTONIGHT_LITE_MASK; - - case CRYPTONIGHT_HEAVY: - return CRYPTONIGHT_HEAVY_MASK; - - case CRYPTONIGHT_PICO: - return CRYPTONIGHT_PICO_MASK; - - default: - break; - } - - return 0; -} - - -template inline constexpr uint32_t cn_select_iter() { return 0; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_HALF_ITER; } -template<> inline 
constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_HALF_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_XAO_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_GPU_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_WALTZ_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_ZLS_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_DOUBLE_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_LITE_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_LITE_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_HEAVY_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_HEAVY_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_HEAVY_ITER; } -template<> inline constexpr uint32_t cn_select_iter() { return CRYPTONIGHT_TRTL_ITER; } - - -inline uint32_t cn_select_iter(Algo algorithm, Variant variant) -{ - switch (variant) { - case VARIANT_MSR: - case VARIANT_HALF: - return CRYPTONIGHT_HALF_ITER; - - case VARIANT_GPU: - return CRYPTONIGHT_GPU_ITER; - - case VARIANT_RTO: - case VARIANT_DOUBLE: - return CRYPTONIGHT_XAO_ITER; - - case VARIANT_TRTL: - return CRYPTONIGHT_TRTL_ITER; - - case VARIANT_RWZ: - case VARIANT_ZLS: - return CRYPTONIGHT_WALTZ_ITER; - - default: - break; - } - - switch(algorithm) - { - case CRYPTONIGHT: - return CRYPTONIGHT_ITER; - - case CRYPTONIGHT_LITE: - return CRYPTONIGHT_LITE_ITER; - - case CRYPTONIGHT_HEAVY: - return CRYPTONIGHT_HEAVY_ITER; - - case CRYPTONIGHT_PICO: - return CRYPTONIGHT_TRTL_ITER; - - default: - break; - } - - return 0; -} - - -template inline constexpr Variant cn_base_variant() { return VARIANT_0; } -template<> inline constexpr Variant 
cn_base_variant() { return VARIANT_0; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_1; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_1; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_1; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_1; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_0; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_0; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_1; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_2; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_2; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_2; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_GPU; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_2; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_2; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_2; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_2; } -template<> inline constexpr Variant cn_base_variant() { return VARIANT_2; } - - -inline Variant cn_base_variant(Variant variant) -{ - switch (variant) { - case VARIANT_0: - case VARIANT_XHV: - case VARIANT_XAO: - return VARIANT_0; - - case VARIANT_1: - case VARIANT_TUBE: - case VARIANT_XTL: - case VARIANT_MSR: - case VARIANT_RTO: - return VARIANT_1; - - case VARIANT_GPU: - return VARIANT_GPU; - - default: - break; - } - - return VARIANT_2; -} - - -template inline constexpr bool cn_is_cryptonight_r() { return false; } -template<> inline constexpr bool cn_is_cryptonight_r() { return true; } -template<> inline constexpr bool cn_is_cryptonight_r() { return true; } - -} /* namespace xmrig */ - - -#endif /* XMRIG_CRYPTONIGHT_CONSTANTS_H */ diff --git a/src/crypto/cn/CryptoNight_monero.h 
b/src/crypto/cn/CryptoNight_monero.h index 94a18c45..259cb3b6 100644 --- a/src/crypto/cn/CryptoNight_monero.h +++ b/src/crypto/cn/CryptoNight_monero.h @@ -33,21 +33,21 @@ #ifndef XMRIG_ARM # define VARIANT1_INIT(part) \ uint64_t tweak1_2_##part = 0; \ - if (BASE == xmrig::VARIANT_1) { \ + if (BASE == Algorithm::CN_1) { \ tweak1_2_##part = (*reinterpret_cast(input + 35 + part * size) ^ \ *(reinterpret_cast(ctx[part]->state) + 24)); \ } #else # define VARIANT1_INIT(part) \ uint64_t tweak1_2_##part = 0; \ - if (BASE == xmrig::VARIANT_1) { \ + if (BASE == Algorithm::CN_1) { \ memcpy(&tweak1_2_##part, input + 35 + part * size, sizeof tweak1_2_##part); \ tweak1_2_##part ^= *(reinterpret_cast(ctx[part]->state) + 24); \ } #endif #define VARIANT1_1(p) \ - if (BASE == xmrig::VARIANT_1) { \ + if (BASE == Algorithm::CN_1) { \ const uint8_t tmp = reinterpret_cast(p)[11]; \ static const uint32_t table = 0x75310; \ const uint8_t index = (((tmp >> 3) & 6) | (tmp & 1)) << 1; \ @@ -55,20 +55,20 @@ } #define VARIANT1_2(p, part) \ - if (BASE == xmrig::VARIANT_1) { \ + if (BASE == Algorithm::CN_1) { \ (p) ^= tweak1_2_##part; \ } #ifndef XMRIG_ARM # define VARIANT2_INIT(part) \ - __m128i division_result_xmm_##part = _mm_cvtsi64_si128(h##part[12]); \ - __m128i sqrt_result_xmm_##part = _mm_cvtsi64_si128(h##part[13]); + __m128i division_result_xmm_##part = _mm_cvtsi64_si128(static_cast(h##part[12])); \ + __m128i sqrt_result_xmm_##part = _mm_cvtsi64_si128(static_cast(h##part[13])); #ifdef _MSC_VER -# define VARIANT2_SET_ROUNDING_MODE() if (BASE == xmrig::VARIANT_2) { _control87(RC_DOWN, MCW_RC); } +# define VARIANT2_SET_ROUNDING_MODE() if (BASE == Algorithm::CN_2) { _control87(RC_DOWN, MCW_RC); } #else -# define VARIANT2_SET_ROUNDING_MODE() if (BASE == xmrig::VARIANT_2) { fesetround(FE_DOWNWARD); } +# define VARIANT2_SET_ROUNDING_MODE() if (BASE == Algorithm::CN_2) { fesetround(FE_DOWNWARD); } #endif # define VARIANT2_INTEGER_MATH(part, cl, cx) \ @@ -91,7 +91,7 @@ _mm_store_si128((__m128i 
*)((base_ptr) + ((offset) ^ 0x10)), _mm_add_epi64(chunk3, _b1)); \ _mm_store_si128((__m128i *)((base_ptr) + ((offset) ^ 0x20)), _mm_add_epi64(chunk1, _b)); \ _mm_store_si128((__m128i *)((base_ptr) + ((offset) ^ 0x30)), _mm_add_epi64(chunk2, _a)); \ - if (VARIANT == xmrig::VARIANT_4) { \ + if (ALGO == Algorithm::CN_R) { \ _c = _mm_xor_si128(_mm_xor_si128(_c, chunk3), _mm_xor_si128(chunk1, chunk2)); \ } \ } while (0) @@ -141,7 +141,7 @@ vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x10)), vaddq_u64(chunk3, vreinterpretq_u64_u8(_b1))); \ vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x20)), vaddq_u64(chunk1, vreinterpretq_u64_u8(_b))); \ vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x30)), vaddq_u64(chunk2, vreinterpretq_u64_u8(_a))); \ - if (VARIANT == xmrig::VARIANT_4) { \ + if (ALGO == Algorithm::CN_4) { \ _c = veorq_u64(veorq_u64(_c, chunk3), veorq_u64(chunk1, chunk2)); \ } \ } while (0) @@ -184,17 +184,17 @@ #define VARIANT4_RANDOM_MATH_INIT(part) \ uint32_t r##part[9]; \ struct V4_Instruction code##part[256]; \ - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { \ - r##part[0] = (uint32_t)(h##part[12]); \ - r##part[1] = (uint32_t)(h##part[12] >> 32); \ - r##part[2] = (uint32_t)(h##part[13]); \ - r##part[3] = (uint32_t)(h##part[13] >> 32); \ + if (props.isR()) { \ + r##part[0] = static_cast(h##part[12]); \ + r##part[1] = static_cast(h##part[12] >> 32); \ + r##part[2] = static_cast(h##part[13]); \ + r##part[3] = static_cast(h##part[13] >> 32); \ } \ - v4_random_math_init(code##part, height); + v4_random_math_init(code##part, height); #define VARIANT4_RANDOM_MATH(part, al, ah, cl, bx0, bx1) \ - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { \ - cl ^= (r##part[0] + r##part[1]) | ((uint64_t)(r##part[2] + r##part[3]) << 32); \ + if (props.isR()) { \ + cl ^= (r##part[0] + r##part[1]) | (static_cast(r##part[2] + r##part[3]) << 32); \ r##part[4] = static_cast(al); \ r##part[5] = static_cast(ah); \ r##part[6] = 
static_cast(_mm_cvtsi128_si32(bx0)); \ diff --git a/src/crypto/cn/CryptoNight_test.h b/src/crypto/cn/CryptoNight_test.h index 2429fc17..77cbbb8e 100644 --- a/src/crypto/cn/CryptoNight_test.h +++ b/src/crypto/cn/CryptoNight_test.h @@ -156,21 +156,6 @@ const static uint8_t test_output_v2[160] = { }; -// "cn/xtl" Stellite (XTL) -const static uint8_t test_output_xtl[160] = { - 0x8F, 0xE5, 0xF0, 0x5F, 0x02, 0x2A, 0x61, 0x7D, 0xE5, 0x3F, 0x79, 0x36, 0x4B, 0x25, 0xCB, 0xC3, - 0xC0, 0x8E, 0x0E, 0x1F, 0xE3, 0xBE, 0x48, 0x57, 0x07, 0x03, 0xFE, 0xE1, 0xEC, 0x0E, 0xB0, 0xB1, - 0x21, 0x26, 0xFF, 0x98, 0xE6, 0x86, 0x08, 0x5B, 0xC9, 0x96, 0x44, 0xA3, 0xB8, 0x4E, 0x28, 0x90, - 0x76, 0xED, 0xAD, 0xB9, 0xAA, 0xAC, 0x01, 0x94, 0x1D, 0xBE, 0x3E, 0xEA, 0xAD, 0xEE, 0xB2, 0xCF, - 0xB0, 0x43, 0x4B, 0x88, 0xFC, 0xB2, 0xF3, 0x82, 0x9D, 0xD7, 0xDF, 0x51, 0x97, 0x2C, 0x5A, 0xE3, - 0xC7, 0x16, 0x0B, 0xC8, 0x7C, 0xB7, 0x2F, 0x1C, 0x55, 0x33, 0xCA, 0xE1, 0xEE, 0x08, 0xA4, 0x86, - 0x60, 0xED, 0x6E, 0x9D, 0x2D, 0x05, 0x0D, 0x7D, 0x02, 0x49, 0x23, 0x39, 0x7C, 0xC3, 0x6D, 0x3D, - 0x05, 0x51, 0x28, 0xF1, 0x9B, 0x3C, 0xDF, 0xC4, 0xEA, 0x8A, 0xA6, 0x6A, 0x3C, 0x8B, 0xE2, 0xAF, - 0x47, 0x00, 0xFC, 0x36, 0xED, 0x50, 0xBB, 0xD2, 0x2E, 0x63, 0x4B, 0x93, 0x11, 0x0C, 0xA7, 0xBA, - 0x32, 0x6E, 0x47, 0x4D, 0xCE, 0xCC, 0x82, 0x54, 0x1D, 0x06, 0xF8, 0x06, 0x86, 0xBD, 0x22, 0x48 -}; - - // "cn/half" const static uint8_t test_output_half[160] = { 0x5D, 0x4F, 0xBC, 0x35, 0x60, 0x97, 0xEA, 0x64, 0x40, 0xB0, 0x88, 0x8E, 0xDE, 0xB6, 0x35, 0xDD, diff --git a/src/crypto/cn/CryptoNight_x86.h b/src/crypto/cn/CryptoNight_x86.h index 8d6792d2..994ee116 100644 --- a/src/crypto/cn/CryptoNight_x86.h +++ b/src/crypto/cn/CryptoNight_x86.h @@ -37,9 +37,9 @@ #include "common/cpu/Cpu.h" #include "common/crypto/keccak.h" -#include "crypto/cn/CryptoNight.h" -#include "crypto/cn/CryptoNight_constants.h" +#include "crypto/cn/CnAlgo.h" #include "crypto/cn/CryptoNight_monero.h" +#include "crypto/cn/CryptoNight.h" #include 
"crypto/cn/soft_aes.h" @@ -303,9 +303,14 @@ inline void mix_and_propagate(__m128i& x0, __m128i& x1, __m128i& x2, __m128i& x3 } -template +namespace xmrig { + + +template static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output) { + constexpr CnAlgo props; + __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7; __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; @@ -320,7 +325,7 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output) xin6 = _mm_load_si128(input + 10); xin7 = _mm_load_si128(input + 11); - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { + if (props.isHeavy()) { for (size_t i = 0; i < 16; i++) { aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); @@ -337,7 +342,7 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output) } } - for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) { + for (size_t i = 0; i < props.memory() / sizeof(__m128i); i += 8) { aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); @@ -361,37 +366,17 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output) } -#ifdef XMRIG_ALGO_CN_GPU -template -void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output) -{ - constexpr size_t hash_size = 200; // 25x8 bytes - alignas(16) uint64_t hash[25]; - - for (uint64_t i = 0; i < MEM / 512; i++) - { - memcpy(hash, input, hash_size); - hash[0] ^= i; - - xmrig::keccakf(hash, 24); - memcpy(output, hash, 160); - output += 160; - - xmrig::keccakf(hash, 24); - memcpy(output, hash, 176); - output += 176; - - xmrig::keccakf(hash, 24); - memcpy(output, hash, 176); - output += 176; - } -} -#endif - - -template +template static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output) { + constexpr 
CnAlgo props; + +# ifdef XMRIG_ALGO_CN_GPU + constexpr bool IS_HEAVY = props.isHeavy() || ALGO == Algorithm::CN_GPU; +# else + constexpr bool IS_HEAVY = props.isHeavy(); +# endif + __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7; __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; @@ -406,8 +391,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output) xout6 = _mm_load_si128(output + 10); xout7 = _mm_load_si128(output + 11); - for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) - { + for (size_t i = 0; i < props.memory() / sizeof(__m128i); i += 8) { xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1); xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); @@ -428,13 +412,13 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output) aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { + if (IS_HEAVY) { mix_and_propagate(xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7); } } - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { - for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) { + if (IS_HEAVY) { + for (size_t i = 0; i < props.memory() / sizeof(__m128i); i += 8) { xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1); xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); @@ -485,6 +469,9 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output) } +} /* namespace xmrig */ + + static inline __m128i aes_round_tweak_div(const __m128i &in, const __m128i &key) { alignas(16) uint32_t k[4]; @@ -527,12 +514,21 @@ static inline __m128i int_sqrt_v2(const uint64_t n0) } -template -static inline void cryptonight_monero_tweak(uint64_t* mem_out, const uint8_t* l, uint64_t idx, __m128i ax0, __m128i bx0, 
__m128i bx1, __m128i& cx) +void wow_soft_aes_compile_code(const V4_Instruction *code, int code_size, void *machine_code, xmrig::Assembly ASM); +void v4_soft_aes_compile_code(const V4_Instruction *code, int code_size, void *machine_code, xmrig::Assembly ASM); + + +namespace xmrig { + + +template +static inline void cryptonight_monero_tweak(uint64_t *mem_out, const uint8_t *l, uint64_t idx, __m128i ax0, __m128i bx0, __m128i bx1, __m128i& cx) { - if (BASE == xmrig::VARIANT_2) { - VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (VARIANT == xmrig::VARIANT_RWZ ? 1 : 0)); - _mm_store_si128((__m128i *)mem_out, _mm_xor_si128(bx0, cx)); + constexpr CnAlgo props; + + if (props.base() == Algorithm::CN_2) { + VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (ALGO == Algorithm::CN_RWZ ? 1 : 0)); + _mm_store_si128(reinterpret_cast<__m128i *>(mem_out), _mm_xor_si128(bx0, cx)); } else { __m128i tmp = _mm_xor_si128(bx0, cx); mem_out[0] = _mm_cvtsi128_si64(tmp); @@ -542,107 +538,105 @@ static inline void cryptonight_monero_tweak(uint64_t* mem_out, const uint8_t* l, uint8_t x = static_cast(vh >> 24); static const uint16_t table = 0x7531; - const uint8_t index = (((x >> (VARIANT == xmrig::VARIANT_XTL ? 
4 : 3)) & 6) | (x & 1)) << 1; + const uint8_t index = (((x >> (3)) & 6) | (x & 1)) << 1; vh ^= ((table >> index) & 0x3) << 28; mem_out[1] = vh; } } -void wow_soft_aes_compile_code(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM); -void v4_soft_aes_compile_code(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM); -template +template inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MASK = xmrig::cn_select_mask(); - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr size_t MEM = xmrig::cn_select_memory(); - constexpr xmrig::Variant BASE = xmrig::cn_base_variant(); + constexpr CnAlgo props; + constexpr size_t MASK = props.mask(); + constexpr Algorithm::Id BASE = props.base(); - static_assert(MASK > 0 && ITERATIONS > 0 && MEM > 0, "unsupported algorithm/variant"); +# ifdef XMRIG_ALGO_CN_HEAVY + constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE; +# else + constexpr bool IS_CN_HEAVY_TUBE = false; +# endif - if (BASE == xmrig::VARIANT_1 && size < 43) { + if (BASE == Algorithm::CN_1 && size < 43) { memset(output, 0, 32); return; } - xmrig::keccak(input, size, ctx[0]->state); + keccak(input, size, ctx[0]->state); + cn_explode_scratchpad(reinterpret_cast(ctx[0]->state), reinterpret_cast<__m128i *>(ctx[0]->memory)); - cn_explode_scratchpad((__m128i*) ctx[0]->state, (__m128i*) ctx[0]->memory); + uint64_t *h0 = reinterpret_cast(ctx[0]->state); + uint8_t *l0 = ctx[0]->memory; - uint64_t* h0 = reinterpret_cast(ctx[0]->state); - -#ifndef XMRIG_NO_ASM - if (SOFT_AES && xmrig::cn_is_cryptonight_r()) - { - if (!ctx[0]->generated_code_data.match(VARIANT, height)) { +# ifdef XMRIG_FEATURE_ASM + if (SOFT_AES && props.isR()) { + if (!ctx[0]->generated_code_data.match(ALGO, height)) { V4_Instruction code[256]; - const int code_size = 
v4_random_math_init(code, height); + const int code_size = v4_random_math_init(code, height); - if (VARIANT == xmrig::VARIANT_WOW) - wow_soft_aes_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), xmrig::ASM_NONE); - else if (VARIANT == xmrig::VARIANT_4) - v4_soft_aes_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), xmrig::ASM_NONE); + if (ALGO == Algorithm::CN_WOW) { + wow_soft_aes_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), ASM_NONE); + } + else if (ALGO == Algorithm::CN_R) { + v4_soft_aes_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), ASM_NONE); + } - ctx[0]->generated_code_data.variant = VARIANT; - ctx[0]->generated_code_data.height = height; + ctx[0]->generated_code_data = { ALGO, height }; } - ctx[0]->saes_table = (const uint32_t*)saes_table; + ctx[0]->saes_table = reinterpret_cast(saes_table); ctx[0]->generated_code(ctx); } else { -#endif - - const uint8_t* l0 = ctx[0]->memory; +# endif VARIANT1_INIT(0); VARIANT2_INIT(0); VARIANT2_SET_ROUNDING_MODE(); VARIANT4_RANDOM_MATH_INIT(0); - uint64_t al0 = h0[0] ^ h0[4]; - uint64_t ah0 = h0[1] ^ h0[5]; - __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]); - __m128i bx1 = _mm_set_epi64x(h0[9] ^ h0[11], h0[8] ^ h0[10]); - + uint64_t al0 = h0[0] ^ h0[4]; + uint64_t ah0 = h0[1] ^ h0[5]; uint64_t idx0 = al0; + __m128i bx0 = _mm_set_epi64x(static_cast(h0[3] ^ h0[7]), static_cast(h0[2] ^ h0[6])); + __m128i bx1 = _mm_set_epi64x(static_cast(h0[9] ^ h0[11]), static_cast(h0[8] ^ h0[10])); - for (size_t i = 0; i < ITERATIONS; i++) { + for (size_t i = 0; i < props.iterations(); i++) { __m128i cx; - if (VARIANT == xmrig::VARIANT_TUBE || !SOFT_AES) { - cx = _mm_load_si128((__m128i *) &l0[idx0 & MASK]); + if (IS_CN_HEAVY_TUBE || !SOFT_AES) { + cx = _mm_load_si128(reinterpret_cast(&l0[idx0 & MASK])); } - const __m128i ax0 = _mm_set_epi64x(ah0, al0); - if (VARIANT == xmrig::VARIANT_TUBE) { + const __m128i ax0 = 
_mm_set_epi64x(static_cast(ah0), static_cast(al0)); + if (IS_CN_HEAVY_TUBE) { cx = aes_round_tweak_div(cx, ax0); } else if (SOFT_AES) { - cx = soft_aesenc((uint32_t*)&l0[idx0 & MASK], ax0, (const uint32_t*)saes_table); + cx = soft_aesenc(&l0[idx0 & MASK], ax0, reinterpret_cast(saes_table)); } else { cx = _mm_aesenc_si128(cx, ax0); } - if (BASE == xmrig::VARIANT_1 || BASE == xmrig::VARIANT_2) { - cryptonight_monero_tweak((uint64_t*)&l0[idx0 & MASK], l0, idx0 & MASK, ax0, bx0, bx1, cx); + if (BASE == Algorithm::CN_1 || BASE == Algorithm::CN_2) { + cryptonight_monero_tweak(reinterpret_cast(&l0[idx0 & MASK]), l0, idx0 & MASK, ax0, bx0, bx1, cx); } else { - _mm_store_si128((__m128i *)&l0[idx0 & MASK], _mm_xor_si128(bx0, cx)); + _mm_store_si128(reinterpret_cast<__m128i *>(&l0[idx0 & MASK]), _mm_xor_si128(bx0, cx)); } - idx0 = _mm_cvtsi128_si64(cx); + idx0 = static_cast(_mm_cvtsi128_si64(cx)); uint64_t hi, lo, cl, ch; - cl = ((uint64_t*) &l0[idx0 & MASK])[0]; - ch = ((uint64_t*) &l0[idx0 & MASK])[1]; + cl = (reinterpret_cast(&l0[idx0 & MASK]))[0]; + ch = (reinterpret_cast(&l0[idx0 & MASK]))[1]; - if (BASE == xmrig::VARIANT_2) { - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { + if (BASE == Algorithm::CN_2) { + if (props.isR()) { VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx0, bx1); - if (VARIANT == xmrig::VARIANT_4) { - al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32); - ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32); + if (ALGO == Algorithm::CN_R) { + al0 ^= r0[2] | (static_cast(r0[3]) << 32); + ah0 ^= r0[0] | (static_cast(r0[1]) << 32); } } else { VARIANT2_INTEGER_MATH(0, cl, cx); @@ -651,63 +645,67 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si lo = __umul128(idx0, cl, &hi); - if (BASE == xmrig::VARIANT_2) { - if (VARIANT == xmrig::VARIANT_4) { + if (BASE == Algorithm::CN_2) { + if (ALGO == Algorithm::CN_R) { VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx0, bx1, cx, 0); } else { - VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, 
hi, lo, (VARIANT == xmrig::VARIANT_RWZ ? 1 : 0)); + VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0)); } } al0 += hi; ah0 += lo; - ((uint64_t*)&l0[idx0 & MASK])[0] = al0; + reinterpret_cast(&l0[idx0 & MASK])[0] = al0; - if (BASE == xmrig::VARIANT_1 && (VARIANT == xmrig::VARIANT_TUBE || VARIANT == xmrig::VARIANT_RTO)) { - ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0 ^ al0; - } else if (BASE == xmrig::VARIANT_1) { - ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0; + if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) { + reinterpret_cast(&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0 ^ al0; + } else if (BASE == Algorithm::CN_1) { + reinterpret_cast(&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0; } else { - ((uint64_t*)&l0[idx0 & MASK])[1] = ah0; + reinterpret_cast(&l0[idx0 & MASK])[1] = ah0; } al0 ^= cl; ah0 ^= ch; idx0 = al0; - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { +# ifdef XMRIG_ALGO_CN_HEAVY + if (props.isHeavy()) { int64_t n = ((int64_t*)&l0[idx0 & MASK])[0]; int32_t d = ((int32_t*)&l0[idx0 & MASK])[2]; int64_t q = n / (d | 0x5); ((int64_t*)&l0[idx0 & MASK])[0] = n ^ q; - if (VARIANT == xmrig::VARIANT_XHV) { + if (ALGO == Algorithm::CN_HEAVY_XHV) { d = ~d; } idx0 = d ^ q; } +# endif - if (BASE == xmrig::VARIANT_2) { + if (BASE == Algorithm::CN_2) { bx1 = bx0; } bx0 = cx; } -#ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM } -#endif - - cn_implode_scratchpad((__m128i*) ctx[0]->memory, (__m128i*) ctx[0]->state); +# endif - xmrig::keccakf(h0, 24); + cn_implode_scratchpad(reinterpret_cast(ctx[0]->memory), reinterpret_cast<__m128i *>(ctx[0]->state)); + keccakf(h0, 24); extra_hashes[ctx[0]->state[0] & 3](ctx[0]->state, 200, output); } +} /* namespace xmrig */ + + #ifdef XMRIG_ALGO_CN_GPU template void cn_gpu_inner_avx(const uint8_t *spad, uint8_t *lpad); @@ -717,17 +715,41 @@ template void cn_gpu_inner_ssse3(const uint8_t *spad, uint8_t *lpad); -template -inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ 
input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) +namespace xmrig { + + +template +void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output) { - constexpr size_t MASK = xmrig::CRYPTONIGHT_GPU_MASK; - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr size_t MEM = xmrig::cn_select_memory(); + constexpr size_t hash_size = 200; // 25x8 bytes + alignas(16) uint64_t hash[25]; - static_assert(MASK > 0 && ITERATIONS > 0 && MEM > 0, "unsupported algorithm/variant"); + for (uint64_t i = 0; i < MEM / 512; i++) { + memcpy(hash, input, hash_size); + hash[0] ^= i; - xmrig::keccak(input, size, ctx[0]->state); - cn_explode_scratchpad_gpu(ctx[0]->state, ctx[0]->memory); + xmrig::keccakf(hash, 24); + memcpy(output, hash, 160); + output += 160; + + xmrig::keccakf(hash, 24); + memcpy(output, hash, 176); + output += 176; + + xmrig::keccakf(hash, 24); + memcpy(output, hash, 176); + output += 176; + } +} + + +template +inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t) +{ + constexpr CnAlgo props; + + keccak(input, size, ctx[0]->state); + cn_explode_scratchpad_gpu(ctx[0]->state, ctx[0]->memory); # ifdef _MSC_VER _control87(RC_NEAR, MCW_RC); @@ -736,20 +758,22 @@ inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_ # endif if (xmrig::Cpu::info()->hasAVX2()) { - cn_gpu_inner_avx(ctx[0]->state, ctx[0]->memory); + cn_gpu_inner_avx(ctx[0]->state, ctx[0]->memory); } else { - cn_gpu_inner_ssse3(ctx[0]->state, ctx[0]->memory); + cn_gpu_inner_ssse3(ctx[0]->state, ctx[0]->memory); } - cn_implode_scratchpad((__m128i*) ctx[0]->memory, (__m128i*) ctx[0]->state); - - xmrig::keccakf((uint64_t*) ctx[0]->state, 24); + cn_implode_scratchpad(reinterpret_cast(ctx[0]->memory), reinterpret_cast<__m128i *>(ctx[0]->state)); + keccakf(reinterpret_cast(ctx[0]->state), 24); memcpy(output, 
ctx[0]->state, 32); } + + +} /* namespace xmrig */ #endif -#ifndef XMRIG_NO_ASM +#ifdef XMRIG_FEATURE_ASM extern "C" void cnv2_mainloop_ivybridge_asm(cryptonight_ctx **ctx); extern "C" void cnv2_mainloop_ryzen_asm(cryptonight_ctx **ctx); extern "C" void cnv2_mainloop_bulldozer_asm(cryptonight_ctx **ctx); @@ -757,212 +781,243 @@ extern "C" void cnv2_double_mainloop_sandybridge_asm(cryptonight_ctx **ctx); extern "C" void cnv2_rwz_mainloop_asm(cryptonight_ctx **ctx); extern "C" void cnv2_rwz_double_mainloop_asm(cryptonight_ctx **ctx); -extern xmrig::CpuThread::cn_mainloop_fun cn_half_mainloop_ivybridge_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_half_mainloop_ryzen_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_half_mainloop_bulldozer_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_half_double_mainloop_sandybridge_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_trtl_mainloop_ivybridge_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_trtl_mainloop_ryzen_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_trtl_mainloop_bulldozer_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_trtl_double_mainloop_sandybridge_asm; +namespace xmrig { -extern xmrig::CpuThread::cn_mainloop_fun cn_zls_mainloop_ivybridge_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_zls_mainloop_ryzen_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_zls_mainloop_bulldozer_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_zls_double_mainloop_sandybridge_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_double_mainloop_ivybridge_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_double_mainloop_ryzen_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_double_mainloop_bulldozer_asm; -extern xmrig::CpuThread::cn_mainloop_fun cn_double_double_mainloop_sandybridge_asm; +extern cn_mainloop_fun cn_half_mainloop_ivybridge_asm; +extern cn_mainloop_fun cn_half_mainloop_ryzen_asm; +extern cn_mainloop_fun cn_half_mainloop_bulldozer_asm; +extern cn_mainloop_fun cn_half_double_mainloop_sandybridge_asm; + 
+extern cn_mainloop_fun cn_trtl_mainloop_ivybridge_asm; +extern cn_mainloop_fun cn_trtl_mainloop_ryzen_asm; +extern cn_mainloop_fun cn_trtl_mainloop_bulldozer_asm; +extern cn_mainloop_fun cn_trtl_double_mainloop_sandybridge_asm; + +extern cn_mainloop_fun cn_zls_mainloop_ivybridge_asm; +extern cn_mainloop_fun cn_zls_mainloop_ryzen_asm; +extern cn_mainloop_fun cn_zls_mainloop_bulldozer_asm; +extern cn_mainloop_fun cn_zls_double_mainloop_sandybridge_asm; + +extern cn_mainloop_fun cn_double_mainloop_ivybridge_asm; +extern cn_mainloop_fun cn_double_mainloop_ryzen_asm; +extern cn_mainloop_fun cn_double_mainloop_bulldozer_asm; +extern cn_mainloop_fun cn_double_double_mainloop_sandybridge_asm; + + +} // namespace xmrig + void wow_compile_code(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM); void v4_compile_code(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM); void wow_compile_code_double(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM); void v4_compile_code_double(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM); -template + +template void cn_r_compile_code(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM) { v4_compile_code(code, code_size, machine_code, ASM); } -template + +template void cn_r_compile_code_double(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM) { v4_compile_code_double(code, code_size, machine_code, ASM); } + template<> -void cn_r_compile_code(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM) +void cn_r_compile_code(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM) { wow_compile_code(code, code_size, machine_code, ASM); } + template<> -void cn_r_compile_code_double(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM) +void 
cn_r_compile_code_double(const V4_Instruction* code, int code_size, void* machine_code, xmrig::Assembly ASM) { wow_compile_code_double(code, code_size, machine_code, ASM); } -template + +namespace xmrig { + + +template inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MEM = xmrig::cn_select_memory(); + constexpr CnAlgo props; - if (xmrig::cn_is_cryptonight_r() && !ctx[0]->generated_code_data.match(VARIANT, height)) { + if (props.isR() && !ctx[0]->generated_code_data.match(ALGO, height)) { V4_Instruction code[256]; - const int code_size = v4_random_math_init(code, height); - cn_r_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), ASM); - ctx[0]->generated_code_data.variant = VARIANT; - ctx[0]->generated_code_data.height = height; + const int code_size = v4_random_math_init(code, height); + cn_r_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), ASM); + + ctx[0]->generated_code_data = { ALGO, height }; } - xmrig::keccak(input, size, ctx[0]->state); - cn_explode_scratchpad(reinterpret_cast<__m128i*>(ctx[0]->state), reinterpret_cast<__m128i*>(ctx[0]->memory)); + keccak(input, size, ctx[0]->state); + cn_explode_scratchpad(reinterpret_cast(ctx[0]->state), reinterpret_cast<__m128i*>(ctx[0]->memory)); - if (VARIANT == xmrig::VARIANT_2) { - if (ASM == xmrig::ASM_INTEL) { + if (ALGO == Algorithm::CN_2) { + if (ASM == ASM_INTEL) { cnv2_mainloop_ivybridge_asm(ctx); } - else if (ASM == xmrig::ASM_RYZEN) { + else if (ASM == ASM_RYZEN) { cnv2_mainloop_ryzen_asm(ctx); } else { cnv2_mainloop_bulldozer_asm(ctx); } } - else if (VARIANT == xmrig::VARIANT_HALF) { - if (ASM == xmrig::ASM_INTEL) { + else if (ALGO == Algorithm::CN_HALF) { + if (ASM == ASM_INTEL) { cn_half_mainloop_ivybridge_asm(ctx); } - else if (ASM == xmrig::ASM_RYZEN) { + else if (ASM == ASM_RYZEN) { cn_half_mainloop_ryzen_asm(ctx); } 
else { cn_half_mainloop_bulldozer_asm(ctx); } } - else if (VARIANT == xmrig::VARIANT_TRTL) { - if (ASM == xmrig::ASM_INTEL) { +# ifdef XMRIG_ALGO_CN_PICO + else if (ALGO == Algorithm::CN_PICO_0) { + if (ASM == ASM_INTEL) { cn_trtl_mainloop_ivybridge_asm(ctx); } - else if (ASM == xmrig::ASM_RYZEN) { + else if (ASM == ASM_RYZEN) { cn_trtl_mainloop_ryzen_asm(ctx); } else { cn_trtl_mainloop_bulldozer_asm(ctx); } } - else if (VARIANT == xmrig::VARIANT_RWZ) { +# endif + else if (ALGO == Algorithm::CN_RWZ) { cnv2_rwz_mainloop_asm(ctx); } - else if (VARIANT == xmrig::VARIANT_ZLS) { - if (ASM == xmrig::ASM_INTEL) { + else if (ALGO == Algorithm::CN_ZLS) { + if (ASM == ASM_INTEL) { cn_zls_mainloop_ivybridge_asm(ctx); } - else if (ASM == xmrig::ASM_RYZEN) { + else if (ASM == ASM_RYZEN) { cn_zls_mainloop_ryzen_asm(ctx); } else { cn_zls_mainloop_bulldozer_asm(ctx); } } - else if (VARIANT == xmrig::VARIANT_DOUBLE) { - if (ASM == xmrig::ASM_INTEL) { + else if (ALGO == Algorithm::CN_DOUBLE) { + if (ASM == ASM_INTEL) { cn_double_mainloop_ivybridge_asm(ctx); } - else if (ASM == xmrig::ASM_RYZEN) { + else if (ASM == ASM_RYZEN) { cn_double_mainloop_ryzen_asm(ctx); } else { cn_double_mainloop_bulldozer_asm(ctx); } } - else if (xmrig::cn_is_cryptonight_r()) { + else if (props.isR()) { ctx[0]->generated_code(ctx); } - cn_implode_scratchpad(reinterpret_cast<__m128i*>(ctx[0]->memory), reinterpret_cast<__m128i*>(ctx[0]->state)); - xmrig::keccakf(reinterpret_cast(ctx[0]->state), 24); + cn_implode_scratchpad(reinterpret_cast(ctx[0]->memory), reinterpret_cast<__m128i*>(ctx[0]->state)); + keccakf(reinterpret_cast(ctx[0]->state), 24); extra_hashes[ctx[0]->state[0] & 3](ctx[0]->state, 200, output); } -template +template inline void cryptonight_double_hash_asm(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MEM = xmrig::cn_select_memory(); + constexpr CnAlgo props; - if 
(xmrig::cn_is_cryptonight_r() && !ctx[0]->generated_code_data.match(VARIANT, height)) { + if (props.isR() && !ctx[0]->generated_code_data.match(ALGO, height)) { V4_Instruction code[256]; - const int code_size = v4_random_math_init(code, height); - cn_r_compile_code_double(code, code_size, reinterpret_cast(ctx[0]->generated_code), ASM); - ctx[0]->generated_code_data.variant = VARIANT; - ctx[0]->generated_code_data.height = height; + const int code_size = v4_random_math_init(code, height); + cn_r_compile_code_double(code, code_size, reinterpret_cast(ctx[0]->generated_code), ASM); + + ctx[0]->generated_code_data = { ALGO, height }; } - xmrig::keccak(input, size, ctx[0]->state); - xmrig::keccak(input + size, size, ctx[1]->state); + keccak(input, size, ctx[0]->state); + keccak(input + size, size, ctx[1]->state); - cn_explode_scratchpad(reinterpret_cast<__m128i*>(ctx[0]->state), reinterpret_cast<__m128i*>(ctx[0]->memory)); - cn_explode_scratchpad(reinterpret_cast<__m128i*>(ctx[1]->state), reinterpret_cast<__m128i*>(ctx[1]->memory)); + cn_explode_scratchpad(reinterpret_cast(ctx[0]->state), reinterpret_cast<__m128i*>(ctx[0]->memory)); + cn_explode_scratchpad(reinterpret_cast(ctx[1]->state), reinterpret_cast<__m128i*>(ctx[1]->memory)); - if (VARIANT == xmrig::VARIANT_2) { + if (ALGO == Algorithm::CN_2) { cnv2_double_mainloop_sandybridge_asm(ctx); } - else if (VARIANT == xmrig::VARIANT_HALF) { + else if (ALGO == Algorithm::CN_HALF) { cn_half_double_mainloop_sandybridge_asm(ctx); } - else if (VARIANT == xmrig::VARIANT_TRTL) { +# ifdef XMRIG_ALGO_CN_PICO + else if (ALGO == Algorithm::CN_PICO_0) { cn_trtl_double_mainloop_sandybridge_asm(ctx); } - else if (VARIANT == xmrig::VARIANT_RWZ) { +# endif + else if (ALGO == Algorithm::CN_RWZ) { cnv2_rwz_double_mainloop_asm(ctx); } - else if (VARIANT == xmrig::VARIANT_ZLS) { + else if (ALGO == Algorithm::CN_ZLS) { cn_zls_double_mainloop_sandybridge_asm(ctx); } - else if (VARIANT == xmrig::VARIANT_DOUBLE) { + else if (ALGO == 
Algorithm::CN_DOUBLE) { cn_double_double_mainloop_sandybridge_asm(ctx); } - else if (xmrig::cn_is_cryptonight_r()) { + else if (props.isR()) { ctx[0]->generated_code(ctx); } - cn_implode_scratchpad(reinterpret_cast<__m128i*>(ctx[0]->memory), reinterpret_cast<__m128i*>(ctx[0]->state)); - cn_implode_scratchpad(reinterpret_cast<__m128i*>(ctx[1]->memory), reinterpret_cast<__m128i*>(ctx[1]->state)); + cn_implode_scratchpad(reinterpret_cast(ctx[0]->memory), reinterpret_cast<__m128i*>(ctx[0]->state)); + cn_implode_scratchpad(reinterpret_cast(ctx[1]->memory), reinterpret_cast<__m128i*>(ctx[1]->state)); - xmrig::keccakf(reinterpret_cast(ctx[0]->state), 24); - xmrig::keccakf(reinterpret_cast(ctx[1]->state), 24); + keccakf(reinterpret_cast(ctx[0]->state), 24); + keccakf(reinterpret_cast(ctx[1]->state), 24); extra_hashes[ctx[0]->state[0] & 3](ctx[0]->state, 200, output); extra_hashes[ctx[1]->state[0] & 3](ctx[1]->state, 200, output + 32); } + + +} /* namespace xmrig */ #endif -template +namespace xmrig { + + +template inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MASK = xmrig::cn_select_mask(); - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr size_t MEM = xmrig::cn_select_memory(); - constexpr xmrig::Variant BASE = xmrig::cn_base_variant(); + constexpr CnAlgo props; + constexpr size_t MASK = props.mask(); + constexpr Algorithm::Id BASE = props.base(); - if (BASE == xmrig::VARIANT_1 && size < 43) { +# ifdef XMRIG_ALGO_CN_HEAVY + constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE; +# else + constexpr bool IS_CN_HEAVY_TUBE = false; +# endif + + if (BASE == Algorithm::CN_1 && size < 43) { memset(output, 0, 64); return; } - xmrig::keccak(input, size, ctx[0]->state); - xmrig::keccak(input + size, size, ctx[1]->state); + keccak(input, size, ctx[0]->state); + keccak(input + size, size, ctx[1]->state); - const 
uint8_t* l0 = ctx[0]->memory; - const uint8_t* l1 = ctx[1]->memory; - uint64_t* h0 = reinterpret_cast(ctx[0]->state); - uint64_t* h1 = reinterpret_cast(ctx[1]->state); + uint8_t *l0 = ctx[0]->memory; + uint8_t *l1 = ctx[1]->memory; + uint64_t *h0 = reinterpret_cast(ctx[0]->state); + uint64_t *h1 = reinterpret_cast(ctx[1]->state); VARIANT1_INIT(0); VARIANT1_INIT(1); @@ -972,8 +1027,8 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si VARIANT4_RANDOM_MATH_INIT(0); VARIANT4_RANDOM_MATH_INIT(1); - cn_explode_scratchpad((__m128i*) h0, (__m128i*) l0); - cn_explode_scratchpad((__m128i*) h1, (__m128i*) l1); + cn_explode_scratchpad(reinterpret_cast(h0), reinterpret_cast<__m128i *>(l0)); + cn_explode_scratchpad(reinterpret_cast(h1), reinterpret_cast<__m128i *>(l1)); uint64_t al0 = h0[0] ^ h0[4]; uint64_t al1 = h1[0] ^ h1[4]; @@ -988,31 +1043,31 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si uint64_t idx0 = al0; uint64_t idx1 = al1; - for (size_t i = 0; i < ITERATIONS; i++) { + for (size_t i = 0; i < props.iterations(); i++) { __m128i cx0, cx1; - if (VARIANT == xmrig::VARIANT_TUBE || !SOFT_AES) { - cx0 = _mm_load_si128((__m128i *) &l0[idx0 & MASK]); - cx1 = _mm_load_si128((__m128i *) &l1[idx1 & MASK]); + if (IS_CN_HEAVY_TUBE || !SOFT_AES) { + cx0 = _mm_load_si128(reinterpret_cast(&l0[idx0 & MASK])); + cx1 = _mm_load_si128(reinterpret_cast(&l1[idx1 & MASK])); } const __m128i ax0 = _mm_set_epi64x(ah0, al0); const __m128i ax1 = _mm_set_epi64x(ah1, al1); - if (VARIANT == xmrig::VARIANT_TUBE) { + if (IS_CN_HEAVY_TUBE) { cx0 = aes_round_tweak_div(cx0, ax0); cx1 = aes_round_tweak_div(cx1, ax1); } else if (SOFT_AES) { - cx0 = soft_aesenc((uint32_t*)&l0[idx0 & MASK], ax0, (const uint32_t*)saes_table); - cx1 = soft_aesenc((uint32_t*)&l1[idx1 & MASK], ax1, (const uint32_t*)saes_table); + cx0 = soft_aesenc(&l0[idx0 & MASK], ax0, reinterpret_cast(saes_table)); + cx1 = soft_aesenc(&l1[idx1 & MASK], ax1, 
reinterpret_cast(saes_table)); } else { cx0 = _mm_aesenc_si128(cx0, ax0); cx1 = _mm_aesenc_si128(cx1, ax1); } - if (BASE == xmrig::VARIANT_1 || (BASE == xmrig::VARIANT_2)) { - cryptonight_monero_tweak((uint64_t*)&l0[idx0 & MASK], l0, idx0 & MASK, ax0, bx00, bx01, cx0); - cryptonight_monero_tweak((uint64_t*)&l1[idx1 & MASK], l1, idx1 & MASK, ax1, bx10, bx11, cx1); + if (BASE == Algorithm::CN_1 || BASE == Algorithm::CN_2) { + cryptonight_monero_tweak((uint64_t*)&l0[idx0 & MASK], l0, idx0 & MASK, ax0, bx00, bx01, cx0); + cryptonight_monero_tweak((uint64_t*)&l1[idx1 & MASK], l1, idx1 & MASK, ax1, bx10, bx11, cx1); } else { _mm_store_si128((__m128i *) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0)); _mm_store_si128((__m128i *) &l1[idx1 & MASK], _mm_xor_si128(bx10, cx1)); @@ -1025,10 +1080,10 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si cl = ((uint64_t*) &l0[idx0 & MASK])[0]; ch = ((uint64_t*) &l0[idx0 & MASK])[1]; - if (BASE == xmrig::VARIANT_2) { - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { + if (BASE == Algorithm::CN_2) { + if (props.isR()) { VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx01); - if (VARIANT == xmrig::VARIANT_4) { + if (ALGO == Algorithm::CN_R) { al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32); ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32); } @@ -1039,11 +1094,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si lo = __umul128(idx0, cl, &hi); - if (BASE == xmrig::VARIANT_2) { - if (VARIANT == xmrig::VARIANT_4) { + if (BASE == Algorithm::CN_2) { + if (ALGO == Algorithm::CN_R) { VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx00, bx01, cx0, 0); } else { - VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (VARIANT == xmrig::VARIANT_RWZ ? 1 : 0)); + VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (ALGO == Algorithm::CN_RWZ ? 
1 : 0)); } } @@ -1052,9 +1107,9 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ((uint64_t*)&l0[idx0 & MASK])[0] = al0; - if (BASE == xmrig::VARIANT_1 && (VARIANT == xmrig::VARIANT_TUBE || VARIANT == xmrig::VARIANT_RTO)) { + if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) { ((uint64_t*) &l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0 ^ al0; - } else if (BASE == xmrig::VARIANT_1) { + } else if (BASE == Algorithm::CN_1) { ((uint64_t*) &l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0; } else { ((uint64_t*) &l0[idx0 & MASK])[1] = ah0; @@ -1064,27 +1119,29 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ah0 ^= ch; idx0 = al0; - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { +# ifdef XMRIG_ALGO_CN_HEAVY + if (props.isHeavy()) { int64_t n = ((int64_t*)&l0[idx0 & MASK])[0]; int32_t d = ((int32_t*)&l0[idx0 & MASK])[2]; int64_t q = n / (d | 0x5); ((int64_t*)&l0[idx0 & MASK])[0] = n ^ q; - if (VARIANT == xmrig::VARIANT_XHV) { + if (ALGO == Algorithm::CN_HEAVY_XHV) { d = ~d; } idx0 = d ^ q; } +# endif cl = ((uint64_t*) &l1[idx1 & MASK])[0]; ch = ((uint64_t*) &l1[idx1 & MASK])[1]; - if (BASE == xmrig::VARIANT_2) { - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { + if (BASE == Algorithm::CN_2) { + if (props.isR()) { VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx10, bx11); - if (VARIANT == xmrig::VARIANT_4) { + if (ALGO == Algorithm::CN_R) { al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32); ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32); } @@ -1095,11 +1152,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si lo = __umul128(idx1, cl, &hi); - if (BASE == xmrig::VARIANT_2) { - if (VARIANT == xmrig::VARIANT_4) { + if (BASE == Algorithm::CN_2) { + if (ALGO == Algorithm::CN_R) { VARIANT2_SHUFFLE(l1, idx1 & MASK, ax1, bx10, bx11, cx1, 0); } else { - VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (VARIANT == xmrig::VARIANT_RWZ ? 
1 : 0)); + VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0)); } } @@ -1108,9 +1165,9 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ((uint64_t*)&l1[idx1 & MASK])[0] = al1; - if (BASE == xmrig::VARIANT_1 && (VARIANT == xmrig::VARIANT_TUBE || VARIANT == xmrig::VARIANT_RTO)) { + if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) { ((uint64_t*)&l1[idx1 & MASK])[1] = ah1 ^ tweak1_2_1 ^ al1; - } else if (BASE == xmrig::VARIANT_1) { + } else if (BASE == Algorithm::CN_1) { ((uint64_t*)&l1[idx1 & MASK])[1] = ah1 ^ tweak1_2_1; } else { ((uint64_t*)&l1[idx1 & MASK])[1] = ah1; @@ -1120,21 +1177,23 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ah1 ^= ch; idx1 = al1; - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { +# ifdef XMRIG_ALGO_CN_HEAVY + if (props.isHeavy()) { int64_t n = ((int64_t*)&l1[idx1 & MASK])[0]; int32_t d = ((int32_t*)&l1[idx1 & MASK])[2]; int64_t q = n / (d | 0x5); ((int64_t*)&l1[idx1 & MASK])[0] = n ^ q; - if (VARIANT == xmrig::VARIANT_XHV) { + if (ALGO == Algorithm::CN_HEAVY_XHV) { d = ~d; } idx1 = d ^ q; } +# endif - if (BASE == xmrig::VARIANT_2) { + if (BASE == Algorithm::CN_2) { bx01 = bx00; bx11 = bx10; } @@ -1143,11 +1202,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si bx10 = cx1; } - cn_implode_scratchpad((__m128i*) l0, (__m128i*) h0); - cn_implode_scratchpad((__m128i*) l1, (__m128i*) h1); + cn_implode_scratchpad(reinterpret_cast(l0), reinterpret_cast<__m128i *>(h0)); + cn_implode_scratchpad(reinterpret_cast(l1), reinterpret_cast<__m128i *>(h1)); - xmrig::keccakf(h0, 24); - xmrig::keccakf(h1, 24); + keccakf(h0, 24); + keccakf(h1, 24); extra_hashes[ctx[0]->state[0] & 3](ctx[0]->state, 200, output); extra_hashes[ctx[1]->state[0] & 3](ctx[1]->state, 200, output + 32); @@ -1159,20 +1218,20 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si c = _mm_load_si128(ptr); -#define 
CN_STEP2(a, b0, b1, c, l, ptr, idx) \ - if (VARIANT == xmrig::VARIANT_TUBE) { \ - c = aes_round_tweak_div(c, a); \ - } \ - else if (SOFT_AES) { \ - c = soft_aesenc(&c, a, (const uint32_t*)saes_table); \ - } else { \ - c = _mm_aesenc_si128(c, a); \ - } \ - \ - if (BASE == xmrig::VARIANT_1 || BASE == xmrig::VARIANT_2) { \ - cryptonight_monero_tweak((uint64_t*)ptr, l, idx & MASK, a, b0, b1, c); \ - } else { \ - _mm_store_si128(ptr, _mm_xor_si128(b0, c)); \ +#define CN_STEP2(a, b0, b1, c, l, ptr, idx) \ + if (IS_CN_HEAVY_TUBE) { \ + c = aes_round_tweak_div(c, a); \ + } \ + else if (SOFT_AES) { \ + c = soft_aesenc(&c, a, (const uint32_t*)saes_table); \ + } else { \ + c = _mm_aesenc_si128(c, a); \ + } \ + \ + if (BASE == Algorithm::CN_1 || BASE == Algorithm::CN_2) { \ + cryptonight_monero_tweak((uint64_t*)ptr, l, idx & MASK, a, b0, b1, c); \ + } else { \ + _mm_store_si128(ptr, _mm_xor_si128(b0, c)); \ } @@ -1183,62 +1242,60 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si uint64_t ch##part = ((uint64_t*)ptr)[1]; -#define CN_STEP4(part, a, b0, b1, c, l, mc, ptr, idx) \ - uint64_t al##part, ah##part; \ - if (BASE == xmrig::VARIANT_2) { \ - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { \ - al##part = _mm_cvtsi128_si64(a); \ - ah##part = _mm_cvtsi128_si64(_mm_srli_si128(a, 8)); \ - VARIANT4_RANDOM_MATH(part, al##part, ah##part, cl##part, b0, b1); \ - if (VARIANT == xmrig::VARIANT_4) { \ - al##part ^= r##part[2] | ((uint64_t)(r##part[3]) << 32); \ - ah##part ^= r##part[0] | ((uint64_t)(r##part[1]) << 32); \ - } \ - } else { \ - VARIANT2_INTEGER_MATH(part, cl##part, c); \ - } \ - } \ - lo = __umul128(idx, cl##part, &hi); \ - if (BASE == xmrig::VARIANT_2) { \ - if (VARIANT == xmrig::VARIANT_4) { \ - VARIANT2_SHUFFLE(l, idx & MASK, a, b0, b1, c, 0); \ - } else { \ - VARIANT2_SHUFFLE2(l, idx & MASK, a, b0, b1, hi, lo, (VARIANT == xmrig::VARIANT_RWZ ? 
1 : 0)); \ - } \ - } \ - if (VARIANT == xmrig::VARIANT_4) { \ - a = _mm_set_epi64x(ah##part, al##part); \ - } \ - a = _mm_add_epi64(a, _mm_set_epi64x(lo, hi)); \ - \ - if (BASE == xmrig::VARIANT_1) { \ - _mm_store_si128(ptr, _mm_xor_si128(a, mc)); \ - \ - if (VARIANT == xmrig::VARIANT_TUBE || \ - VARIANT == xmrig::VARIANT_RTO) { \ - ((uint64_t*)ptr)[1] ^= ((uint64_t*)ptr)[0]; \ - } \ - } else { \ - _mm_store_si128(ptr, a); \ - } \ - \ - a = _mm_xor_si128(a, _mm_set_epi64x(ch##part, cl##part)); \ - idx = _mm_cvtsi128_si64(a); \ - \ - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { \ - int64_t n = ((int64_t*)&l[idx & MASK])[0]; \ - int32_t d = ((int32_t*)&l[idx & MASK])[2]; \ - int64_t q = n / (d | 0x5); \ - ((int64_t*)&l[idx & MASK])[0] = n ^ q; \ - if (VARIANT == xmrig::VARIANT_XHV) { \ - d = ~d; \ - } \ - \ - idx = d ^ q; \ - } \ - if (BASE == xmrig::VARIANT_2) { \ - b1 = b0; \ - } \ +#define CN_STEP4(part, a, b0, b1, c, l, mc, ptr, idx) \ + uint64_t al##part, ah##part; \ + if (BASE == Algorithm::CN_2) { \ + if (props.isR()) { \ + al##part = _mm_cvtsi128_si64(a); \ + ah##part = _mm_cvtsi128_si64(_mm_srli_si128(a, 8)); \ + VARIANT4_RANDOM_MATH(part, al##part, ah##part, cl##part, b0, b1); \ + if (ALGO == Algorithm::CN_R) { \ + al##part ^= r##part[2] | ((uint64_t)(r##part[3]) << 32); \ + ah##part ^= r##part[0] | ((uint64_t)(r##part[1]) << 32); \ + } \ + } else { \ + VARIANT2_INTEGER_MATH(part, cl##part, c); \ + } \ + } \ + lo = __umul128(idx, cl##part, &hi); \ + if (BASE == Algorithm::CN_2) { \ + if (ALGO == Algorithm::CN_R) { \ + VARIANT2_SHUFFLE(l, idx & MASK, a, b0, b1, c, 0); \ + } else { \ + VARIANT2_SHUFFLE2(l, idx & MASK, a, b0, b1, hi, lo, (ALGO == Algorithm::CN_RWZ ? 
1 : 0)); \ + } \ + } \ + if (ALGO == Algorithm::CN_R) { \ + a = _mm_set_epi64x(ah##part, al##part); \ + } \ + a = _mm_add_epi64(a, _mm_set_epi64x(lo, hi)); \ + \ + if (BASE == Algorithm::CN_1) { \ + _mm_store_si128(ptr, _mm_xor_si128(a, mc)); \ + \ + if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) { \ + ((uint64_t*)ptr)[1] ^= ((uint64_t*)ptr)[0]; \ + } \ + } else { \ + _mm_store_si128(ptr, a); \ + } \ + \ + a = _mm_xor_si128(a, _mm_set_epi64x(ch##part, cl##part)); \ + idx = _mm_cvtsi128_si64(a); \ + if (props.isHeavy()) { \ + int64_t n = ((int64_t*)&l[idx & MASK])[0]; \ + int32_t d = ((int32_t*)&l[idx & MASK])[2]; \ + int64_t q = n / (d | 0x5); \ + ((int64_t*)&l[idx & MASK])[0] = n ^ q; \ + if (IS_CN_HEAVY_XHV) { \ + d = ~d; \ + } \ + \ + idx = d ^ q; \ + } \ + if (BASE == Algorithm::CN_2) { \ + b1 = b0; \ + } \ b0 = c; @@ -1246,11 +1303,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si __m128i mc##n; \ __m128i division_result_xmm_##n; \ __m128i sqrt_result_xmm_##n; \ - if (BASE == xmrig::VARIANT_1) { \ + if (BASE == Algorithm::CN_1) { \ mc##n = _mm_set_epi64x(*reinterpret_cast(input + n * size + 35) ^ \ *(reinterpret_cast((ctx)->state) + 24), 0); \ } \ - if (BASE == xmrig::VARIANT_2) { \ + if (BASE == Algorithm::CN_2) { \ division_result_xmm_##n = _mm_cvtsi64_si128(h##n[12]); \ sqrt_result_xmm_##n = _mm_cvtsi64_si128(h##n[13]); \ } \ @@ -1261,22 +1318,29 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si VARIANT4_RANDOM_MATH_INIT(n); -template +template inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MASK = xmrig::cn_select_mask(); - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr size_t MEM = xmrig::cn_select_memory(); - constexpr xmrig::Variant BASE = xmrig::cn_base_variant(); + constexpr CnAlgo props; + constexpr size_t MASK = 
props.mask(); + constexpr Algorithm::Id BASE = props.base(); - if (BASE == xmrig::VARIANT_1 && size < 43) { +# ifdef XMRIG_ALGO_CN_HEAVY + constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE; + constexpr bool IS_CN_HEAVY_XHV = ALGO == Algorithm::CN_HEAVY_XHV; +# else + constexpr bool IS_CN_HEAVY_TUBE = false; + constexpr bool IS_CN_HEAVY_XHV = false; +# endif + + if (BASE == Algorithm::CN_1 && size < 43) { memset(output, 0, 32 * 3); return; } for (size_t i = 0; i < 3; i++) { - xmrig::keccak(input + size * i, size, ctx[i]->state); - cn_explode_scratchpad(reinterpret_cast<__m128i*>(ctx[i]->state), reinterpret_cast<__m128i*>(ctx[i]->memory)); + keccak(input + size * i, size, ctx[i]->state); + cn_explode_scratchpad(reinterpret_cast(ctx[i]->state), reinterpret_cast<__m128i*>(ctx[i]->memory)); } uint8_t* l0 = ctx[0]->memory; @@ -1296,7 +1360,7 @@ inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t si idx1 = _mm_cvtsi128_si64(ax1); idx2 = _mm_cvtsi128_si64(ax2); - for (size_t i = 0; i < ITERATIONS; i++) { + for (size_t i = 0; i < props.iterations(); i++) { uint64_t hi, lo; __m128i *ptr0, *ptr1, *ptr2; @@ -1318,29 +1382,36 @@ inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t si } for (size_t i = 0; i < 3; i++) { - cn_implode_scratchpad(reinterpret_cast<__m128i*>(ctx[i]->memory), reinterpret_cast<__m128i*>(ctx[i]->state)); - xmrig::keccakf(reinterpret_cast(ctx[i]->state), 24); + cn_implode_scratchpad(reinterpret_cast(ctx[i]->memory), reinterpret_cast<__m128i*>(ctx[i]->state)); + keccakf(reinterpret_cast(ctx[i]->state), 24); extra_hashes[ctx[i]->state[0] & 3](ctx[i]->state, 200, output + 32 * i); } } -template +template inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MASK = xmrig::cn_select_mask(); - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr 
size_t MEM = xmrig::cn_select_memory(); - constexpr xmrig::Variant BASE = xmrig::cn_base_variant(); + constexpr CnAlgo props; + constexpr size_t MASK = props.mask(); + constexpr Algorithm::Id BASE = props.base(); - if (BASE == xmrig::VARIANT_1 && size < 43) { +# ifdef XMRIG_ALGO_CN_HEAVY + constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE; + constexpr bool IS_CN_HEAVY_XHV = ALGO == Algorithm::CN_HEAVY_XHV; +# else + constexpr bool IS_CN_HEAVY_TUBE = false; + constexpr bool IS_CN_HEAVY_XHV = false; +# endif + + if (BASE == Algorithm::CN_1 && size < 43) { memset(output, 0, 32 * 4); return; } for (size_t i = 0; i < 4; i++) { - xmrig::keccak(input + size * i, size, ctx[i]->state); - cn_explode_scratchpad(reinterpret_cast<__m128i*>(ctx[i]->state), reinterpret_cast<__m128i*>(ctx[i]->memory)); + keccak(input + size * i, size, ctx[i]->state); + cn_explode_scratchpad(reinterpret_cast(ctx[i]->state), reinterpret_cast<__m128i*>(ctx[i]->memory)); } uint8_t* l0 = ctx[0]->memory; @@ -1364,8 +1435,7 @@ inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size idx2 = _mm_cvtsi128_si64(ax2); idx3 = _mm_cvtsi128_si64(ax3); - for (size_t i = 0; i < ITERATIONS; i++) - { + for (size_t i = 0; i < props.iterations(); i++) { uint64_t hi, lo; __m128i *ptr0, *ptr1, *ptr2, *ptr3; @@ -1391,29 +1461,36 @@ inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size } for (size_t i = 0; i < 4; i++) { - cn_implode_scratchpad(reinterpret_cast<__m128i*>(ctx[i]->memory), reinterpret_cast<__m128i*>(ctx[i]->state)); - xmrig::keccakf(reinterpret_cast(ctx[i]->state), 24); + cn_implode_scratchpad(reinterpret_cast(ctx[i]->memory), reinterpret_cast<__m128i*>(ctx[i]->state)); + keccakf(reinterpret_cast(ctx[i]->state), 24); extra_hashes[ctx[i]->state[0] & 3](ctx[i]->state, 200, output + 32 * i); } } -template +template inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx 
**__restrict__ ctx, uint64_t height) { - constexpr size_t MASK = xmrig::cn_select_mask(); - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr size_t MEM = xmrig::cn_select_memory(); - constexpr xmrig::Variant BASE = xmrig::cn_base_variant(); + constexpr CnAlgo props; + constexpr size_t MASK = props.mask(); + constexpr Algorithm::Id BASE = props.base(); - if (BASE == xmrig::VARIANT_1 && size < 43) { +# ifdef XMRIG_ALGO_CN_HEAVY + constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE; + constexpr bool IS_CN_HEAVY_XHV = ALGO == Algorithm::CN_HEAVY_XHV; +# else + constexpr bool IS_CN_HEAVY_TUBE = false; + constexpr bool IS_CN_HEAVY_XHV = false; +# endif + + if (BASE == Algorithm::CN_1 && size < 43) { memset(output, 0, 32 * 5); return; } for (size_t i = 0; i < 5; i++) { - xmrig::keccak(input + size * i, size, ctx[i]->state); - cn_explode_scratchpad(reinterpret_cast<__m128i*>(ctx[i]->state), reinterpret_cast<__m128i*>(ctx[i]->memory)); + keccak(input + size * i, size, ctx[i]->state); + cn_explode_scratchpad(reinterpret_cast(ctx[i]->state), reinterpret_cast<__m128i*>(ctx[i]->memory)); } uint8_t* l0 = ctx[0]->memory; @@ -1441,8 +1518,7 @@ inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t siz idx3 = _mm_cvtsi128_si64(ax3); idx4 = _mm_cvtsi128_si64(ax4); - for (size_t i = 0; i < ITERATIONS; i++) - { + for (size_t i = 0; i < props.iterations(); i++) { uint64_t hi, lo; __m128i *ptr0, *ptr1, *ptr2, *ptr3, *ptr4; @@ -1472,10 +1548,14 @@ inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t siz } for (size_t i = 0; i < 5; i++) { - cn_implode_scratchpad(reinterpret_cast<__m128i*>(ctx[i]->memory), reinterpret_cast<__m128i*>(ctx[i]->state)); - xmrig::keccakf(reinterpret_cast(ctx[i]->state), 24); + cn_implode_scratchpad(reinterpret_cast(ctx[i]->memory), reinterpret_cast<__m128i*>(ctx[i]->state)); + keccakf(reinterpret_cast(ctx[i]->state), 24); extra_hashes[ctx[i]->state[0] & 3](ctx[i]->state, 200, output 
+ 32 * i); } } + +} /* namespace xmrig */ + + #endif /* XMRIG_CRYPTONIGHT_X86_H */ diff --git a/src/crypto/cn/gpu/cn_gpu_avx.cpp b/src/crypto/cn/gpu/cn_gpu_avx.cpp index 382be570..38da9714 100644 --- a/src/crypto/cn/gpu/cn_gpu_avx.cpp +++ b/src/crypto/cn/gpu/cn_gpu_avx.cpp @@ -22,7 +22,9 @@ * along with this program. If not, see . */ -#include "crypto/cn/CryptoNight_constants.h" + +#include "crypto/cn/CnAlgo.h" + #ifdef __GNUC__ # include @@ -206,4 +208,4 @@ void cn_gpu_inner_avx(const uint8_t* spad, uint8_t* lpad) } } -template void cn_gpu_inner_avx(const uint8_t* spad, uint8_t* lpad); +template void cn_gpu_inner_avx().iterations(), xmrig::CnAlgo().mask()>(const uint8_t* spad, uint8_t* lpad); diff --git a/src/crypto/cn/gpu/cn_gpu_ssse3.cpp b/src/crypto/cn/gpu/cn_gpu_ssse3.cpp index 42a11a1d..7cca096e 100644 --- a/src/crypto/cn/gpu/cn_gpu_ssse3.cpp +++ b/src/crypto/cn/gpu/cn_gpu_ssse3.cpp @@ -22,7 +22,9 @@ * along with this program. If not, see . */ -#include "crypto/cn/CryptoNight_constants.h" + +#include "crypto/cn/CnAlgo.h" + #ifdef __GNUC__ # include @@ -207,4 +209,4 @@ void cn_gpu_inner_ssse3(const uint8_t* spad, uint8_t* lpad) } } -template void cn_gpu_inner_ssse3(const uint8_t* spad, uint8_t* lpad); +template void cn_gpu_inner_ssse3().iterations(), xmrig::CnAlgo().mask()>(const uint8_t* spad, uint8_t* lpad); diff --git a/src/crypto/cn/r/variant4_random_math.h b/src/crypto/cn/r/variant4_random_math.h index c384df7a..48f0f6ce 100644 --- a/src/crypto/cn/r/variant4_random_math.h +++ b/src/crypto/cn/r/variant4_random_math.h @@ -1,6 +1,13 @@ #ifndef VARIANT4_RANDOM_MATH_H #define VARIANT4_RANDOM_MATH_H + +#include + + +#include "crypto/common/Algorithm.h" + + extern "C" { #include "crypto/cn/c_blake256.h" @@ -182,7 +189,7 @@ static FORCEINLINE void check_data(size_t* data_index, const size_t bytes_needed // Generates as many random math operations as possible with given latency and ALU restrictions // "code" array must have space for NUM_INSTRUCTIONS_MAX+1 
instructions -template +template static int v4_random_math_init(struct V4_Instruction* code, const uint64_t height) { // MUL is 3 cycles, 3-way addition and rotations are 2 cycles, SUB/XOR are 1 cycle @@ -204,8 +211,7 @@ static int v4_random_math_init(struct V4_Instruction* code, const uint64_t heigh memset(data, 0, sizeof(data)); uint64_t tmp = SWAP64LE(height); memcpy(data, &tmp, sizeof(uint64_t)); - if (VARIANT == xmrig::VARIANT_4) - { + if (ALGO == xmrig::Algorithm::CN_R) { data[20] = -38; } @@ -249,7 +255,7 @@ static int v4_random_math_init(struct V4_Instruction* code, const uint64_t heigh code_size = 0; int total_iterations = 0; - r8_used = (VARIANT == xmrig::VARIANT_WOW); + r8_used = (ALGO == xmrig::Algorithm::CN_WOW); // Generate random code to achieve minimal required latency for our abstract CPU // Try to get this latency for all 4 registers @@ -291,10 +297,9 @@ static int v4_random_math_init(struct V4_Instruction* code, const uint64_t heigh int b = src_index; // Don't do ADD/SUB/XOR with the same register - if (((opcode == ADD) || (opcode == SUB) || (opcode == XOR)) && (a == b)) - { + if (((opcode == ADD) || (opcode == SUB) || (opcode == XOR)) && (a == b)) { // a is always < 4, so we don't need to check bounds here - b = (VARIANT == xmrig::VARIANT_WOW) ? (a + 4) : 8; + b = (ALGO == xmrig::Algorithm::CN_WOW) ? 
(a + 4) : 8; src_index = b; } diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index f85b0a6f..78272f79 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -56,12 +56,12 @@ struct AlgoName static AlgoName const algorithm_names[] = { - { "cryptonight/0", "cn/0", Algorithm::CN_0 }, - { "cryptonight", "cn", Algorithm::CN_0 }, - { "cryptonight/1", "cn/1", Algorithm::CN_1 }, - { "cryptonight-monerov7", nullptr, Algorithm::CN_1 }, - { "cryptonight_v7", nullptr, Algorithm::CN_1 }, - { "cryptonight/2", "cn/2", Algorithm::CN_2 }, + { "cryptonight/0", "cn/0", Algorithm::CN_0 }, + { "cryptonight", "cn", Algorithm::CN_0 }, + { "cryptonight/1", "cn/1", Algorithm::CN_1 }, + { "cryptonight-monerov7", nullptr, Algorithm::CN_1 }, + { "cryptonight_v7", nullptr, Algorithm::CN_1 }, + { "cryptonight/2", "cn/2", Algorithm::CN_2 }, { "cryptonight-monerov8", nullptr, Algorithm::CN_2 }, { "cryptonight_v8", nullptr, Algorithm::CN_2 }, { "cryptonight/r", "cn/r", Algorithm::CN_R }, @@ -75,7 +75,7 @@ static AlgoName const algorithm_names[] = { { "cryptonight/rto", "cn/rto", Algorithm::CN_RTO }, { "cryptonight/rwz", "cn/rwz", Algorithm::CN_RWZ }, { "cryptonight/zls", "cn/zls", Algorithm::CN_ZLS }, - { "cryptonight/double", "cn/double", Algorithm::CN_ZLS }, + { "cryptonight/double", "cn/double", Algorithm::CN_DOUBLE }, # ifdef XMRIG_ALGO_CN_GPU { "cryptonight/gpu", "cn/gpu", Algorithm::CN_GPU }, { "cryptonight_gpu", nullptr, Algorithm::CN_GPU }, @@ -99,11 +99,11 @@ static AlgoName const algorithm_names[] = { { "cryptonight-bittube2", nullptr, Algorithm::CN_HEAVY_TUBE }, # endif # ifdef XMRIG_ALGO_CN_PICO - { "cryptonight-pico", "cn-pico", Algorithm::CN_PICO }, - { "cryptonight-pico/trtl", "cn-pico/trtl", Algorithm::CN_PICO }, - { "cryptonight-turtle", "cn-trtl", Algorithm::CN_PICO }, - { "cryptonight-ultralite", "cn-ultralite", Algorithm::CN_PICO }, - { "cryptonight_turtle", "cn_turtle", Algorithm::CN_PICO }, + { "cryptonight-pico", 
"cn-pico", Algorithm::CN_PICO_0 }, + { "cryptonight-pico/trtl", "cn-pico/trtl", Algorithm::CN_PICO_0 }, + { "cryptonight-turtle", "cn-trtl", Algorithm::CN_PICO_0 }, + { "cryptonight-ultralite", "cn-ultralite", Algorithm::CN_PICO_0 }, + { "cryptonight_turtle", "cn_turtle", Algorithm::CN_PICO_0 }, # endif }; @@ -111,15 +111,48 @@ static AlgoName const algorithm_names[] = { } /* namespace xmrig */ -const char *xmrig::Algorithm::name(bool shortName) const +xmrig::Algorithm::Family xmrig::Algorithm::family(Id id) { - for (size_t i = 0; i < ARRAY_SIZE(algorithm_names); i++) { - if (algorithm_names[i].id == m_id) { - return shortName ? algorithm_names[i].shortName : algorithm_names[i].name; - } + switch (id) { + case CN_0: + case CN_1: + case CN_2: + case CN_R: + case CN_WOW: + case CN_FAST: + case CN_HALF: + case CN_XAO: + case CN_RTO: + case CN_RWZ: + case CN_DOUBLE: +# ifdef XMRIG_ALGO_CN_GPU + case CN_GPU: +# endif + return CN; + +# ifdef XMRIG_ALGO_CN_LITE + case CN_LITE_0: + case CN_LITE_1: + return CN_LITE; +# endif + +# ifdef XMRIG_ALGO_CN_HEAVY + case CN_HEAVY_0: + case CN_HEAVY_TUBE: + case CN_HEAVY_XHV: + return CN_HEAVY; +# endif + +# ifdef XMRIG_ALGO_CN_PICO + case Algorithm::CN_PICO_0: + return CN_PICO; +# endif + + default: + break; } - return "invalid"; + return UNKNOWN; } @@ -137,3 +170,15 @@ xmrig::Algorithm::Id xmrig::Algorithm::parse(const char *name) return INVALID; } + + +const char *xmrig::Algorithm::name(bool shortName) const +{ + for (size_t i = 0; i < ARRAY_SIZE(algorithm_names); i++) { + if (algorithm_names[i].id == m_id) { + return shortName ? 
algorithm_names[i].shortName : algorithm_names[i].name; + } + } + + return "invalid"; +} diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index c70e0caa..c9388dee 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -63,24 +63,35 @@ public: CN_HEAVY_XHV, // "cn-heavy/xhv" Modified CryptoNight-Heavy (Haven Protocol only) # endif # ifdef XMRIG_ALGO_CN_PICO - CN_PICO, // "cn-pico" CryptoNight Turtle (TRTL) + CN_PICO_0, // "cn-pico" CryptoNight Turtle (TRTL) # endif MAX }; + enum Family : int { + UNKNOWN, + CN, + CN_LITE, + CN_HEAVY, + CN_PICO + }; + inline Algorithm() {} inline Algorithm(const char *algo) : m_id(parse(algo)) {} inline Algorithm(Id id) : m_id(id) {} inline bool isEqual(const Algorithm &other) const { return m_id == other.m_id; } + inline bool isValid() const { return m_id != INVALID; } inline const char *name() const { return name(false); } inline const char *shortName() const { return name(true); } + inline Family family() const { return family(m_id); } inline Id id() const { return m_id; } - inline bool isValid() const { return m_id != INVALID; } inline bool operator!=(const Algorithm &other) const { return !isEqual(other); } inline bool operator==(const Algorithm &other) const { return isEqual(other); } + inline operator Algorithm::Id() const { return m_id; } + static Family family(Id id); static Id parse(const char *name); private: diff --git a/src/interfaces/IThread.h b/src/interfaces/IThread.h index e74b5bca..3c0a7287 100644 --- a/src/interfaces/IThread.h +++ b/src/interfaces/IThread.h @@ -27,7 +27,7 @@ #include -#include "common/xmrig.h" +#include "crypto/common/Algorithm.h" #include "rapidjson/fwd.h" @@ -53,7 +53,7 @@ public: virtual ~IThread() = default; - virtual Algo algorithm() const = 0; + virtual Algorithm algorithm() const = 0; virtual int priority() const = 0; virtual int64_t affinity() const = 0; virtual Multiway multiway() const = 0; diff --git a/src/workers/CpuThread.cpp 
b/src/workers/CpuThread.cpp index 5c98a5b3..e26b8a0a 100644 --- a/src/workers/CpuThread.cpp +++ b/src/workers/CpuThread.cpp @@ -28,20 +28,20 @@ #include "base/io/log/Log.h" #include "common/cpu/Cpu.h" #include "crypto/cn/Asm.h" +#include "crypto/cn/CnHash.h" #include "crypto/common/VirtualMemory.h" #include "Mem.h" #include "rapidjson/document.h" #include "workers/CpuThread.h" -#if defined(XMRIG_ARM) -# include "crypto/cn/CryptoNight_arm.h" -#else -# include "crypto/cn/CryptoNight_x86.h" -#endif -xmrig::CpuThread::CpuThread(size_t index, Algo algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly) : + +static const xmrig::CnHash cnHash; + + +xmrig::CpuThread::CpuThread(size_t index, Algorithm algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly) : m_algorithm(algorithm), m_av(av), m_assembly(assembly), @@ -55,119 +55,12 @@ xmrig::CpuThread::CpuThread(size_t index, Algo algorithm, AlgoVariant av, Multiw } -#ifndef XMRIG_NO_ASM -template -static void patchCode(T dst, U src, const uint32_t iterations, const uint32_t mask) +xmrig::cn_hash_fun xmrig::CpuThread::fn(const Algorithm &algorithm) const { - const uint8_t* p = reinterpret_cast(src); - - // Workaround for Visual Studio placing trampoline in debug builds. 
-# if defined(_MSC_VER) - if (p[0] == 0xE9) { - p += *(int32_t*)(p + 1) + 5; - } -# endif - - size_t size = 0; - while (*(uint32_t*)(p + size) != 0xDEADC0DE) { - ++size; - } - size += sizeof(uint32_t); - - memcpy((void*) dst, (const void*) src, size); - - uint8_t* patched_data = reinterpret_cast(dst); - for (size_t i = 0; i + sizeof(uint32_t) <= size; ++i) { - switch (*(uint32_t*)(patched_data + i)) { - case xmrig::CRYPTONIGHT_ITER: - *(uint32_t*)(patched_data + i) = iterations; - break; - - case xmrig::CRYPTONIGHT_MASK: - *(uint32_t*)(patched_data + i) = mask; - break; - } - } + return cnHash.fn(algorithm, m_av, m_assembly); } -extern "C" void cnv2_mainloop_ivybridge_asm(cryptonight_ctx **ctx); -extern "C" void cnv2_mainloop_ryzen_asm(cryptonight_ctx **ctx); -extern "C" void cnv2_mainloop_bulldozer_asm(cryptonight_ctx **ctx); -extern "C" void cnv2_double_mainloop_sandybridge_asm(cryptonight_ctx **ctx); - - -xmrig::CpuThread::cn_mainloop_fun cn_half_mainloop_ivybridge_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_half_mainloop_ryzen_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_half_mainloop_bulldozer_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_half_double_mainloop_sandybridge_asm = nullptr; - -xmrig::CpuThread::cn_mainloop_fun cn_trtl_mainloop_ivybridge_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_trtl_mainloop_ryzen_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_trtl_mainloop_bulldozer_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_trtl_double_mainloop_sandybridge_asm = nullptr; - -xmrig::CpuThread::cn_mainloop_fun cn_zls_mainloop_ivybridge_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_zls_mainloop_ryzen_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_zls_mainloop_bulldozer_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_zls_double_mainloop_sandybridge_asm = nullptr; - -xmrig::CpuThread::cn_mainloop_fun cn_double_mainloop_ivybridge_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun 
cn_double_mainloop_ryzen_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_double_mainloop_bulldozer_asm = nullptr; -xmrig::CpuThread::cn_mainloop_fun cn_double_double_mainloop_sandybridge_asm = nullptr; - - -void xmrig::CpuThread::patchAsmVariants() -{ - const int allocation_size = 65536; - uint8_t *base = static_cast(VirtualMemory::allocateExecutableMemory(allocation_size)); - - cn_half_mainloop_ivybridge_asm = reinterpret_cast (base + 0x0000); - cn_half_mainloop_ryzen_asm = reinterpret_cast (base + 0x1000); - cn_half_mainloop_bulldozer_asm = reinterpret_cast (base + 0x2000); - cn_half_double_mainloop_sandybridge_asm = reinterpret_cast (base + 0x3000); - - cn_trtl_mainloop_ivybridge_asm = reinterpret_cast (base + 0x4000); - cn_trtl_mainloop_ryzen_asm = reinterpret_cast (base + 0x5000); - cn_trtl_mainloop_bulldozer_asm = reinterpret_cast (base + 0x6000); - cn_trtl_double_mainloop_sandybridge_asm = reinterpret_cast (base + 0x7000); - - cn_zls_mainloop_ivybridge_asm = reinterpret_cast (base + 0x8000); - cn_zls_mainloop_ryzen_asm = reinterpret_cast (base + 0x9000); - cn_zls_mainloop_bulldozer_asm = reinterpret_cast (base + 0xA000); - cn_zls_double_mainloop_sandybridge_asm = reinterpret_cast (base + 0xB000); - - cn_double_mainloop_ivybridge_asm = reinterpret_cast (base + 0xC000); - cn_double_mainloop_ryzen_asm = reinterpret_cast (base + 0xD000); - cn_double_mainloop_bulldozer_asm = reinterpret_cast (base + 0xE000); - cn_double_double_mainloop_sandybridge_asm = reinterpret_cast (base + 0xF000); - - patchCode(cn_half_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, xmrig::CRYPTONIGHT_HALF_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_half_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, xmrig::CRYPTONIGHT_HALF_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_half_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, xmrig::CRYPTONIGHT_HALF_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_half_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, 
xmrig::CRYPTONIGHT_HALF_ITER, xmrig::CRYPTONIGHT_MASK); - - patchCode(cn_trtl_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, xmrig::CRYPTONIGHT_TRTL_ITER, xmrig::CRYPTONIGHT_PICO_MASK); - patchCode(cn_trtl_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, xmrig::CRYPTONIGHT_TRTL_ITER, xmrig::CRYPTONIGHT_PICO_MASK); - patchCode(cn_trtl_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, xmrig::CRYPTONIGHT_TRTL_ITER, xmrig::CRYPTONIGHT_PICO_MASK); - patchCode(cn_trtl_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, xmrig::CRYPTONIGHT_TRTL_ITER, xmrig::CRYPTONIGHT_PICO_MASK); - - patchCode(cn_zls_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, xmrig::CRYPTONIGHT_ZLS_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_zls_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, xmrig::CRYPTONIGHT_ZLS_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_zls_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, xmrig::CRYPTONIGHT_ZLS_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_zls_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, xmrig::CRYPTONIGHT_ZLS_ITER, xmrig::CRYPTONIGHT_MASK); - - patchCode(cn_double_mainloop_ivybridge_asm, cnv2_mainloop_ivybridge_asm, xmrig::CRYPTONIGHT_DOUBLE_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_double_mainloop_ryzen_asm, cnv2_mainloop_ryzen_asm, xmrig::CRYPTONIGHT_DOUBLE_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_double_mainloop_bulldozer_asm, cnv2_mainloop_bulldozer_asm, xmrig::CRYPTONIGHT_DOUBLE_ITER, xmrig::CRYPTONIGHT_MASK); - patchCode(cn_double_double_mainloop_sandybridge_asm, cnv2_double_mainloop_sandybridge_asm, xmrig::CRYPTONIGHT_DOUBLE_ITER, xmrig::CRYPTONIGHT_MASK); - - VirtualMemory::protectExecutableMemory(base, allocation_size); - VirtualMemory::flushInstructionCache(base, allocation_size); -} -#endif - bool xmrig::CpuThread::isSoftAES(AlgoVariant av) { @@ -175,418 +68,7 @@ bool xmrig::CpuThread::isSoftAES(AlgoVariant av) } -#ifndef XMRIG_NO_ASM -template -static inline void 
add_asm_func(xmrig::CpuThread::cn_hash_fun(&asm_func_map)[xmrig::ALGO_MAX][xmrig::AV_MAX][xmrig::VARIANT_MAX][xmrig::ASM_MAX]) -{ - asm_func_map[algo][xmrig::AV_SINGLE][variant][xmrig::ASM_INTEL] = cryptonight_single_hash_asm; - asm_func_map[algo][xmrig::AV_SINGLE][variant][xmrig::ASM_RYZEN] = cryptonight_single_hash_asm; - asm_func_map[algo][xmrig::AV_SINGLE][variant][xmrig::ASM_BULLDOZER] = cryptonight_single_hash_asm; - - asm_func_map[algo][xmrig::AV_DOUBLE][variant][xmrig::ASM_INTEL] = cryptonight_double_hash_asm; - asm_func_map[algo][xmrig::AV_DOUBLE][variant][xmrig::ASM_RYZEN] = cryptonight_double_hash_asm; - asm_func_map[algo][xmrig::AV_DOUBLE][variant][xmrig::ASM_BULLDOZER] = cryptonight_double_hash_asm; -} -#endif - -xmrig::CpuThread::cn_hash_fun xmrig::CpuThread::fn(Algo algorithm, AlgoVariant av, Variant variant, Assembly assembly) -{ - assert(variant >= VARIANT_0 && variant < VARIANT_MAX); - -# ifndef XMRIG_NO_ASM - if (assembly == ASM_AUTO) { - assembly = Cpu::info()->assembly(); - } - - static cn_hash_fun asm_func_map[ALGO_MAX][AV_MAX][VARIANT_MAX][ASM_MAX] = {}; - static bool asm_func_map_initialized = false; - - if (!asm_func_map_initialized) { - add_asm_func(asm_func_map); - add_asm_func(asm_func_map); - add_asm_func(asm_func_map); - add_asm_func(asm_func_map); - -# ifdef XMRIG_ALGO_CN_PICO - add_asm_func(asm_func_map); -# endif - - add_asm_func(asm_func_map); - add_asm_func(asm_func_map); - add_asm_func(asm_func_map); - - asm_func_map_initialized = true; - } - - cn_hash_fun fun = asm_func_map[algorithm][av][variant][assembly]; - if (fun) { - return fun; - } -# endif - - constexpr const size_t count = VARIANT_MAX * 10 * ALGO_MAX; - - static const cn_hash_fun func_table[] = { - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - 
cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TUBE - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XHV - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, 
- cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL - -# ifdef XMRIG_ALGO_CN_GPU - cryptonight_single_hash_gpu, - nullptr, - cryptonight_single_hash_gpu, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, - nullptr, -# else - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU -# endif - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - -# ifdef XMRIG_ALGO_CN_LITE - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - 
cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TUBE - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_MSR - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XHV - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XAO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RTO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_WOW - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_4 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RWZ - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_ZLS - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // 
VARIANT_DOUBLE -# else - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_0 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_1 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TUBE - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_MSR - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XHV - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XAO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RTO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_WOW - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_4 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RWZ - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_ZLS - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_DOUBLE -# endif - -# ifdef XMRIG_ALGO_CN_HEAVY - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - 
cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_1 - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_MSR - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XAO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RTO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_WOW - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_4 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RWZ - nullptr, nullptr, 
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_ZLS - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_DOUBLE -# else - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_0 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_1 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TUBE - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_MSR - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XHV - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XAO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RTO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_WOW - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_4 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RWZ - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_ZLS - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 
nullptr, // VARIANT_DOUBLE -# endif - -# ifdef XMRIG_ALGO_CN_PICO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_0 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_1 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TUBE - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_MSR - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XHV - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XAO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RTO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF - - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_single_hash, - cryptonight_double_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - cryptonight_triple_hash, - cryptonight_quad_hash, - cryptonight_penta_hash, - - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_WOW - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_4 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RWZ - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_ZLS - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 
nullptr, nullptr, nullptr, // VARIANT_DOUBLE -# else - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_0 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_1 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TUBE - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_MSR - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XHV - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_XAO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RTO - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_2 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_HALF - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_TRTL - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_GPU - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_WOW - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_4 - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_RWZ - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_ZLS - nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // VARIANT_DOUBLE -# endif - }; - - static_assert(count == sizeof(func_table) / sizeof(func_table[0]), "func_table size mismatch"); - - const 
size_t index = VARIANT_MAX * 10 * algorithm + 10 * variant + av - 1; - -# ifndef NDEBUG - cn_hash_fun func = func_table[index]; - - assert(index < sizeof(func_table) / sizeof(func_table[0])); - assert(func != nullptr); - - return func; -# else - return func_table[index]; -# endif -} - - -xmrig::CpuThread *xmrig::CpuThread::createFromAV(size_t index, Algo algorithm, AlgoVariant av, int64_t affinity, int priority, Assembly assembly) +xmrig::CpuThread *xmrig::CpuThread::createFromAV(size_t index, const Algorithm &algorithm, AlgoVariant av, int64_t affinity, int priority, Assembly assembly) { assert(av > AV_AUTO && av < AV_MAX); @@ -613,7 +95,7 @@ xmrig::CpuThread *xmrig::CpuThread::createFromAV(size_t index, Algo algorithm, A } -xmrig::CpuThread *xmrig::CpuThread::createFromData(size_t index, Algo algorithm, const CpuThread::Data &data, int priority, bool softAES) +xmrig::CpuThread *xmrig::CpuThread::createFromData(size_t index, const Algorithm &algorithm, const CpuThread::Data &data, int priority, bool softAES) { int av = AV_AUTO; const Multiway multiway = data.multiway; @@ -653,7 +135,7 @@ xmrig::CpuThread::Data xmrig::CpuThread::parse(const rapidjson::Value &object) data.affinity = affinity.GetInt64(); } -# ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM data.assembly = Asm::parse(object["asm"]); # endif @@ -698,7 +180,7 @@ void xmrig::CpuThread::print() const LOG_DEBUG(GREEN_BOLD("CPU thread: ") " index " WHITE_BOLD("%zu") ", multiway " WHITE_BOLD("%d") ", av " WHITE_BOLD("%d") ",", index(), static_cast(multiway()), static_cast(m_av)); -# ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM LOG_DEBUG(" assembly: %s, affine_to_cpu: %" PRId64, Asm::toString(m_assembly), affinity()); # else LOG_DEBUG(" affine_to_cpu: %" PRId64, affinity()); @@ -737,7 +219,7 @@ rapidjson::Value xmrig::CpuThread::toConfig(rapidjson::Document &doc) const obj.AddMember("low_power_mode", multiway(), allocator); obj.AddMember("affine_to_cpu", affinity() == -1L ? 
Value(kFalseType) : Value(affinity()), allocator); -# ifndef XMRIG_NO_ASM +# ifdef XMRIG_FEATURE_ASM obj.AddMember("asm", Asm::toJSON(m_assembly), allocator); # endif diff --git a/src/workers/CpuThread.h b/src/workers/CpuThread.h index 2af421be..08aa89cb 100644 --- a/src/workers/CpuThread.h +++ b/src/workers/CpuThread.h @@ -27,6 +27,7 @@ #include "common/xmrig.h" +#include "crypto/cn/CnHash.h" #include "interfaces/IThread.h" @@ -58,27 +59,20 @@ public: }; - CpuThread(size_t index, Algo algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly); + CpuThread(size_t index, Algorithm algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly); - typedef void (*cn_hash_fun)(const uint8_t *input, size_t size, uint8_t *output, cryptonight_ctx **ctx, uint64_t height); - typedef void (*cn_mainloop_fun)(cryptonight_ctx **ctx); - -# ifndef XMRIG_NO_ASM - static void patchAsmVariants(); -# endif + cn_hash_fun fn(const Algorithm &algorithm) const; static bool isSoftAES(AlgoVariant av); - static cn_hash_fun fn(Algo algorithm, AlgoVariant av, Variant variant, Assembly assembly); - static CpuThread *createFromAV(size_t index, Algo algorithm, AlgoVariant av, int64_t affinity, int priority, Assembly assembly); - static CpuThread *createFromData(size_t index, Algo algorithm, const CpuThread::Data &data, int priority, bool softAES); + static CpuThread *createFromAV(size_t index, const Algorithm &algorithm, AlgoVariant av, int64_t affinity, int priority, Assembly assembly); + static CpuThread *createFromData(size_t index, const Algorithm &algorithm, const CpuThread::Data &data, int priority, bool softAES); static Data parse(const rapidjson::Value &object); static Multiway multiway(AlgoVariant av); inline bool isPrefetch() const { return m_prefetch; } inline bool isSoftAES() const { return m_softAES; } - inline cn_hash_fun fn(Variant variant) const { 
return fn(m_algorithm, m_av, variant, m_assembly); } - inline Algo algorithm() const override { return m_algorithm; } + inline Algorithm algorithm() const override { return m_algorithm; } inline int priority() const override { return m_priority; } inline int64_t affinity() const override { return m_affinity; } inline Multiway multiway() const override { return m_multiway; } @@ -97,7 +91,7 @@ protected: rapidjson::Value toConfig(rapidjson::Document &doc) const override; private: - const Algo m_algorithm; + const Algorithm m_algorithm; const AlgoVariant m_av; const Assembly m_assembly; const bool m_prefetch; diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index 30c43000..f209ca76 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/workers/MultiWorker.cpp @@ -34,7 +34,7 @@ template -MultiWorker::MultiWorker(ThreadHandle *handle) +xmrig::MultiWorker::MultiWorker(ThreadHandle *handle) : Worker(handle) { m_memory = Mem::create(m_ctx, m_thread->algorithm(), N); @@ -42,61 +42,58 @@ MultiWorker::MultiWorker(ThreadHandle *handle) template -MultiWorker::~MultiWorker() +xmrig::MultiWorker::~MultiWorker() { Mem::release(m_ctx, N, m_memory); } template -bool MultiWorker::selfTest() +bool xmrig::MultiWorker::selfTest() { - using namespace xmrig; - - if (m_thread->algorithm() == CRYPTONIGHT) { - const bool rc = verify(VARIANT_0, test_output_v0) && - verify(VARIANT_1, test_output_v1) && - verify(VARIANT_2, test_output_v2) && - verify(VARIANT_XTL, test_output_xtl) && - verify(VARIANT_MSR, test_output_msr) && - verify(VARIANT_XAO, test_output_xao) && - verify(VARIANT_RTO, test_output_rto) && - verify(VARIANT_HALF, test_output_half) && - verify2(VARIANT_WOW, test_output_wow) && - verify2(VARIANT_4, test_output_r) && - verify(VARIANT_RWZ, test_output_rwz) && - verify(VARIANT_ZLS, test_output_zls) && - verify(VARIANT_DOUBLE, test_output_double); + if (m_thread->algorithm().family() == Algorithm::CN) { + const bool rc = verify(Algorithm::CN_0, test_output_v0) && + 
verify(Algorithm::CN_1, test_output_v1) && + verify(Algorithm::CN_2, test_output_v2) && + verify(Algorithm::CN_FAST, test_output_msr) && + verify(Algorithm::CN_XAO, test_output_xao) && + verify(Algorithm::CN_RTO, test_output_rto) && + verify(Algorithm::CN_HALF, test_output_half) && + verify2(Algorithm::CN_WOW, test_output_wow) && + verify2(Algorithm::CN_R, test_output_r) && + verify(Algorithm::CN_RWZ, test_output_rwz) && + verify(Algorithm::CN_ZLS, test_output_zls) && + verify(Algorithm::CN_DOUBLE, test_output_double); # ifdef XMRIG_ALGO_CN_GPU if (!rc || N > 1) { return rc; } - return verify(VARIANT_GPU, test_output_gpu); + return verify(Algorithm::CN_GPU, test_output_gpu); # else return rc; # endif } # ifdef XMRIG_ALGO_CN_LITE - if (m_thread->algorithm() == CRYPTONIGHT_LITE) { - return verify(VARIANT_0, test_output_v0_lite) && - verify(VARIANT_1, test_output_v1_lite); + if (m_thread->algorithm().family() == Algorithm::CN_LITE) { + return verify(Algorithm::CN_LITE_0, test_output_v0_lite) && + verify(Algorithm::CN_LITE_1, test_output_v1_lite); } # endif # ifdef XMRIG_ALGO_CN_HEAVY - if (m_thread->algorithm() == CRYPTONIGHT_HEAVY) { - return verify(VARIANT_0, test_output_v0_heavy) && - verify(VARIANT_XHV, test_output_xhv_heavy) && - verify(VARIANT_TUBE, test_output_tube_heavy); + if (m_thread->algorithm().family() == Algorithm::CN_HEAVY) { + return verify(Algorithm::CN_HEAVY_0, test_output_v0_heavy) && + verify(Algorithm::CN_HEAVY_XHV, test_output_xhv_heavy) && + verify(Algorithm::CN_HEAVY_TUBE, test_output_tube_heavy); } # endif # ifdef XMRIG_ALGO_CN_PICO - if (m_thread->algorithm() == CRYPTONIGHT_PICO) { - return verify(VARIANT_TRTL, test_output_pico_trtl); + if (m_thread->algorithm().family() == Algorithm::CN_PICO) { + return verify(Algorithm::CN_PICO_0, test_output_pico_trtl); } # endif @@ -105,7 +102,7 @@ bool MultiWorker::selfTest() template -void MultiWorker::start() +void xmrig::MultiWorker::start() { while (Workers::sequence() > 0) { if 
(Workers::isPaused()) { @@ -126,12 +123,11 @@ void MultiWorker::start() storeStats(); } - // FIXME -// m_thread->fn(m_state.job.algorithm().variant())(m_state.blob, m_state.job.size(), m_hash, m_ctx, m_state.job.height()); + m_thread->fn(m_state.job.algorithm())(m_state.blob, m_state.job.size(), m_hash, m_ctx, m_state.job.height()); for (size_t i = 0; i < N; ++i) { if (*reinterpret_cast(m_hash + (i * 32) + 24) < m_state.job.target()) { - Workers::submit(xmrig::JobResult(m_state.job.poolId(), m_state.job.id(), m_state.job.clientId(), *nonce(i), m_hash + (i * 32), m_state.job.diff(), m_state.job.algorithm())); + Workers::submit(JobResult(m_state.job.poolId(), m_state.job.id(), m_state.job.clientId(), *nonce(i), m_hash + (i * 32), m_state.job.diff(), m_state.job.algorithm())); } *nonce(i) += 1; @@ -148,7 +144,7 @@ void MultiWorker::start() template -bool MultiWorker::resume(const xmrig::Job &job) +bool xmrig::MultiWorker::resume(const xmrig::Job &job) { if (m_state.job.poolId() == -1 && job.poolId() >= 0 && job.id() == m_pausedState.job.id()) { m_state = m_pausedState; @@ -160,10 +156,9 @@ bool MultiWorker::resume(const xmrig::Job &job) template -bool MultiWorker::verify(xmrig::Variant variant, const uint8_t *referenceValue) +bool xmrig::MultiWorker::verify(const Algorithm &algorithm, const uint8_t *referenceValue) { - - xmrig::CpuThread::cn_hash_fun func = m_thread->fn(variant); + cn_hash_fun func = m_thread->fn(algorithm); if (!func) { return false; } @@ -174,9 +169,9 @@ bool MultiWorker::verify(xmrig::Variant variant, const uint8_t *referenceValu template -bool MultiWorker::verify2(xmrig::Variant variant, const uint8_t *referenceValue) +bool xmrig::MultiWorker::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) { - xmrig::CpuThread::cn_hash_fun func = m_thread->fn(variant); + cn_hash_fun func = m_thread->fn(algorithm); if (!func) { return false; } @@ -201,9 +196,9 @@ bool MultiWorker::verify2(xmrig::Variant variant, const uint8_t *referenceVal 
template<> -bool MultiWorker<1>::verify2(xmrig::Variant variant, const uint8_t *referenceValue) +bool xmrig::MultiWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) { - xmrig::CpuThread::cn_hash_fun func = m_thread->fn(variant); + cn_hash_fun func = m_thread->fn(algorithm); if (!func) { return false; } @@ -221,9 +216,9 @@ bool MultiWorker<1>::verify2(xmrig::Variant variant, const uint8_t *referenceVal template -void MultiWorker::consumeJob() +void xmrig::MultiWorker::consumeJob() { - xmrig::Job job = Workers::job(); + Job job = Workers::job(); m_sequence = Workers::sequence(); if (m_state.job == job) { return; @@ -258,7 +253,7 @@ void MultiWorker::consumeJob() template -void MultiWorker::save(const xmrig::Job &job) +void xmrig::MultiWorker::save(const Job &job) { if (job.poolId() == -1 && m_state.job.poolId() >= 0) { m_pausedState = m_state; @@ -266,8 +261,13 @@ void MultiWorker::save(const xmrig::Job &job) } +namespace xmrig { + template class MultiWorker<1>; template class MultiWorker<2>; template class MultiWorker<3>; template class MultiWorker<4>; template class MultiWorker<5>; + +} + diff --git a/src/workers/MultiWorker.h b/src/workers/MultiWorker.h index 99d37e44..82898e1e 100644 --- a/src/workers/MultiWorker.h +++ b/src/workers/MultiWorker.h @@ -33,7 +33,7 @@ #include "workers/Worker.h" -class Handle; +namespace xmrig { template @@ -48,11 +48,11 @@ protected: void start() override; private: - bool resume(const xmrig::Job &job); - bool verify(xmrig::Variant variant, const uint8_t *referenceValue); - bool verify2(xmrig::Variant variant, const uint8_t *referenceValue); + bool resume(const Job &job); + bool verify(const Algorithm &algorithm, const uint8_t *referenceValue); + bool verify2(const Algorithm &algorithm, const uint8_t *referenceValue); void consumeJob(); - void save(const xmrig::Job &job); + void save(const Job &job); inline uint32_t *nonce(size_t index) { @@ -61,8 +61,8 @@ private: struct State { - alignas(16) uint8_t 
blob[xmrig::Job::kMaxBlobSize * N]; - xmrig::Job job; + alignas(16) uint8_t blob[Job::kMaxBlobSize * N]; + Job job; }; @@ -73,4 +73,7 @@ private: }; +} // namespace xmrig + + #endif /* XMRIG_MULTIWORKER_H */ diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index 62cbd1cf..1955677b 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -32,7 +32,6 @@ #include "base/tools/Handle.h" #include "core/config/Config.h" #include "core/Controller.h" -#include "crypto/cn/CryptoNight_constants.h" #include "interfaces/IJobResultListener.h" #include "interfaces/IThread.h" #include "Mem.h" @@ -169,14 +168,10 @@ void Workers::start(xmrig::Controller *controller) LOG_NOTICE("--------------------------------------------------------------------------"); # endif -# ifndef XMRIG_NO_ASM - xmrig::CpuThread::patchAsmVariants(); -# endif - m_controller = controller; const std::vector &threads = controller->config()->threads(); -// m_status.algo = controller->config()->algorithm().algo(); // FIXME + m_status.algo = xmrig::Algorithm::CN_0; // FIXME algo m_status.threads = threads.size(); for (const xmrig::IThread *thread : threads) { @@ -240,7 +235,7 @@ void Workers::threadsSummary(rapidjson::Document &doc) { uv_mutex_lock(&m_mutex); const uint64_t pages[2] = { m_status.hugePages, m_status.pages }; - const uint64_t memory = m_status.ways * xmrig::cn_select_memory(m_status.algo); + const uint64_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo); uv_mutex_unlock(&m_mutex); auto &allocator = doc.GetAllocator(); @@ -263,23 +258,23 @@ void Workers::onReady(void *arg) switch (handle->config()->multiway()) { case 1: - worker = new MultiWorker<1>(handle); + worker = new xmrig::MultiWorker<1>(handle); break; case 2: - worker = new MultiWorker<2>(handle); + worker = new xmrig::MultiWorker<2>(handle); break; case 3: - worker = new MultiWorker<3>(handle); + worker = new xmrig::MultiWorker<3>(handle); break; case 4: - worker = new MultiWorker<4>(handle); + 
worker = new xmrig::MultiWorker<4>(handle); break; case 5: - worker = new MultiWorker<5>(handle); + worker = new xmrig::MultiWorker<5>(handle); break; default: @@ -344,7 +339,7 @@ void Workers::start(IWorker *worker) if (m_status.started == m_status.threads) { const double percent = (double) m_status.hugePages / m_status.pages * 100.0; - const size_t memory = m_status.ways * xmrig::cn_select_memory(m_status.algo) / 1024; + const size_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo) / 1024; LOG_INFO(GREEN_BOLD("READY (CPU)") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu %1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") "", m_status.threads, m_status.ways, diff --git a/src/workers/Workers.h b/src/workers/Workers.h index 5b084fc2..24480517 100644 --- a/src/workers/Workers.h +++ b/src/workers/Workers.h @@ -86,8 +86,7 @@ private: pages(0), started(0), threads(0), - ways(0), - algo(xmrig::CRYPTONIGHT) + ways(0) {} size_t hugePages; @@ -95,7 +94,7 @@ private: size_t started; size_t threads; size_t ways; - xmrig::Algo algo; + xmrig::Algorithm algo; }; static bool m_active; From 088587fa7280abd8636f4e476aaba44dafa6ff53 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 14 Jun 2019 05:21:17 +0700 Subject: [PATCH 003/172] Fixed build on Linux. 
--- src/workers/MultiWorker.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index f209ca76..ffa34dda 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/workers/MultiWorker.cpp @@ -195,8 +195,10 @@ bool xmrig::MultiWorker::verify2(const Algorithm &algorithm, const uint8_t *r } +namespace xmrig { + template<> -bool xmrig::MultiWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) +bool MultiWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) { cn_hash_fun func = m_thread->fn(algorithm); if (!func) { @@ -214,6 +216,8 @@ bool xmrig::MultiWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *r return true; } +} // namespace xmrig + template void xmrig::MultiWorker::consumeJob() @@ -269,5 +273,5 @@ template class MultiWorker<3>; template class MultiWorker<4>; template class MultiWorker<5>; -} +} // namespace xmrig From b73c204e73bb35a6883b1810c364d3b8d9377ef8 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 15 Jun 2019 00:28:16 +0700 Subject: [PATCH 004/172] Fixed ARM mining code. 
--- src/crypto/cn/CryptoNight_arm.h | 315 ++++++++++++++++------------- src/crypto/cn/CryptoNight_monero.h | 2 +- src/crypto/cn/gpu/cn_gpu_arm.cpp | 4 +- 3 files changed, 182 insertions(+), 139 deletions(-) diff --git a/src/crypto/cn/CryptoNight_arm.h b/src/crypto/cn/CryptoNight_arm.h index d9be454b..6d56b548 100644 --- a/src/crypto/cn/CryptoNight_arm.h +++ b/src/crypto/cn/CryptoNight_arm.h @@ -29,11 +29,11 @@ #include "common/crypto/keccak.h" -#include "crypto/common/portable/mm_malloc.h" -#include "crypto/cn/CryptoNight_constants.h" +#include "crypto/cn/CnAlgo.h" #include "crypto/cn/CryptoNight_monero.h" #include "crypto/cn/CryptoNight.h" #include "crypto/cn/soft_aes.h" +#include "crypto/common/portable/mm_malloc.h" extern "C" @@ -226,9 +226,14 @@ inline void mix_and_propagate(__m128i& x0, __m128i& x1, __m128i& x2, __m128i& x3 } -template +namespace xmrig { + + +template static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output) { + constexpr CnAlgo props; + __m128i xin0, xin1, xin2, xin3, xin4, xin5, xin6, xin7; __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; @@ -243,7 +248,7 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output) xin6 = _mm_load_si128(input + 10); xin7 = _mm_load_si128(input + 11); - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { + if (props.isHeavy()) { for (size_t i = 0; i < 16; i++) { aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); @@ -260,7 +265,7 @@ static inline void cn_explode_scratchpad(const __m128i *input, __m128i *output) } } - for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) { + for (size_t i = 0; i < props.memory() / sizeof(__m128i); i += 8) { aes_round(k0, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); aes_round(k1, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); aes_round(k2, &xin0, &xin1, &xin2, &xin3, &xin4, &xin5, &xin6, &xin7); @@ -284,37 +289,17 @@ static inline void 
cn_explode_scratchpad(const __m128i *input, __m128i *output) } -#ifdef XMRIG_ALGO_CN_GPU -template -void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output) -{ - constexpr size_t hash_size = 200; // 25x8 bytes - alignas(16) uint64_t hash[25]; - - for (uint64_t i = 0; i < MEM / 512; i++) - { - memcpy(hash, input, hash_size); - hash[0] ^= i; - - xmrig::keccakf(hash, 24); - memcpy(output, hash, 160); - output += 160; - - xmrig::keccakf(hash, 24); - memcpy(output, hash, 176); - output += 176; - - xmrig::keccakf(hash, 24); - memcpy(output, hash, 176); - output += 176; - } -} -#endif - - -template +template static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output) { + constexpr CnAlgo props; + +# ifdef XMRIG_ALGO_CN_GPU + constexpr bool IS_HEAVY = props.isHeavy() || ALGO == Algorithm::CN_GPU; +# else + constexpr bool IS_HEAVY = props.isHeavy(); +# endif + __m128i xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7; __m128i k0, k1, k2, k3, k4, k5, k6, k7, k8, k9; @@ -329,8 +314,7 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output) xout6 = _mm_load_si128(output + 10); xout7 = _mm_load_si128(output + 11); - for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) - { + for (size_t i = 0; i < props.memory() / sizeof(__m128i); i += 8) { xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1); xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); @@ -351,13 +335,13 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output) aes_round(k8, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); aes_round(k9, &xout0, &xout1, &xout2, &xout3, &xout4, &xout5, &xout6, &xout7); - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { + if (IS_HEAVY) { mix_and_propagate(xout0, xout1, xout2, xout3, xout4, xout5, xout6, xout7); } } - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { - for (size_t i = 0; i < MEM / sizeof(__m128i); i += 8) { + if 
(IS_HEAVY) { + for (size_t i = 0; i < props.memory() / sizeof(__m128i); i += 8) { xout0 = _mm_xor_si128(_mm_load_si128(input + i + 0), xout0); xout1 = _mm_xor_si128(_mm_load_si128(input + i + 1), xout1); xout2 = _mm_xor_si128(_mm_load_si128(input + i + 2), xout2); @@ -408,6 +392,9 @@ static inline void cn_implode_scratchpad(const __m128i *input, __m128i *output) } +} /* namespace xmrig */ + + static inline __m128i aes_round_tweak_div(const __m128i &in, const __m128i &key) { alignas(16) uint32_t k[4]; @@ -430,13 +417,18 @@ static inline __m128i aes_round_tweak_div(const __m128i &in, const __m128i &key) } -template +namespace xmrig { + + +template static inline void cryptonight_monero_tweak(const uint8_t* l, uint64_t idx, __m128i ax0, __m128i bx0, __m128i bx1, __m128i& cx) { + constexpr CnAlgo props; + uint64_t* mem_out = (uint64_t*)&l[idx]; - if (BASE == xmrig::VARIANT_2) { - VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (VARIANT == xmrig::VARIANT_RWZ ? 1 : 0)); + if (props.base() == Algorithm::CN_2) { + VARIANT2_SHUFFLE(l, idx, ax0, bx0, bx1, cx, (ALGO == Algorithm::CN_RWZ ? 1 : 0)); _mm_store_si128((__m128i *)mem_out, _mm_xor_si128(bx0, cx)); } else { __m128i tmp = _mm_xor_si128(bx0, cx); @@ -446,7 +438,7 @@ static inline void cryptonight_monero_tweak(const uint8_t* l, uint64_t idx, __m1 uint8_t x = vh >> 24; static const uint16_t table = 0x7531; - const uint8_t index = (((x >> (VARIANT == xmrig::VARIANT_XTL ? 
4 : 3)) & 6) | (x & 1)) << 1; + const uint8_t index = (((x >> (3)) & 6) | (x & 1)) << 1; vh ^= ((table >> index) & 0x3) << 28; mem_out[1] = vh; @@ -454,24 +446,28 @@ static inline void cryptonight_monero_tweak(const uint8_t* l, uint64_t idx, __m1 } -template +template inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MASK = xmrig::cn_select_mask(); - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr size_t MEM = xmrig::cn_select_memory(); - constexpr xmrig::Variant BASE = xmrig::cn_base_variant(); + constexpr CnAlgo props; + constexpr size_t MASK = props.mask(); + constexpr Algorithm::Id BASE = props.base(); - if (BASE == xmrig::VARIANT_1 && size < 43) { +# ifdef XMRIG_ALGO_CN_HEAVY + constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE; +# else + constexpr bool IS_CN_HEAVY_TUBE = false; +# endif + + if (BASE == Algorithm::CN_1 && size < 43) { memset(output, 0, 32); return; } - xmrig::keccak(input, size, ctx[0]->state); + keccak(input, size, ctx[0]->state); + cn_explode_scratchpad(reinterpret_cast(ctx[0]->state), reinterpret_cast<__m128i *>(ctx[0]->memory)); - cn_explode_scratchpad((__m128i*) ctx[0]->state, (__m128i*) ctx[0]->memory); - - const uint8_t* l0 = ctx[0]->memory; + uint8_t* l0 = ctx[0]->memory; uint64_t* h0 = reinterpret_cast(ctx[0]->state); VARIANT1_INIT(0); @@ -480,19 +476,19 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si uint64_t al0 = h0[0] ^ h0[4]; uint64_t ah0 = h0[1] ^ h0[5]; - __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]); - __m128i bx1 = _mm_set_epi64x(h0[9] ^ h0[11], h0[8] ^ h0[10]); + __m128i bx0 = _mm_set_epi64x(h0[3] ^ h0[7], h0[2] ^ h0[6]); + __m128i bx1 = _mm_set_epi64x(h0[9] ^ h0[11], h0[8] ^ h0[10]); uint64_t idx0 = al0; - for (size_t i = 0; i < ITERATIONS; i++) { + for (size_t i = 0; i < props.iterations(); i++) { __m128i 
cx; - if (VARIANT == xmrig::VARIANT_TUBE || !SOFT_AES) { - cx = _mm_load_si128((__m128i *) &l0[idx0 & MASK]); + if (IS_CN_HEAVY_TUBE || !SOFT_AES) { + cx = _mm_load_si128(reinterpret_cast(&l0[idx0 & MASK])); } const __m128i ax0 = _mm_set_epi64x(ah0, al0); - if (VARIANT == xmrig::VARIANT_TUBE) { + if (IS_CN_HEAVY_TUBE) { cx = aes_round_tweak_div(cx, ax0); } else if (SOFT_AES) { @@ -502,8 +498,8 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si cx = _mm_aesenc_si128(cx, ax0); } - if (BASE == xmrig::VARIANT_1 || BASE == xmrig::VARIANT_2) { - cryptonight_monero_tweak(l0, idx0 & MASK, ax0, bx0, bx1, cx); + if (BASE == Algorithm::CN_1 || BASE == Algorithm::CN_2) { + cryptonight_monero_tweak(l0, idx0 & MASK, ax0, bx0, bx1, cx); } else { _mm_store_si128((__m128i *)&l0[idx0 & MASK], _mm_xor_si128(bx0, cx)); } @@ -514,10 +510,10 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si cl = ((uint64_t*) &l0[idx0 & MASK])[0]; ch = ((uint64_t*) &l0[idx0 & MASK])[1]; - if (BASE == xmrig::VARIANT_2) { - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { + if (BASE == Algorithm::CN_2) { + if (props.isR()) { VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx0, bx1); - if (VARIANT == xmrig::VARIANT_4) { + if (ALGO == Algorithm::CN_R) { al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32); ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32); } @@ -528,11 +524,11 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si lo = __umul128(idx0, cl, &hi); - if (BASE == xmrig::VARIANT_2) { - if (VARIANT == xmrig::VARIANT_4) { + if (BASE == Algorithm::CN_2) { + if (ALGO == Algorithm::CN_R) { VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx0, bx1, cx, 0); } else { - VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, (VARIANT == xmrig::VARIANT_RWZ ? 1 : 0)); + VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx0, bx1, hi, lo, (ALGO == Algorithm::CN_RWZ ? 
1 : 0)); } } @@ -541,9 +537,9 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si ((uint64_t*)&l0[idx0 & MASK])[0] = al0; - if (BASE == xmrig::VARIANT_1 && (VARIANT == xmrig::VARIANT_TUBE || VARIANT == xmrig::VARIANT_RTO)) { + if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) { ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0 ^ al0; - } else if (BASE == xmrig::VARIANT_1) { + } else if (BASE == Algorithm::CN_1) { ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0; } else { ((uint64_t*)&l0[idx0 & MASK])[1] = ah0; @@ -553,7 +549,8 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si ah0 ^= ch; idx0 = al0; - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { +# ifdef XMRIG_ALGO_CN_HEAVY + if (props.isHeavy()) { const int64x2_t x = vld1q_s64(reinterpret_cast(&l0[idx0 & MASK])); const int64_t n = vgetq_lane_s64(x, 0); const int32_t d = vgetq_lane_s32(x, 2); @@ -561,77 +558,113 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si ((int64_t*)&l0[idx0 & MASK])[0] = n ^ q; - if (VARIANT == xmrig::VARIANT_XHV) { + if (ALGO == Algorithm::CN_HEAVY_XHV) { idx0 = (~d) ^ q; } else { idx0 = d ^ q; } } +# endif - if (BASE == xmrig::VARIANT_2) { + if (BASE == Algorithm::CN_2) { bx1 = bx0; } bx0 = cx; } - cn_implode_scratchpad((__m128i*) ctx[0]->memory, (__m128i*) ctx[0]->state); - - xmrig::keccakf(h0, 24); + cn_implode_scratchpad(reinterpret_cast(ctx[0]->memory), reinterpret_cast<__m128i *>(ctx[0]->state)); + keccakf(h0, 24); extra_hashes[ctx[0]->state[0] & 3](ctx[0]->state, 200, output); } +} /* namespace xmrig */ + + #ifdef XMRIG_ALGO_CN_GPU template void cn_gpu_inner_arm(const uint8_t *spad, uint8_t *lpad); -template +namespace xmrig { + + +template +void cn_explode_scratchpad_gpu(const uint8_t *input, uint8_t *output) +{ + constexpr size_t hash_size = 200; // 25x8 bytes + alignas(16) uint64_t hash[25]; + + for (uint64_t i = 0; i < MEM / 512; i++) { + memcpy(hash, input, hash_size); + hash[0] 
^= i; + + xmrig::keccakf(hash, 24); + memcpy(output, hash, 160); + output += 160; + + xmrig::keccakf(hash, 24); + memcpy(output, hash, 176); + output += 176; + + xmrig::keccakf(hash, 24); + memcpy(output, hash, 176); + output += 176; + } +} + + +template inline void cryptonight_single_hash_gpu(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MASK = xmrig::CRYPTONIGHT_GPU_MASK; - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr size_t MEM = xmrig::cn_select_memory(); + constexpr CnAlgo props; - static_assert(MASK > 0 && ITERATIONS > 0 && MEM > 0, "unsupported algorithm/variant"); - - xmrig::keccak(input, size, ctx[0]->state); - cn_explode_scratchpad_gpu(ctx[0]->state, ctx[0]->memory); + keccak(input, size, ctx[0]->state); + cn_explode_scratchpad_gpu(ctx[0]->state, ctx[0]->memory); fesetround(FE_TONEAREST); - cn_gpu_inner_arm(ctx[0]->state, ctx[0]->memory); + cn_gpu_inner_arm(ctx[0]->state, ctx[0]->memory); - cn_implode_scratchpad((__m128i*) ctx[0]->memory, (__m128i*) ctx[0]->state); - - xmrig::keccakf((uint64_t*) ctx[0]->state, 24); + cn_implode_scratchpad(reinterpret_cast(ctx[0]->memory), reinterpret_cast<__m128i *>(ctx[0]->state)); + keccakf(reinterpret_cast(ctx[0]->state), 24); memcpy(output, ctx[0]->state, 32); } + +} /* namespace xmrig */ #endif -template +namespace xmrig { + + +template inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx, uint64_t height) { - constexpr size_t MASK = xmrig::cn_select_mask(); - constexpr size_t ITERATIONS = xmrig::cn_select_iter(); - constexpr size_t MEM = xmrig::cn_select_memory(); - constexpr xmrig::Variant BASE = xmrig::cn_base_variant(); + constexpr CnAlgo props; + constexpr size_t MASK = props.mask(); + constexpr Algorithm::Id BASE = props.base(); - if (BASE == xmrig::VARIANT_1 && size < 43) { +# ifdef 
XMRIG_ALGO_CN_HEAVY + constexpr bool IS_CN_HEAVY_TUBE = ALGO == Algorithm::CN_HEAVY_TUBE; +# else + constexpr bool IS_CN_HEAVY_TUBE = false; +# endif + + if (BASE == Algorithm::CN_1 && size < 43) { memset(output, 0, 64); return; } - xmrig::keccak(input, size, ctx[0]->state); - xmrig::keccak(input + size, size, ctx[1]->state); + keccak(input, size, ctx[0]->state); + keccak(input + size, size, ctx[1]->state); - const uint8_t* l0 = ctx[0]->memory; - const uint8_t* l1 = ctx[1]->memory; - uint64_t* h0 = reinterpret_cast(ctx[0]->state); - uint64_t* h1 = reinterpret_cast(ctx[1]->state); + uint8_t *l0 = ctx[0]->memory; + uint8_t *l1 = ctx[1]->memory; + uint64_t *h0 = reinterpret_cast(ctx[0]->state); + uint64_t *h1 = reinterpret_cast(ctx[1]->state); VARIANT1_INIT(0); VARIANT1_INIT(1); @@ -640,8 +673,8 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si VARIANT4_RANDOM_MATH_INIT(0); VARIANT4_RANDOM_MATH_INIT(1); - cn_explode_scratchpad((__m128i*) h0, (__m128i*) l0); - cn_explode_scratchpad((__m128i*) h1, (__m128i*) l1); + cn_explode_scratchpad(reinterpret_cast(h0), reinterpret_cast<__m128i *>(l0)); + cn_explode_scratchpad(reinterpret_cast(h1), reinterpret_cast<__m128i *>(l1)); uint64_t al0 = h0[0] ^ h0[4]; uint64_t al1 = h1[0] ^ h1[4]; @@ -656,16 +689,16 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si uint64_t idx0 = al0; uint64_t idx1 = al1; - for (size_t i = 0; i < ITERATIONS; i++) { + for (size_t i = 0; i < props.iterations(); i++) { __m128i cx0, cx1; - if (VARIANT == xmrig::VARIANT_TUBE || !SOFT_AES) { + if (IS_CN_HEAVY_TUBE || !SOFT_AES) { cx0 = _mm_load_si128((__m128i *) &l0[idx0 & MASK]); cx1 = _mm_load_si128((__m128i *) &l1[idx1 & MASK]); } const __m128i ax0 = _mm_set_epi64x(ah0, al0); const __m128i ax1 = _mm_set_epi64x(ah1, al1); - if (VARIANT == xmrig::VARIANT_TUBE) { + if (IS_CN_HEAVY_TUBE) { cx0 = aes_round_tweak_div(cx0, ax0); cx1 = aes_round_tweak_div(cx1, ax1); } @@ -678,9 +711,9 @@ inline 
void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si cx1 = _mm_aesenc_si128(cx1, ax1); } - if (BASE == xmrig::VARIANT_1 || (BASE == xmrig::VARIANT_2)) { - cryptonight_monero_tweak(l0, idx0 & MASK, ax0, bx00, bx01, cx0); - cryptonight_monero_tweak(l1, idx1 & MASK, ax1, bx10, bx11, cx1); + if (BASE == Algorithm::CN_1 || BASE == Algorithm::CN_2) { + cryptonight_monero_tweak(l0, idx0 & MASK, ax0, bx00, bx01, cx0); + cryptonight_monero_tweak(l1, idx1 & MASK, ax1, bx10, bx11, cx1); } else { _mm_store_si128((__m128i *) &l0[idx0 & MASK], _mm_xor_si128(bx00, cx0)); _mm_store_si128((__m128i *) &l1[idx1 & MASK], _mm_xor_si128(bx10, cx1)); @@ -693,10 +726,10 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si cl = ((uint64_t*) &l0[idx0 & MASK])[0]; ch = ((uint64_t*) &l0[idx0 & MASK])[1]; - if (BASE == xmrig::VARIANT_2) { - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { + if (BASE == Algorithm::CN_2) { + if (props.isR()) { VARIANT4_RANDOM_MATH(0, al0, ah0, cl, bx00, bx01); - if (VARIANT == xmrig::VARIANT_4) { + if (ALGO == Algorithm::CN_R) { al0 ^= r0[2] | ((uint64_t)(r0[3]) << 32); ah0 ^= r0[0] | ((uint64_t)(r0[1]) << 32); } @@ -707,11 +740,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si lo = __umul128(idx0, cl, &hi); - if (BASE == xmrig::VARIANT_2) { - if (VARIANT == xmrig::VARIANT_4) { + if (BASE == Algorithm::CN_2) { + if (ALGO == Algorithm::CN_R) { VARIANT2_SHUFFLE(l0, idx0 & MASK, ax0, bx00, bx01, cx0, 0); } else { - VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (VARIANT == xmrig::VARIANT_RWZ ? 1 : 0)); + VARIANT2_SHUFFLE2(l0, idx0 & MASK, ax0, bx00, bx01, hi, lo, (ALGO == Algorithm::CN_RWZ ? 
1 : 0)); } } @@ -720,9 +753,9 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ((uint64_t*)&l0[idx0 & MASK])[0] = al0; - if (BASE == xmrig::VARIANT_1 && (VARIANT == xmrig::VARIANT_TUBE || VARIANT == xmrig::VARIANT_RTO)) { + if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) { ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0 ^ al0; - } else if (BASE == xmrig::VARIANT_1) { + } else if (BASE == Algorithm::CN_1) { ((uint64_t*)&l0[idx0 & MASK])[1] = ah0 ^ tweak1_2_0; } else { ((uint64_t*)&l0[idx0 & MASK])[1] = ah0; @@ -732,7 +765,8 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ah0 ^= ch; idx0 = al0; - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { +# ifdef XMRIG_ALGO_CN_HEAVY + if (props.isHeavy()) { const int64x2_t x = vld1q_s64(reinterpret_cast(&l0[idx0 & MASK])); const int64_t n = vgetq_lane_s64(x, 0); const int32_t d = vgetq_lane_s32(x, 2); @@ -740,21 +774,22 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ((int64_t*)&l0[idx0 & MASK])[0] = n ^ q; - if (VARIANT == xmrig::VARIANT_XHV) { + if (ALGO == Algorithm::CN_HEAVY_XHV) { idx0 = (~d) ^ q; } else { idx0 = d ^ q; } } +# endif cl = ((uint64_t*) &l1[idx1 & MASK])[0]; ch = ((uint64_t*) &l1[idx1 & MASK])[1]; - if (BASE == xmrig::VARIANT_2) { - if ((VARIANT == xmrig::VARIANT_WOW) || (VARIANT == xmrig::VARIANT_4)) { + if (BASE == Algorithm::CN_2) { + if (props.isR()) { VARIANT4_RANDOM_MATH(1, al1, ah1, cl, bx10, bx11); - if (VARIANT == xmrig::VARIANT_4) { + if (ALGO == Algorithm::CN_R) { al1 ^= r1[2] | ((uint64_t)(r1[3]) << 32); ah1 ^= r1[0] | ((uint64_t)(r1[1]) << 32); } @@ -765,11 +800,11 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si lo = __umul128(idx1, cl, &hi); - if (BASE == xmrig::VARIANT_2) { - if (VARIANT == xmrig::VARIANT_4) { + if (BASE == Algorithm::CN_2) { + if (ALGO == Algorithm::CN_R) { VARIANT2_SHUFFLE(l1, idx1 & MASK, ax1, bx10, bx11, cx1, 0); } else { - 
VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (VARIANT == xmrig::VARIANT_RWZ ? 1 : 0)); + VARIANT2_SHUFFLE2(l1, idx1 & MASK, ax1, bx10, bx11, hi, lo, (ALGO == Algorithm::CN_RWZ ? 1 : 0)); } } @@ -778,9 +813,9 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ((uint64_t*)&l1[idx1 & MASK])[0] = al1; - if (BASE == xmrig::VARIANT_1 && (VARIANT == xmrig::VARIANT_TUBE || VARIANT == xmrig::VARIANT_RTO)) { + if (IS_CN_HEAVY_TUBE || ALGO == Algorithm::CN_RTO) { ((uint64_t*)&l1[idx1 & MASK])[1] = ah1 ^ tweak1_2_1 ^ al1; - } else if (BASE == xmrig::VARIANT_1) { + } else if (BASE == Algorithm::CN_1) { ((uint64_t*)&l1[idx1 & MASK])[1] = ah1 ^ tweak1_2_1; } else { ((uint64_t*)&l1[idx1 & MASK])[1] = ah1; @@ -790,7 +825,8 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ah1 ^= ch; idx1 = al1; - if (ALGO == xmrig::CRYPTONIGHT_HEAVY) { +# ifdef XMRIG_ALGO_CN_HEAVY + if (props.isHeavy()) { const int64x2_t x = vld1q_s64(reinterpret_cast(&l1[idx1 & MASK])); const int64_t n = vgetq_lane_s64(x, 0); const int32_t d = vgetq_lane_s32(x, 2); @@ -798,47 +834,54 @@ inline void cryptonight_double_hash(const uint8_t *__restrict__ input, size_t si ((int64_t*)&l1[idx1 & MASK])[0] = n ^ q; - if (VARIANT == xmrig::VARIANT_XHV) { + if (ALGO == Algorithm::CN_HEAVY_XHV) { idx1 = (~d) ^ q; } else { idx1 = d ^ q; } } - if (BASE == xmrig::VARIANT_2) { +# endif + + if (BASE == Algorithm::CN_2) { bx01 = bx00; bx11 = bx10; } + bx00 = cx0; bx10 = cx1; } - cn_implode_scratchpad((__m128i*) l0, (__m128i*) h0); - cn_implode_scratchpad((__m128i*) l1, (__m128i*) h1); + cn_implode_scratchpad(reinterpret_cast(l0), reinterpret_cast<__m128i *>(h0)); + cn_implode_scratchpad(reinterpret_cast(l1), reinterpret_cast<__m128i *>(h1)); - xmrig::keccakf(h0, 24); - xmrig::keccakf(h1, 24); + keccakf(h0, 24); + keccakf(h1, 24); extra_hashes[ctx[0]->state[0] & 3](ctx[0]->state, 200, output); extra_hashes[ctx[1]->state[0] & 3](ctx[1]->state, 200, 
output + 32); } -template +template inline void cryptonight_triple_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx, uint64_t height) { } -template +template inline void cryptonight_quad_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx, uint64_t height) { } -template +template inline void cryptonight_penta_hash(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, struct cryptonight_ctx **__restrict__ ctx, uint64_t height) { } -#endif /* __CRYPTONIGHT_ARM_H__ */ + +} /* namespace xmrig */ + + +#endif /* XMRIG_CRYPTONIGHT_ARM_H */ diff --git a/src/crypto/cn/CryptoNight_monero.h b/src/crypto/cn/CryptoNight_monero.h index 259cb3b6..13948dcd 100644 --- a/src/crypto/cn/CryptoNight_monero.h +++ b/src/crypto/cn/CryptoNight_monero.h @@ -141,7 +141,7 @@ vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x10)), vaddq_u64(chunk3, vreinterpretq_u64_u8(_b1))); \ vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x20)), vaddq_u64(chunk1, vreinterpretq_u64_u8(_b))); \ vst1q_u64((uint64_t*)((base_ptr) + ((offset) ^ 0x30)), vaddq_u64(chunk2, vreinterpretq_u64_u8(_a))); \ - if (ALGO == Algorithm::CN_4) { \ + if (ALGO == Algorithm::CN_R) { \ _c = veorq_u64(veorq_u64(_c, chunk3), veorq_u64(chunk1, chunk2)); \ } \ } while (0) diff --git a/src/crypto/cn/gpu/cn_gpu_arm.cpp b/src/crypto/cn/gpu/cn_gpu_arm.cpp index a1df0cc7..520d3fc8 100644 --- a/src/crypto/cn/gpu/cn_gpu_arm.cpp +++ b/src/crypto/cn/gpu/cn_gpu_arm.cpp @@ -26,7 +26,7 @@ #include -#include "crypto/cn/CryptoNight_constants.h" +#include "crypto/cn/CnAlgo.h" inline void vandq_f32(float32x4_t &v, uint32_t v2) @@ -237,4 +237,4 @@ void cn_gpu_inner_arm(const uint8_t *spad, uint8_t *lpad) } } -template void cn_gpu_inner_arm(const uint8_t* spad, uint8_t* lpad); +template void cn_gpu_inner_arm().iterations(), xmrig::CnAlgo().mask()>(const uint8_t* spad, uint8_t* 
lpad); From 69903246812919f12ad1fa6f5fdae888da74c62b Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 16 Jun 2019 03:50:22 +0700 Subject: [PATCH 005/172] Allow null algorithm for pools. --- src/base/net/stratum/Client.cpp | 36 ++++++++++++++++----------------- src/base/net/stratum/Pool.cpp | 36 +++++++++++++++++++++------------ src/base/net/stratum/Pools.cpp | 2 +- src/crypto/common/Algorithm.cpp | 9 +++++++++ src/crypto/common/Algorithm.h | 5 +++++ 5 files changed, 56 insertions(+), 32 deletions(-) diff --git a/src/base/net/stratum/Client.cpp b/src/base/net/stratum/Client.cpp index 05e53c78..4234407c 100644 --- a/src/base/net/stratum/Client.cpp +++ b/src/base/net/stratum/Client.cpp @@ -421,28 +421,28 @@ bool xmrig::Client::send(BIO *bio) bool xmrig::Client::verifyAlgorithm(const Algorithm &algorithm) const { -# ifdef XMRIG_PROXY_PROJECT - if (m_pool.algorithm().variant() == VARIANT_AUTO || m_id == -1) { - return true; - } -# endif +//# ifdef XMRIG_PROXY_PROJECT +// if (m_pool.algorithm().variant() == VARIANT_AUTO || m_id == -1) { +// return true; +// } +//# endif - if (m_pool.algorithm() == algorithm) { // FIXME - return true; - } +// if (m_pool.algorithm() == algorithm) { // FIXME +// return true; +// } - if (isQuiet()) { - return false; - } +// if (isQuiet()) { +// return false; +// } - if (algorithm.isValid()) { - LOG_ERR("Incompatible algorithm \"%s\" detected, reconnect", algorithm.name()); - } - else { - LOG_ERR("Unknown/unsupported algorithm detected, reconnect"); - } +// if (algorithm.isValid()) { +// LOG_ERR("Incompatible algorithm \"%s\" detected, reconnect", algorithm.name()); +// } +// else { +// LOG_ERR("Unknown/unsupported algorithm detected, reconnect"); +// } - return false; + return true; } diff --git a/src/base/net/stratum/Pool.cpp b/src/base/net/stratum/Pool.cpp index bb3fab72..b11e1159 100644 --- a/src/base/net/stratum/Pool.cpp +++ b/src/base/net/stratum/Pool.cpp @@ -172,7 +172,11 @@ bool xmrig::Pool::isEnabled() const } # endif - return 
m_flags.test(FLAG_ENABLED) && isValid() && algorithm().isValid(); + if (isDaemon() && !algorithm().isValid()) { + return false; + } + + return m_flags.test(FLAG_ENABLED) && isValid(); } @@ -259,28 +263,34 @@ rapidjson::Value xmrig::Pool::toJSON(rapidjson::Document &doc) const Value obj(kObjectType); - obj.AddMember(StringRef(kAlgo), StringRef(m_algorithm.shortName()), allocator); + obj.AddMember(StringRef(kAlgo), m_algorithm.toJSON(), allocator); obj.AddMember(StringRef(kUrl), m_url.toJSON(), allocator); obj.AddMember(StringRef(kUser), m_user.toJSON(), allocator); - obj.AddMember(StringRef(kPass), m_password.toJSON(), allocator); - obj.AddMember(StringRef(kRigId), m_rigId.toJSON(), allocator); -# ifndef XMRIG_PROXY_PROJECT - obj.AddMember(StringRef(kNicehash), isNicehash(), allocator); -# endif + if (!isDaemon()) { + obj.AddMember(StringRef(kPass), m_password.toJSON(), allocator); + obj.AddMember(StringRef(kRigId), m_rigId.toJSON(), allocator); - if (m_keepAlive == 0 || m_keepAlive == kKeepAliveTimeout) { - obj.AddMember(StringRef(kKeepalive), m_keepAlive > 0, allocator); - } - else { - obj.AddMember(StringRef(kKeepalive), m_keepAlive, allocator); +# ifndef XMRIG_PROXY_PROJECT + obj.AddMember(StringRef(kNicehash), isNicehash(), allocator); +# endif + + if (m_keepAlive == 0 || m_keepAlive == kKeepAliveTimeout) { + obj.AddMember(StringRef(kKeepalive), m_keepAlive > 0, allocator); + } + else { + obj.AddMember(StringRef(kKeepalive), m_keepAlive, allocator); + } } obj.AddMember(StringRef(kEnabled), m_flags.test(FLAG_ENABLED), allocator); obj.AddMember(StringRef(kTls), isTLS(), allocator); obj.AddMember(StringRef(kFingerprint), m_fingerprint.toJSON(), allocator); obj.AddMember(StringRef(kDaemon), m_flags.test(FLAG_DAEMON), allocator); - obj.AddMember(StringRef(kDaemonPollInterval), m_pollInterval, allocator); + + if (isDaemon()) { + obj.AddMember(StringRef(kDaemonPollInterval), m_pollInterval, allocator); + } return obj; } diff --git a/src/base/net/stratum/Pools.cpp 
b/src/base/net/stratum/Pools.cpp index 985e5d4e..4641ecd4 100644 --- a/src/base/net/stratum/Pools.cpp +++ b/src/base/net/stratum/Pools.cpp @@ -139,7 +139,7 @@ void xmrig::Pools::print() const i, (pool.isEnabled() ? (pool.isTLS() ? 32 : 36) : 31), pool.url().data(), - pool.algorithm().shortName() + pool.algorithm().isValid() ? pool.algorithm().shortName() : "auto" ); i++; diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index 78272f79..b2f93896 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -31,6 +31,7 @@ #include "crypto/common/Algorithm.h" +#include "rapidjson/document.h" #ifdef _MSC_VER @@ -111,6 +112,14 @@ static AlgoName const algorithm_names[] = { } /* namespace xmrig */ +rapidjson::Value xmrig::Algorithm::toJSON() const +{ + using namespace rapidjson; + + return isValid() ? Value(StringRef(shortName())) : Value(kNullType); +} + + xmrig::Algorithm::Family xmrig::Algorithm::family(Id id) { switch (id) { diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index c9388dee..690814e7 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -30,6 +30,9 @@ #include +#include "rapidjson/fwd.h" + + namespace xmrig { @@ -91,6 +94,8 @@ public: inline bool operator==(const Algorithm &other) const { return isEqual(other); } inline operator Algorithm::Id() const { return m_id; } + rapidjson::Value toJSON() const; + static Family family(Id id); static Id parse(const char *name); From b38e432647e41accf7a8f5f10a261592d9a10217 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 17 Jun 2019 04:06:38 +0700 Subject: [PATCH 006/172] Moved keccak files. 
--- CMakeLists.txt | 4 ++-- src/api/Api.cpp | 2 +- src/crypto/cn/CryptoNight_arm.h | 2 +- src/crypto/cn/CryptoNight_x86.h | 5 ++++- src/{common/crypto => crypto/common}/keccak.cpp | 2 +- src/{common/crypto => crypto/common}/keccak.h | 0 src/net/strategies/DonateStrategy.cpp | 2 +- 7 files changed, 10 insertions(+), 7 deletions(-) rename src/{common/crypto => crypto/common}/keccak.cpp (99%) rename src/{common/crypto => crypto/common}/keccak.h (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9c70a673..2dfd52e0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,7 +25,6 @@ set(HEADERS src/api/interfaces/IApiListener.h src/App.h src/common/cpu/Cpu.h - src/common/crypto/keccak.h src/common/interfaces/ICpuInfo.h src/common/Platform.h src/common/xmrig.h @@ -69,6 +68,7 @@ set(HEADERS_CRYPTO src/crypto/cn/skein_port.h src/crypto/cn/soft_aes.h src/crypto/common/Algorithm.h + src/crypto/common/keccak.h src/crypto/common/portable/mm_malloc.h src/crypto/common/VirtualMemory.h ) @@ -83,7 +83,6 @@ set(SOURCES "${SOURCES_BASE}" "${SOURCES_BASE_HTTP}" src/App.cpp - src/common/crypto/keccak.cpp src/common/Platform.cpp src/core/config/Config.cpp src/core/config/ConfigTransform.cpp @@ -109,6 +108,7 @@ set(SOURCES_CRYPTO src/crypto/cn/c_skein.c src/crypto/cn/CnHash.cpp src/crypto/common/Algorithm.cpp + src/crypto/common/keccak.cpp ) if (WIN32) diff --git a/src/api/Api.cpp b/src/api/Api.cpp index a11325f3..caebcba7 100644 --- a/src/api/Api.cpp +++ b/src/api/Api.cpp @@ -39,9 +39,9 @@ #include "base/kernel/Base.h" #include "base/tools/Buffer.h" #include "base/tools/Chrono.h" -#include "common/crypto/keccak.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "crypto/common/keccak.h" #include "version.h" diff --git a/src/crypto/cn/CryptoNight_arm.h b/src/crypto/cn/CryptoNight_arm.h index 6d56b548..02266634 100644 --- a/src/crypto/cn/CryptoNight_arm.h +++ b/src/crypto/cn/CryptoNight_arm.h @@ -28,11 +28,11 @@ #define XMRIG_CRYPTONIGHT_ARM_H -#include 
"common/crypto/keccak.h" #include "crypto/cn/CnAlgo.h" #include "crypto/cn/CryptoNight_monero.h" #include "crypto/cn/CryptoNight.h" #include "crypto/cn/soft_aes.h" +#include "crypto/common/keccak.h" #include "crypto/common/portable/mm_malloc.h" diff --git a/src/crypto/cn/CryptoNight_x86.h b/src/crypto/cn/CryptoNight_x86.h index 994ee116..fc21c7b0 100644 --- a/src/crypto/cn/CryptoNight_x86.h +++ b/src/crypto/cn/CryptoNight_x86.h @@ -36,11 +36,11 @@ #include "common/cpu/Cpu.h" -#include "common/crypto/keccak.h" #include "crypto/cn/CnAlgo.h" #include "crypto/cn/CryptoNight_monero.h" #include "crypto/cn/CryptoNight.h" #include "crypto/cn/soft_aes.h" +#include "crypto/common/keccak.h" extern "C" @@ -785,6 +785,9 @@ extern "C" void cnv2_rwz_double_mainloop_asm(cryptonight_ctx **ctx); namespace xmrig { +typedef void (*cn_mainloop_fun)(cryptonight_ctx **ctx); + + extern cn_mainloop_fun cn_half_mainloop_ivybridge_asm; extern cn_mainloop_fun cn_half_mainloop_ryzen_asm; extern cn_mainloop_fun cn_half_mainloop_bulldozer_asm; diff --git a/src/common/crypto/keccak.cpp b/src/crypto/common/keccak.cpp similarity index 99% rename from src/common/crypto/keccak.cpp rename to src/crypto/common/keccak.cpp index 0219ce36..132ae0a8 100644 --- a/src/common/crypto/keccak.cpp +++ b/src/crypto/common/keccak.cpp @@ -27,7 +27,7 @@ #include -#include "common/crypto/keccak.h" +#include "crypto/common/keccak.h" #define HASH_DATA_AREA 136 diff --git a/src/common/crypto/keccak.h b/src/crypto/common/keccak.h similarity index 100% rename from src/common/crypto/keccak.h rename to src/crypto/common/keccak.h diff --git a/src/net/strategies/DonateStrategy.cpp b/src/net/strategies/DonateStrategy.cpp index 3d913087..9669db9a 100644 --- a/src/net/strategies/DonateStrategy.cpp +++ b/src/net/strategies/DonateStrategy.cpp @@ -32,11 +32,11 @@ #include "base/net/stratum/strategies/SinglePoolStrategy.h" #include "base/tools/Buffer.h" #include "base/tools/Timer.h" -#include "common/crypto/keccak.h" #include 
"common/Platform.h" #include "common/xmrig.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "crypto/common/keccak.h" #include "net/Network.h" #include "net/strategies/DonateStrategy.h" #include "rapidjson/document.h" From 66d62de681bde910eb7e36f3a3a58c49016f1430 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 28 Jun 2019 13:08:08 +0700 Subject: [PATCH 007/172] Merge Assembly enum and Asm class. --- cmake/asm.cmake | 6 +- src/Summary.cpp | 6 +- src/common/cpu/BasicCpuInfo.h | 2 +- src/common/interfaces/ICpuInfo.h | 6 +- src/common/xmrig.h | 10 --- src/core/config/Config.cpp | 7 +- src/core/cpu/AdvancedCpuInfo.cpp | 5 +- src/core/cpu/AdvancedCpuInfo.h | 2 +- src/crypto/cn/Asm.h | 50 ------------ src/crypto/cn/CnHash.cpp | 42 +++++----- src/crypto/cn/CnHash.h | 5 +- src/crypto/cn/CryptoNight_x86.h | 28 +++---- src/crypto/cn/r/CryptonightR_gen.cpp | 5 +- src/crypto/common/Algorithm.cpp | 1 - src/crypto/common/Algorithm.h | 2 +- .../{cn/Asm.cpp => common/Assembly.cpp} | 35 ++++---- src/crypto/common/Assembly.h | 79 +++++++++++++++++++ src/workers/CpuThread.cpp | 10 +-- src/workers/CpuThread.h | 2 +- 19 files changed, 164 insertions(+), 139 deletions(-) delete mode 100644 src/crypto/cn/Asm.h rename src/crypto/{cn/Asm.cpp => common/Assembly.cpp} (72%) create mode 100644 src/crypto/common/Assembly.h diff --git a/cmake/asm.cmake b/cmake/asm.cmake index d3010e51..e445defd 100644 --- a/cmake/asm.cmake +++ b/cmake/asm.cmake @@ -36,7 +36,11 @@ if (WITH_ASM AND NOT XMRIG_ARM AND CMAKE_SIZEOF_VOID_P EQUAL 8) endif() add_library(${XMRIG_ASM_LIBRARY} STATIC ${XMRIG_ASM_FILES}) - set(XMRIG_ASM_SOURCES src/crypto/cn/Asm.h src/crypto/cn/Asm.cpp src/crypto/cn/r/CryptonightR_gen.cpp) + set(XMRIG_ASM_SOURCES + src/crypto/common/Assembly.h + src/crypto/common/Assembly.cpp + src/crypto/cn/r/CryptonightR_gen.cpp + ) set_property(TARGET ${XMRIG_ASM_LIBRARY} PROPERTY LINKER_LANGUAGE C) add_definitions(/DXMRIG_FEATURE_ASM) diff --git a/src/Summary.cpp b/src/Summary.cpp 
index 2ba0fd57..13973c0f 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -33,7 +33,7 @@ #include "common/cpu/Cpu.h" #include "core/config/Config.h" #include "core/Controller.h" -#include "crypto/cn/Asm.h" +#include "crypto/common/Assembly.h" #include "Mem.h" #include "Summary.h" #include "version.h" @@ -49,7 +49,7 @@ static const char *coloredAsmNames[] = { }; -inline static const char *asmName(xmrig::Assembly assembly) +inline static const char *asmName(xmrig::Assembly::Id assembly) { return coloredAsmNames[assembly]; } @@ -109,7 +109,7 @@ static void print_threads(xmrig::Config *config) } # ifdef XMRIG_FEATURE_ASM - if (config->assembly() == xmrig::ASM_AUTO) { + if (config->assembly() == xmrig::Assembly::AUTO) { const xmrig::Assembly assembly = xmrig::Cpu::info()->assembly(); xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13sauto:%s"), "ASSEMBLY", asmName(assembly)); diff --git a/src/common/cpu/BasicCpuInfo.h b/src/common/cpu/BasicCpuInfo.h index 95857ed2..f6daee54 100644 --- a/src/common/cpu/BasicCpuInfo.h +++ b/src/common/cpu/BasicCpuInfo.h @@ -40,7 +40,7 @@ public: protected: size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const override; - inline Assembly assembly() const override { return m_assembly; } + inline Assembly::Id assembly() const override { return m_assembly; } inline bool hasAES() const override { return m_aes; } inline bool hasAVX2() const override { return m_avx2; } inline bool isSupported() const override { return true; } diff --git a/src/common/interfaces/ICpuInfo.h b/src/common/interfaces/ICpuInfo.h index dd4034b3..907f3f63 100644 --- a/src/common/interfaces/ICpuInfo.h +++ b/src/common/interfaces/ICpuInfo.h @@ -30,7 +30,7 @@ #include -#include "common/xmrig.h" +#include "crypto/common/Assembly.h" namespace xmrig { @@ -39,7 +39,7 @@ namespace xmrig { class ICpuInfo { public: - virtual ~ICpuInfo() {} + virtual ~ICpuInfo() = default; virtual bool hasAES() const = 0; virtual bool hasAVX2() const = 0; @@ -53,7 +53,7 @@ 
public: virtual int32_t sockets() const = 0; virtual int32_t threads() const = 0; virtual size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const = 0; - virtual xmrig::Assembly assembly() const = 0; + virtual Assembly::Id assembly() const = 0; }; diff --git a/src/common/xmrig.h b/src/common/xmrig.h index 5dd41845..169c4c1f 100644 --- a/src/common/xmrig.h +++ b/src/common/xmrig.h @@ -72,16 +72,6 @@ enum OclVendor { }; -enum Assembly { - ASM_NONE, - ASM_AUTO, - ASM_INTEL, - ASM_RYZEN, - ASM_BULLDOZER, - ASM_MAX -}; - - } /* namespace xmrig */ diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 69ac065f..93bd47ff 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -32,7 +32,7 @@ #include "base/kernel/interfaces/IJsonReader.h" #include "common/cpu/Cpu.h" #include "core/config/Config.h" -#include "crypto/cn/Asm.h" +#include "crypto/common/Assembly.h" #include "rapidjson/document.h" #include "rapidjson/filewritestream.h" #include "rapidjson/prettywriter.h" @@ -45,7 +45,6 @@ static char affinity_tmp[20] = { 0 }; xmrig::Config::Config() : m_aesMode(AES_AUTO), m_algoVariant(AV_AUTO), - m_assembly(ASM_AUTO), m_hugePages(true), m_safe(false), m_shouldSave(false), @@ -99,7 +98,7 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const doc.AddMember("http", m_http.toJSON(doc), allocator); # ifdef XMRIG_FEATURE_ASM - doc.AddMember("asm", Asm::toJSON(m_assembly), allocator); + doc.AddMember("asm", m_assembly.toJSON(), allocator); # endif doc.AddMember("autosave", isAutoSave(), allocator); @@ -285,6 +284,6 @@ xmrig::AlgoVariant xmrig::Config::getAlgoVariantLite() const #ifdef XMRIG_FEATURE_ASM void xmrig::Config::setAssembly(const rapidjson::Value &assembly) { - m_assembly = Asm::parse(assembly); + m_assembly = assembly; } #endif diff --git a/src/core/cpu/AdvancedCpuInfo.cpp b/src/core/cpu/AdvancedCpuInfo.cpp index df6a385e..922e8311 100644 --- a/src/core/cpu/AdvancedCpuInfo.cpp +++ b/src/core/cpu/AdvancedCpuInfo.cpp 
@@ -31,7 +31,6 @@ xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : - m_assembly(ASM_NONE), m_aes(false), m_avx2(false), m_L2_exclusive(false), @@ -78,10 +77,10 @@ xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : m_aes = true; if (data.vendor == VENDOR_AMD) { - m_assembly = (data.ext_family >= 23) ? ASM_RYZEN : ASM_BULLDOZER; + m_assembly = (data.ext_family >= 23) ? Assembly::RYZEN : Assembly::BULLDOZER; } else if (data.vendor == VENDOR_INTEL) { - m_assembly = ASM_INTEL; + m_assembly = Assembly::INTEL; } } diff --git a/src/core/cpu/AdvancedCpuInfo.h b/src/core/cpu/AdvancedCpuInfo.h index 0765da33..90152640 100644 --- a/src/core/cpu/AdvancedCpuInfo.h +++ b/src/core/cpu/AdvancedCpuInfo.h @@ -40,7 +40,7 @@ public: protected: size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const override; - inline Assembly assembly() const override { return m_assembly; } + inline Assembly::Id assembly() const override { return m_assembly; } inline bool hasAES() const override { return m_aes; } inline bool hasAVX2() const override { return m_avx2; } inline bool isSupported() const override { return true; } diff --git a/src/crypto/cn/Asm.h b/src/crypto/cn/Asm.h deleted file mode 100644 index 3b755fd6..00000000 --- a/src/crypto/cn/Asm.h +++ /dev/null @@ -1,50 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2016-2018 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef XMRIG_ASM_H -#define XMRIG_ASM_H - - -#include "common/xmrig.h" -#include "rapidjson/fwd.h" - - -namespace xmrig { - - -class Asm -{ -public: - static Assembly parse(const char *assembly, Assembly defaultValue = ASM_AUTO); - static Assembly parse(const rapidjson::Value &value, Assembly defaultValue = ASM_AUTO); - static const char *toString(Assembly assembly); - static rapidjson::Value toJSON(Assembly assembly); - - inline static Assembly parse(bool enable) { return enable ? ASM_AUTO : ASM_NONE; } -}; - - -} /* namespace xmrig */ - - -#endif /* XMRIG_ASM_H */ diff --git a/src/crypto/cn/CnHash.cpp b/src/crypto/cn/CnHash.cpp index 61d2ea69..d17a8e2d 100644 --- a/src/crypto/cn/CnHash.cpp +++ b/src/crypto/cn/CnHash.cpp @@ -39,26 +39,26 @@ #define ADD_FN(algo) \ - m_map[algo][AV_SINGLE][ASM_NONE] = cryptonight_single_hash; \ - m_map[algo][AV_SINGLE_SOFT][ASM_NONE] = cryptonight_single_hash; \ - m_map[algo][AV_DOUBLE][ASM_NONE] = cryptonight_double_hash; \ - m_map[algo][AV_DOUBLE_SOFT][ASM_NONE] = cryptonight_double_hash; \ - m_map[algo][AV_TRIPLE][ASM_NONE] = cryptonight_triple_hash; \ - m_map[algo][AV_TRIPLE_SOFT][ASM_NONE] = cryptonight_triple_hash; \ - m_map[algo][AV_QUAD][ASM_NONE] = cryptonight_quad_hash; \ - m_map[algo][AV_QUAD_SOFT][ASM_NONE] = cryptonight_quad_hash; \ - m_map[algo][AV_PENTA][ASM_NONE] = cryptonight_penta_hash; \ - m_map[algo][AV_PENTA_SOFT][ASM_NONE] = cryptonight_penta_hash; + m_map[algo][AV_SINGLE][Assembly::NONE] = cryptonight_single_hash; \ + m_map[algo][AV_SINGLE_SOFT][Assembly::NONE] = cryptonight_single_hash; \ + m_map[algo][AV_DOUBLE][Assembly::NONE] = cryptonight_double_hash; \ + m_map[algo][AV_DOUBLE_SOFT][Assembly::NONE] = cryptonight_double_hash; \ + m_map[algo][AV_TRIPLE][Assembly::NONE] = cryptonight_triple_hash; \ + 
m_map[algo][AV_TRIPLE_SOFT][Assembly::NONE] = cryptonight_triple_hash; \ + m_map[algo][AV_QUAD][Assembly::NONE] = cryptonight_quad_hash; \ + m_map[algo][AV_QUAD_SOFT][Assembly::NONE] = cryptonight_quad_hash; \ + m_map[algo][AV_PENTA][Assembly::NONE] = cryptonight_penta_hash; \ + m_map[algo][AV_PENTA_SOFT][Assembly::NONE] = cryptonight_penta_hash; #ifdef XMRIG_FEATURE_ASM # define ADD_FN_ASM(algo) \ - m_map[algo][AV_SINGLE][ASM_INTEL] = cryptonight_single_hash_asm; \ - m_map[algo][AV_SINGLE][ASM_RYZEN] = cryptonight_single_hash_asm; \ - m_map[algo][AV_SINGLE][ASM_BULLDOZER] = cryptonight_single_hash_asm; \ - m_map[algo][AV_DOUBLE][ASM_INTEL] = cryptonight_double_hash_asm; \ - m_map[algo][AV_DOUBLE][ASM_RYZEN] = cryptonight_double_hash_asm; \ - m_map[algo][AV_DOUBLE][ASM_BULLDOZER] = cryptonight_double_hash_asm; + m_map[algo][AV_SINGLE][Assembly::INTEL] = cryptonight_single_hash_asm; \ + m_map[algo][AV_SINGLE][Assembly::RYZEN] = cryptonight_single_hash_asm; \ + m_map[algo][AV_SINGLE][Assembly::BULLDOZER] = cryptonight_single_hash_asm; \ + m_map[algo][AV_DOUBLE][Assembly::INTEL] = cryptonight_double_hash_asm; \ + m_map[algo][AV_DOUBLE][Assembly::RYZEN] = cryptonight_double_hash_asm; \ + m_map[algo][AV_DOUBLE][Assembly::BULLDOZER] = cryptonight_double_hash_asm; extern "C" void cnv2_mainloop_ivybridge_asm(cryptonight_ctx **ctx); @@ -226,8 +226,8 @@ xmrig::CnHash::CnHash() ADD_FN_ASM(Algorithm::CN_DOUBLE); # ifdef XMRIG_ALGO_CN_GPU - m_map[Algorithm::CN_GPU][AV_SINGLE][ASM_NONE] = cryptonight_single_hash_gpu; - m_map[Algorithm::CN_GPU][AV_SINGLE_SOFT][ASM_NONE] = cryptonight_single_hash_gpu; + m_map[Algorithm::CN_GPU][AV_SINGLE][Assembly::NONE] = cryptonight_single_hash_gpu; + m_map[Algorithm::CN_GPU][AV_SINGLE_SOFT][Assembly::NONE] = cryptonight_single_hash_gpu; # endif # ifdef XMRIG_ALGO_CN_LITE @@ -252,18 +252,18 @@ xmrig::CnHash::CnHash() } -xmrig::cn_hash_fun xmrig::CnHash::fn(const Algorithm &algorithm, AlgoVariant av, Assembly assembly) const +xmrig::cn_hash_fun 
xmrig::CnHash::fn(const Algorithm &algorithm, AlgoVariant av, Assembly::Id assembly) const { if (!algorithm.isValid()) { return nullptr; } # ifdef XMRIG_FEATURE_ASM - cn_hash_fun fun = m_map[algorithm][av][assembly == ASM_AUTO ? Cpu::info()->assembly() : assembly]; + cn_hash_fun fun = m_map[algorithm][av][assembly == Assembly::AUTO ? Cpu::info()->assembly() : assembly]; if (fun) { return fun; } # endif - return m_map[algorithm][av][ASM_NONE]; + return m_map[algorithm][av][Assembly::NONE]; } diff --git a/src/crypto/cn/CnHash.h b/src/crypto/cn/CnHash.h index 5fbf5c8a..b57bff4c 100644 --- a/src/crypto/cn/CnHash.h +++ b/src/crypto/cn/CnHash.h @@ -33,6 +33,7 @@ #include "common/xmrig.h" #include "crypto/cn/CnAlgo.h" +#include "crypto/common/Assembly.h" struct cryptonight_ctx; @@ -50,10 +51,10 @@ class CnHash public: CnHash(); - cn_hash_fun fn(const Algorithm &algorithm, AlgoVariant av, Assembly assembly) const; + cn_hash_fun fn(const Algorithm &algorithm, AlgoVariant av, Assembly::Id assembly) const; private: - cn_hash_fun m_map[Algorithm::MAX][AV_MAX][ASM_MAX] = {}; + cn_hash_fun m_map[Algorithm::MAX][AV_MAX][Assembly::MAX] = {}; }; diff --git a/src/crypto/cn/CryptoNight_x86.h b/src/crypto/cn/CryptoNight_x86.h index fc21c7b0..b24dea57 100644 --- a/src/crypto/cn/CryptoNight_x86.h +++ b/src/crypto/cn/CryptoNight_x86.h @@ -577,10 +577,10 @@ inline void cryptonight_single_hash(const uint8_t *__restrict__ input, size_t si const int code_size = v4_random_math_init(code, height); if (ALGO == Algorithm::CN_WOW) { - wow_soft_aes_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), ASM_NONE); + wow_soft_aes_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), Assembly::NONE); } else if (ALGO == Algorithm::CN_R) { - v4_soft_aes_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), ASM_NONE); + v4_soft_aes_compile_code(code, code_size, reinterpret_cast(ctx[0]->generated_code), Assembly::NONE); } ctx[0]->generated_code_data = 
{ ALGO, height }; @@ -849,7 +849,7 @@ void cn_r_compile_code_double(const V4_Instruction* co namespace xmrig { -template +template inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { constexpr CnAlgo props; @@ -866,10 +866,10 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_ cn_explode_scratchpad(reinterpret_cast(ctx[0]->state), reinterpret_cast<__m128i*>(ctx[0]->memory)); if (ALGO == Algorithm::CN_2) { - if (ASM == ASM_INTEL) { + if (ASM == Assembly::INTEL) { cnv2_mainloop_ivybridge_asm(ctx); } - else if (ASM == ASM_RYZEN) { + else if (ASM == Assembly::RYZEN) { cnv2_mainloop_ryzen_asm(ctx); } else { @@ -877,10 +877,10 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_ } } else if (ALGO == Algorithm::CN_HALF) { - if (ASM == ASM_INTEL) { + if (ASM == Assembly::INTEL) { cn_half_mainloop_ivybridge_asm(ctx); } - else if (ASM == ASM_RYZEN) { + else if (ASM == Assembly::RYZEN) { cn_half_mainloop_ryzen_asm(ctx); } else { @@ -889,10 +889,10 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_ } # ifdef XMRIG_ALGO_CN_PICO else if (ALGO == Algorithm::CN_PICO_0) { - if (ASM == ASM_INTEL) { + if (ASM == Assembly::INTEL) { cn_trtl_mainloop_ivybridge_asm(ctx); } - else if (ASM == ASM_RYZEN) { + else if (ASM == Assembly::RYZEN) { cn_trtl_mainloop_ryzen_asm(ctx); } else { @@ -904,10 +904,10 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_ cnv2_rwz_mainloop_asm(ctx); } else if (ALGO == Algorithm::CN_ZLS) { - if (ASM == ASM_INTEL) { + if (ASM == Assembly::INTEL) { cn_zls_mainloop_ivybridge_asm(ctx); } - else if (ASM == ASM_RYZEN) { + else if (ASM == Assembly::RYZEN) { cn_zls_mainloop_ryzen_asm(ctx); } else { @@ -915,10 +915,10 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_ } } else if (ALGO == 
Algorithm::CN_DOUBLE) { - if (ASM == ASM_INTEL) { + if (ASM == Assembly::INTEL) { cn_double_mainloop_ivybridge_asm(ctx); } - else if (ASM == ASM_RYZEN) { + else if (ASM == Assembly::RYZEN) { cn_double_mainloop_ryzen_asm(ctx); } else { @@ -935,7 +935,7 @@ inline void cryptonight_single_hash_asm(const uint8_t *__restrict__ input, size_ } -template +template inline void cryptonight_double_hash_asm(const uint8_t *__restrict__ input, size_t size, uint8_t *__restrict__ output, cryptonight_ctx **__restrict__ ctx, uint64_t height) { constexpr CnAlgo props; diff --git a/src/crypto/cn/r/CryptonightR_gen.cpp b/src/crypto/cn/r/CryptonightR_gen.cpp index 8491a33b..3037327a 100644 --- a/src/crypto/cn/r/CryptonightR_gen.cpp +++ b/src/crypto/cn/r/CryptonightR_gen.cpp @@ -29,6 +29,7 @@ typedef void(*void_func)(); #include "crypto/cn/asm/CryptonightR_template.h" +#include "crypto/common/Assembly.h" #include "crypto/common/VirtualMemory.h" #include "Mem.h" @@ -42,7 +43,7 @@ static inline void add_code(uint8_t* &p, void (*p1)(), void (*p2)()) } } -static inline void add_random_math(uint8_t* &p, const V4_Instruction* code, int code_size, const void_func* instructions, const void_func* instructions_mov, bool is_64_bit, xmrig::Assembly ASM) +static inline void add_random_math(uint8_t* &p, const V4_Instruction* code, int code_size, const void_func* instructions, const void_func* instructions_mov, bool is_64_bit, xmrig::Assembly::Id ASM) { uint32_t prev_rot_src = (uint32_t)(-1); @@ -76,7 +77,7 @@ static inline void add_random_math(uint8_t* &p, const V4_Instruction* code, int void_func begin = instructions[c]; - if ((ASM = xmrig::ASM_BULLDOZER) && (inst.opcode == MUL) && !is_64_bit) { + if ((ASM == xmrig::Assembly::BULLDOZER) && (inst.opcode == MUL) && !is_64_bit) { // AMD Bulldozer has latency 4 for 32-bit IMUL and 6 for 64-bit IMUL // Always use 32-bit IMUL for AMD Bulldozer in 32-bit mode - skip prefix 0x48 and change 0x49 to 0x41 uint8_t* prefix = reinterpret_cast(begin); diff --git
a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index e70b0659..66b3ddda 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -35,7 +35,6 @@ #ifdef _MSC_VER -# define strncasecmp _strnicmp # define strcasecmp _stricmp #endif diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index ccaf7de5..92c6f405 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -69,7 +69,7 @@ public: CN_PICO_0, // "cn-pico" CryptoNight Turtle (TRTL) # endif # ifdef XMRIG_ALGO_RANDOMX - RX_WOW, // "rx/wow" RandomWOW + RX_WOW, // "rx/wow" RandomWOW (Wownero) # endif MAX }; diff --git a/src/crypto/cn/Asm.cpp b/src/crypto/common/Assembly.cpp similarity index 72% rename from src/crypto/cn/Asm.cpp rename to src/crypto/common/Assembly.cpp index 331c133d..44bf0a94 100644 --- a/src/crypto/cn/Asm.cpp +++ b/src/crypto/common/Assembly.cpp @@ -6,7 +6,8 @@ * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , * Copyright 2018 SChernykh - * Copyright 2016-2018 XMRig , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -28,15 +29,17 @@ #ifdef _MSC_VER -# define strncasecmp _strnicmp # define strcasecmp _stricmp #endif -#include "crypto/cn/Asm.h" +#include "crypto/common/Assembly.h" #include "rapidjson/document.h" +namespace xmrig { + + static const char *asmNames[] = { "none", "auto", @@ -46,11 +49,13 @@ static const char *asmNames[] = { }; -xmrig::Assembly xmrig::Asm::parse(const char *assembly, Assembly defaultValue) +} /* namespace xmrig */ + + +xmrig::Assembly::Id xmrig::Assembly::parse(const char *assembly, Id defaultValue) { constexpr size_t const size = sizeof(asmNames) / sizeof((asmNames)[0]); - assert(assembly != nullptr); - assert(ASM_MAX == size); + static_assert(size == MAX, "asmNames size mismatch"); if (assembly == nullptr) 
{ return defaultValue; @@ -58,7 +63,7 @@ xmrig::Assembly xmrig::Asm::parse(const char *assembly, Assembly defaultValue) for (size_t i = 0; i < size; i++) { if (strcasecmp(assembly, asmNames[i]) == 0) { - return static_cast(i); + return static_cast(i); } } @@ -66,10 +71,10 @@ xmrig::Assembly xmrig::Asm::parse(const char *assembly, Assembly defaultValue) } -xmrig::Assembly xmrig::Asm::parse(const rapidjson::Value &value, Assembly defaultValue) +xmrig::Assembly::Id xmrig::Assembly::parse(const rapidjson::Value &value, Id defaultValue) { if (value.IsBool()) { - return parse(value.GetBool()); + return value.GetBool() ? AUTO : NONE; } if (value.IsString()) { @@ -80,23 +85,23 @@ xmrig::Assembly xmrig::Asm::parse(const rapidjson::Value &value, Assembly defaul } -const char *xmrig::Asm::toString(Assembly assembly) +const char *xmrig::Assembly::toString() const { - return asmNames[assembly]; + return asmNames[m_id]; } -rapidjson::Value xmrig::Asm::toJSON(Assembly assembly) +rapidjson::Value xmrig::Assembly::toJSON() const { using namespace rapidjson; - if (assembly == ASM_NONE) { + if (m_id == NONE) { return Value(false); } - if (assembly == ASM_AUTO) { + if (m_id == AUTO) { return Value(true); } - return Value(StringRef(toString(assembly))); + return Value(StringRef(toString())); } diff --git a/src/crypto/common/Assembly.h b/src/crypto/common/Assembly.h new file mode 100644 index 00000000..e4964d07 --- /dev/null +++ b/src/crypto/common/Assembly.h @@ -0,0 +1,79 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later 
version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_ASSEMBLY_H +#define XMRIG_ASSEMBLY_H + + +#include "common/xmrig.h" +#include "rapidjson/fwd.h" + + +namespace xmrig { + + +class Assembly +{ +public: + enum Id : int { + NONE, + AUTO, + INTEL, + RYZEN, + BULLDOZER, + MAX + }; + + + inline Assembly() {} + inline Assembly(Id id) : m_id(id) {} + inline Assembly(const char *assembly) : m_id(parse(assembly)) {} + inline Assembly(const rapidjson::Value &value) : m_id(parse(value)) {} + + static Id parse(const char *assembly, Id defaultValue = AUTO); + static Id parse(const rapidjson::Value &value, Id defaultValue = AUTO); + + const char *toString() const; + rapidjson::Value toJSON() const; + +// inline static Assembly parse(bool enable) { return enable ? 
ASM_AUTO : ASM_NONE; } + + inline bool isEqual(const Assembly &other) const { return m_id == other.m_id; } + + + inline bool operator!=(const Assembly &other) const { return !isEqual(other); } + inline bool operator!=(const Assembly::Id &id) const { return m_id != id; } + inline bool operator==(const Assembly &other) const { return isEqual(other); } + inline bool operator==(const Assembly::Id &id) const { return m_id == id; } + inline operator Assembly::Id() const { return m_id; } + +private: + Id m_id = AUTO; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_ASSEMBLY_H */ diff --git a/src/workers/CpuThread.cpp b/src/workers/CpuThread.cpp index e26b8a0a..9f20a35a 100644 --- a/src/workers/CpuThread.cpp +++ b/src/workers/CpuThread.cpp @@ -27,8 +27,8 @@ #include "base/io/log/Log.h" #include "common/cpu/Cpu.h" -#include "crypto/cn/Asm.h" #include "crypto/cn/CnHash.h" +#include "crypto/common/Assembly.h" #include "crypto/common/VirtualMemory.h" #include "Mem.h" #include "rapidjson/document.h" @@ -36,8 +36,6 @@ - - static const xmrig::CnHash cnHash; @@ -136,7 +134,7 @@ xmrig::CpuThread::Data xmrig::CpuThread::parse(const rapidjson::Value &object) } # ifdef XMRIG_FEATURE_ASM - data.assembly = Asm::parse(object["asm"]); + data.assembly = object["asm"]; # endif return data; @@ -181,7 +179,7 @@ void xmrig::CpuThread::print() const index(), static_cast(multiway()), static_cast(m_av)); # ifdef XMRIG_FEATURE_ASM - LOG_DEBUG(" assembly: %s, affine_to_cpu: %" PRId64, Asm::toString(m_assembly), affinity()); + LOG_DEBUG(" assembly: %s, affine_to_cpu: %" PRId64, m_assembly.toString(), affinity()); # else LOG_DEBUG(" affine_to_cpu: %" PRId64, affinity()); # endif @@ -220,7 +218,7 @@ rapidjson::Value xmrig::CpuThread::toConfig(rapidjson::Document &doc) const obj.AddMember("affine_to_cpu", affinity() == -1L ? 
Value(kFalseType) : Value(affinity()), allocator); # ifdef XMRIG_FEATURE_ASM - obj.AddMember("asm", Asm::toJSON(m_assembly), allocator); + obj.AddMember("asm", m_assembly.toJSON(), allocator); # endif return obj; diff --git a/src/workers/CpuThread.h b/src/workers/CpuThread.h index 08aa89cb..a43a0c09 100644 --- a/src/workers/CpuThread.h +++ b/src/workers/CpuThread.h @@ -42,7 +42,7 @@ class CpuThread : public IThread public: struct Data { - inline Data() : assembly(ASM_AUTO), valid(false), affinity(-1L), multiway(SingleWay) {} + inline Data() : valid(false), affinity(-1L), multiway(SingleWay) {} inline void setMultiway(int value) { From dd875c7c3797dbb44ffb85641fe3838d93e57236 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 28 Jun 2019 22:28:40 +0700 Subject: [PATCH 008/172] Added class CpuConfig. --- CMakeLists.txt | 3 + src/App.cpp | 2 +- src/Summary.cpp | 10 +-- src/backend/cpu/CpuConfig.cpp | 110 ++++++++++++++++++++++++++++++++ src/backend/cpu/CpuConfig.h | 72 +++++++++++++++++++++ src/backend/cpu/cpu.cmake | 7 +++ src/base/kernel/Base.cpp | 2 +- src/core/config/Config.cpp | 115 ++++++++-------------------------- src/core/config/Config.h | 40 ++---------- src/crypto/common/Assembly.h | 5 +- 10 files changed, 233 insertions(+), 133 deletions(-) create mode 100644 src/backend/cpu/CpuConfig.cpp create mode 100644 src/backend/cpu/CpuConfig.h create mode 100644 src/backend/cpu/cpu.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index a402174a..832c95d7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,11 +18,13 @@ option(WITH_EMBEDDED_CONFIG "Enable internal embedded JSON config" OFF) include (CheckIncludeFile) include (cmake/cpu.cmake) include (src/base/base.cmake) +include (src/backend/cpu/cpu.cmake) set(HEADERS "${HEADERS_BASE}" "${HEADERS_BASE_HTTP}" + "${HEADERS_CPU}" src/api/interfaces/IApiListener.h src/App.h src/common/cpu/Cpu.h @@ -83,6 +85,7 @@ endif() set(SOURCES "${SOURCES_BASE}" "${SOURCES_BASE_HTTP}" + "${SOURCES_CPU}" src/App.cpp 
src/common/Platform.cpp src/core/config/Config.cpp diff --git a/src/App.cpp b/src/App.cpp index 66662eb1..082bbeef 100644 --- a/src/App.cpp +++ b/src/App.cpp @@ -77,7 +77,7 @@ int xmrig::App::exec() background(); - Mem::init(m_controller->config()->isHugePages()); + Mem::init(m_controller->config()->cpu().isHugePages()); Summary::print(m_controller); diff --git a/src/Summary.cpp b/src/Summary.cpp index 13973c0f..a51f8f59 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -86,9 +86,9 @@ static void print_threads(xmrig::Config *config) { if (config->threadsMode() != xmrig::Config::Advanced) { char buf[32] = { 0 }; - if (config->affinity() != -1L) { - snprintf(buf, sizeof buf, ", affinity=0x%" PRIX64, config->affinity()); - } +// if (config->affinity() != -1L) { +// snprintf(buf, sizeof buf, ", affinity=0x%" PRIX64, config->affinity()); +// } xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%d") WHITE_BOLD(", av=%d, %sdonate=%d%%") WHITE_BOLD("%s"), "THREADS", @@ -109,13 +109,13 @@ static void print_threads(xmrig::Config *config) } # ifdef XMRIG_FEATURE_ASM - if (config->assembly() == xmrig::Assembly::AUTO) { + if (config->cpu().assembly() == xmrig::Assembly::AUTO) { const xmrig::Assembly assembly = xmrig::Cpu::info()->assembly(); xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13sauto:%s"), "ASSEMBLY", asmName(assembly)); } else { - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s"), "ASSEMBLY", asmName(config->assembly())); + xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s"), "ASSEMBLY", asmName(config->cpu().assembly())); } # endif } diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp new file mode 100644 index 00000000..b3d780f4 --- /dev/null +++ b/src/backend/cpu/CpuConfig.cpp @@ -0,0 +1,110 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * 
Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#include "backend/cpu/CpuConfig.h" +#include "base/io/json/Json.h" +#include "common/cpu/Cpu.h" +#include "rapidjson/document.h" + + +namespace xmrig { + + +static const char *kEnabled = "enabled"; +static const char *kHugePages = "huge-pages"; +static const char *kHwAes = "hw-aes"; +static const char *kPriority = "priority"; + + +#ifdef XMRIG_FEATURE_ASM +static const char *kAsm = "asm"; +#endif + +} + + +xmrig::CpuConfig::CpuConfig() +{ +} + + +bool xmrig::CpuConfig::isHwAES() const +{ + return (m_aes == AES_AUTO ? (Cpu::info()->hasAES() ? AES_HW : AES_SOFT) : m_aes) == AES_HW; +} + + +rapidjson::Value xmrig::CpuConfig::toJSON(rapidjson::Document &doc) const +{ + using namespace rapidjson; + + auto &allocator = doc.GetAllocator(); + + Value obj(kObjectType); + + obj.AddMember(StringRef(kEnabled), m_enabled, allocator); + obj.AddMember(StringRef(kHugePages), m_hugePages, allocator); + obj.AddMember(StringRef(kHwAes), m_aes == AES_AUTO ? Value(kNullType) : Value(m_aes == AES_HW), allocator); + obj.AddMember(StringRef(kPriority), priority() != -1 ? 
Value(priority()) : Value(kNullType), allocator); + +# ifdef XMRIG_FEATURE_ASM + obj.AddMember(StringRef(kAsm), m_assembly.toJSON(), allocator); +# endif + + return obj; +} + + +void xmrig::CpuConfig::read(const rapidjson::Value &value) +{ + if (value.IsObject()) { + m_enabled = Json::getBool(value, kEnabled, m_enabled); + m_hugePages = Json::getBool(value, kHugePages, m_hugePages); + + setAesMode(Json::getValue(value, kHwAes)); + setPriority(Json::getInt(value, kPriority, -1)); + +# ifdef XMRIG_FEATURE_ASM + m_assembly = Json::getValue(value, kAsm); +# endif + } +} + + +void xmrig::CpuConfig::setAesMode(const rapidjson::Value &aesMode) +{ + if (aesMode.IsBool()) { + m_aes = aesMode.GetBool() ? AES_HW : AES_SOFT; + } + else { + m_aes = AES_AUTO; + } +} + + +void xmrig::CpuConfig::setPriority(int priority) +{ + m_priority = (priority >= -1 && priority <= 5) ? priority : -1; +} diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h new file mode 100644 index 00000000..04dd9175 --- /dev/null +++ b/src/backend/cpu/CpuConfig.h @@ -0,0 +1,72 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_CPUCONFIG_H +#define XMRIG_CPUCONFIG_H + + +#include "crypto/common/Assembly.h" + + +namespace xmrig { + + +class CpuConfig +{ +public: + enum AesMode { + AES_AUTO, + AES_HW, + AES_SOFT + }; + + CpuConfig(); + + bool isHwAES() const; + rapidjson::Value toJSON(rapidjson::Document &doc) const; + void read(const rapidjson::Value &value); + + inline bool isEnabled() const { return m_enabled; } + inline bool isHugePages() const { return m_hugePages; } + inline bool isShouldSave() const { return m_shouldSave; } + inline const Assembly &assembly() const { return m_assembly; } + inline int priority() const { return m_priority; } + +private: + void setAesMode(const rapidjson::Value &aesMode); + void setPriority(int priority); + + AesMode m_aes = AES_AUTO; + Assembly m_assembly; + bool m_enabled = true; + bool m_hugePages = true; + bool m_shouldSave = false; + int m_priority = -1; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_CPUCONFIG_H */ diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake new file mode 100644 index 00000000..88159893 --- /dev/null +++ b/src/backend/cpu/cpu.cmake @@ -0,0 +1,7 @@ +set(HEADERS_CPU + src/backend/cpu/CpuConfig.h + ) + +set(SOURCES_CPU + src/backend/cpu/CpuConfig.cpp + ) diff --git a/src/base/kernel/Base.cpp b/src/base/kernel/Base.cpp index 1083efe9..031daed7 100644 --- a/src/base/kernel/Base.cpp +++ b/src/base/kernel/Base.cpp @@ -172,7 +172,7 @@ int xmrig::Base::init() Platform::init(config()->userAgent()); # ifndef XMRIG_PROXY_PROJECT - Platform::setProcessPriority(config()->priority()); + Platform::setProcessPriority(config()->cpu().priority()); # endif if (!config()->isBackground()) { diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 93bd47ff..5b445c0f 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -39,46 +39,24 @@ #include "workers/CpuThread.h" -static char affinity_tmp[20] = { 0 }; - - xmrig::Config::Config() : - m_aesMode(AES_AUTO), 
m_algoVariant(AV_AUTO), - m_hugePages(true), - m_safe(false), - m_shouldSave(false), - m_maxCpuUsage(100), - m_priority(-1) + m_shouldSave(false) { } -bool xmrig::Config::isHwAES() const -{ - return (m_aesMode == AES_AUTO ? (Cpu::info()->hasAES() ? AES_HW : AES_SOFT) : m_aesMode) == AES_HW; -} - - bool xmrig::Config::read(const IJsonReader &reader, const char *fileName) { if (!BaseConfig::read(reader, fileName)) { return false; } - m_hugePages = reader.getBool("huge-pages", true); - m_safe = reader.getBool("safe"); + m_cpu.read(reader.getValue("cpu")); - setAesMode(reader.getValue("hw-aes")); setAlgoVariant(reader.getInt("av")); - setMaxCpuUsage(reader.getInt("max-cpu-usage", 100)); - setPriority(reader.getInt("cpu-priority", -1)); setThreads(reader.getValue("threads")); -# ifdef XMRIG_FEATURE_ASM - setAssembly(reader.getValue("asm")); -# endif - return finalize(); } @@ -96,36 +74,29 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const api.AddMember("worker-id", m_apiWorkerId.toJSON(), allocator); doc.AddMember("api", api, allocator); doc.AddMember("http", m_http.toJSON(doc), allocator); - -# ifdef XMRIG_FEATURE_ASM - doc.AddMember("asm", m_assembly.toJSON(), allocator); -# endif - doc.AddMember("autosave", isAutoSave(), allocator); doc.AddMember("av", algoVariant(), allocator); doc.AddMember("background", isBackground(), allocator); doc.AddMember("colors", Log::colors, allocator); - if (affinity() != -1L) { - snprintf(affinity_tmp, sizeof(affinity_tmp) - 1, "0x%" PRIX64, affinity()); - doc.AddMember("cpu-affinity", StringRef(affinity_tmp), allocator); - } - else { - doc.AddMember("cpu-affinity", kNullType, allocator); - } +// if (affinity() != -1L) { +// snprintf(affinity_tmp, sizeof(affinity_tmp) - 1, "0x%" PRIX64, affinity()); +// doc.AddMember("cpu-affinity", StringRef(affinity_tmp), allocator); +// } +// else { +// doc.AddMember("cpu-affinity", kNullType, allocator); +// } + + + doc.AddMember("cpu", m_cpu.toJSON(doc), allocator); - 
doc.AddMember("cpu-priority", priority() != -1 ? Value(priority()) : Value(kNullType), allocator); doc.AddMember("donate-level", m_pools.donateLevel(), allocator); doc.AddMember("donate-over-proxy", m_pools.proxyDonate(), allocator); - doc.AddMember("huge-pages", isHugePages(), allocator); - doc.AddMember("hw-aes", m_aesMode == AES_AUTO ? Value(kNullType) : Value(m_aesMode == AES_HW), allocator); doc.AddMember("log-file", m_logFile.toJSON(), allocator); - doc.AddMember("max-cpu-usage", m_maxCpuUsage, allocator); doc.AddMember("pools", m_pools.toJSON(doc), allocator); doc.AddMember("print-time", printTime(), allocator); doc.AddMember("retries", m_pools.retries(), allocator); doc.AddMember("retry-pause", m_pools.retryPause(), allocator); - doc.AddMember("safe", m_safe, allocator); if (threadsMode() != Simple) { Value threads(kArrayType); @@ -154,7 +125,7 @@ bool xmrig::Config::finalize() m_threads.mode = Advanced; for (size_t i = 0; i < m_threads.cpu.size(); ++i) { - m_threads.list.push_back(CpuThread::createFromData(i, algorithm, m_threads.cpu[i], m_priority, !isHwAES())); + m_threads.list.push_back(CpuThread::createFromData(i, algorithm, m_threads.cpu[i], m_cpu.priority(), !m_cpu.isHwAES())); } return true; @@ -166,17 +137,17 @@ bool xmrig::Config::finalize() const size_t size = CpuThread::multiway(av) * CnAlgo<>::memory(algorithm) / 1024; // FIXME MEMORY if (!m_threads.count) { - m_threads.count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); - } - else if (m_safe) { - const size_t count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); - if (m_threads.count > count) { - m_threads.count = count; - } + m_threads.count = Cpu::info()->optimalThreadsCount(size, 100); } +// else if (m_safe) { +// const size_t count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); +// if (m_threads.count > count) { +// m_threads.count = count; +// } +// } for (size_t i = 0; i < m_threads.count; ++i) { - m_threads.list.push_back(CpuThread::createFromAV(i, 
algorithm, av, m_threads.mask, m_priority, m_assembly)); + m_threads.list.push_back(CpuThread::createFromAV(i, algorithm, av, m_threads.mask, m_cpu.priority(), m_cpu.assembly())); } m_shouldSave = m_threads.mode == Automatic; @@ -185,14 +156,6 @@ bool xmrig::Config::finalize() } -void xmrig::Config::setAesMode(const rapidjson::Value &aesMode) -{ - if (aesMode.IsBool()) { - m_aesMode = aesMode.GetBool() ? AES_HW : AES_SOFT; - } -} - - void xmrig::Config::setAlgoVariant(int av) { if (av >= AV_AUTO && av < AV_MAX) { @@ -201,22 +164,6 @@ void xmrig::Config::setAlgoVariant(int av) } -void xmrig::Config::setMaxCpuUsage(int max) -{ - if (max > 0 && max <= 100) { - m_maxCpuUsage = max; - } -} - - -void xmrig::Config::setPriority(int priority) -{ - if (priority >= 0 && priority <= 5) { - m_priority = priority; - } -} - - void xmrig::Config::setThreads(const rapidjson::Value &threads) { if (threads.IsArray()) { @@ -257,9 +204,9 @@ xmrig::AlgoVariant xmrig::Config::getAlgoVariant() const return Cpu::info()->hasAES() ? AV_SINGLE : AV_SINGLE_SOFT; } - if (m_safe && !Cpu::info()->hasAES() && m_algoVariant <= AV_DOUBLE) { - return static_cast(m_algoVariant + 2); - } +// if (m_safe && !Cpu::info()->hasAES() && m_algoVariant <= AV_DOUBLE) { +// return static_cast(m_algoVariant + 2); +// } return m_algoVariant; } @@ -272,18 +219,10 @@ xmrig::AlgoVariant xmrig::Config::getAlgoVariantLite() const return Cpu::info()->hasAES() ? 
AV_DOUBLE : AV_DOUBLE_SOFT; } - if (m_safe && !Cpu::info()->hasAES() && m_algoVariant <= AV_DOUBLE) { - return static_cast(m_algoVariant + 2); - } +// if (m_safe && !Cpu::info()->hasAES() && m_algoVariant <= AV_DOUBLE) { +// return static_cast(m_algoVariant + 2); +// } return m_algoVariant; } #endif - - -#ifdef XMRIG_FEATURE_ASM -void xmrig::Config::setAssembly(const rapidjson::Value &assembly) -{ - m_assembly = assembly; -} -#endif diff --git a/src/core/config/Config.h b/src/core/config/Config.h index 0ff13fe7..76720889 100644 --- a/src/core/config/Config.h +++ b/src/core/config/Config.h @@ -5,7 +5,8 @@ * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , - * Copyright 2016-2018 XMRig , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -29,6 +30,7 @@ #include +#include "backend/cpu/CpuConfig.h" #include "base/kernel/config/BaseConfig.h" #include "common/xmrig.h" #include "rapidjson/fwd.h" @@ -38,23 +40,9 @@ namespace xmrig { -class ConfigLoader; class IThread; -class IConfigListener; -class Process; -/** - * @brief The Config class - * - * Options with dynamic reload: - * colors - * debug - * verbose - * custom-diff (only for new connections) - * api/worker-id - * pools/ - */ class Config : public BaseConfig { public: @@ -67,26 +55,19 @@ public: Config(); - bool isHwAES() const; bool read(const IJsonReader &reader, const char *fileName) override; void getJSON(rapidjson::Document &doc) const override; inline AlgoVariant algoVariant() const { return m_algoVariant; } - inline Assembly assembly() const { return m_assembly; } - inline bool isHugePages() const { return m_hugePages; } inline bool isShouldSave() const { return (m_shouldSave || m_upgrade) && isAutoSave(); } + inline const CpuConfig &cpu() const { return m_cpu; } inline const std::vector &threads() const { 
return m_threads.list; } - inline int priority() const { return m_priority; } inline int threadsCount() const { return static_cast(m_threads.list.size()); } - inline int64_t affinity() const { return m_threads.mask; } inline ThreadsMode threadsMode() const { return m_threads.mode; } private: bool finalize(); - void setAesMode(const rapidjson::Value &aesMode); void setAlgoVariant(int av); - void setMaxCpuUsage(int max); - void setPriority(int priority); void setThreads(const rapidjson::Value &threads); AlgoVariant getAlgoVariant() const; @@ -94,11 +75,6 @@ private: AlgoVariant getAlgoVariantLite() const; # endif -# ifdef XMRIG_FEATURE_ASM - void setAssembly(const rapidjson::Value &assembly); -# endif - - struct Threads { inline Threads() : mask(-1L), count(0), mode(Automatic) {} @@ -111,18 +87,14 @@ private: }; - AesMode m_aesMode; AlgoVariant m_algoVariant; - Assembly m_assembly; - bool m_hugePages; - bool m_safe; bool m_shouldSave; - int m_maxCpuUsage; - int m_priority; + CpuConfig m_cpu; Threads m_threads; }; } /* namespace xmrig */ + #endif /* XMRIG_CONFIG_H */ diff --git a/src/crypto/common/Assembly.h b/src/crypto/common/Assembly.h index e4964d07..0b3f29b3 100644 --- a/src/crypto/common/Assembly.h +++ b/src/crypto/common/Assembly.h @@ -57,10 +57,7 @@ public: const char *toString() const; rapidjson::Value toJSON() const; -// inline static Assembly parse(bool enable) { return enable ? ASM_AUTO : ASM_NONE; } - - inline bool isEqual(const Assembly &other) const { return m_id == other.m_id; } - + inline bool isEqual(const Assembly &other) const { return m_id == other.m_id; } inline bool operator!=(const Assembly &other) const { return !isEqual(other); } inline bool operator!=(const Assembly::Id &id) const { return m_id != id; } From 62edb2fc0ace1b078e821e13229f18ec94ec3c42 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 29 Jun 2019 09:51:23 +0700 Subject: [PATCH 009/172] Move CPU information classes to new location. 
--- CMakeLists.txt | 19 ------- src/App.cpp | 4 +- src/Summary.cpp | 4 +- src/api/v1/ApiRouter.cpp | 2 +- src/{core => backend}/cpu/Cpu.cpp | 17 ++++-- src/{common => backend}/cpu/Cpu.h | 5 +- src/backend/cpu/CpuConfig.cpp | 2 +- src/backend/cpu/cpu.cmake | 22 +++++++ .../cpu}/interfaces/ICpuInfo.h | 7 ++- .../cpu/platform}/AdvancedCpuInfo.cpp | 2 +- .../cpu/platform}/AdvancedCpuInfo.h | 8 +-- .../cpu/platform}/BasicCpuInfo.cpp | 9 +-- .../cpu/platform}/BasicCpuInfo.h | 8 +-- .../cpu/platform}/BasicCpuInfo_arm.cpp | 0 src/common/cpu/Cpu.cpp | 57 ------------------- src/core/Controller.cpp | 2 +- src/core/config/Config.cpp | 2 +- src/crypto/cn/CnHash.cpp | 2 +- src/crypto/cn/CryptoNight_x86.h | 2 +- src/workers/CpuThread.cpp | 1 - src/workers/Worker.cpp | 2 +- 21 files changed, 62 insertions(+), 115 deletions(-) rename src/{core => backend}/cpu/Cpu.cpp (73%) rename src/{common => backend}/cpu/Cpu.h (88%) rename src/{common => backend/cpu}/interfaces/ICpuInfo.h (91%) rename src/{core/cpu => backend/cpu/platform}/AdvancedCpuInfo.cpp (98%) rename src/{core/cpu => backend/cpu/platform}/AdvancedCpuInfo.h (91%) rename src/{common/cpu => backend/cpu/platform}/BasicCpuInfo.cpp (94%) rename src/{common/cpu => backend/cpu/platform}/BasicCpuInfo.h (89%) rename src/{common/cpu => backend/cpu/platform}/BasicCpuInfo_arm.cpp (100%) delete mode 100644 src/common/cpu/Cpu.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 832c95d7..7320b63d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,8 +27,6 @@ set(HEADERS "${HEADERS_CPU}" src/api/interfaces/IApiListener.h src/App.h - src/common/cpu/Cpu.h - src/common/interfaces/ICpuInfo.h src/common/Platform.h src/common/xmrig.h src/core/config/Config_default.h @@ -178,23 +176,6 @@ endif() include(cmake/flags.cmake) -if (WITH_LIBCPUID) - add_subdirectory(src/3rdparty/libcpuid) - - include_directories(src/3rdparty/libcpuid) - set(CPUID_LIB cpuid) - set(SOURCES_CPUID src/core/cpu/AdvancedCpuInfo.h src/core/cpu/AdvancedCpuInfo.cpp 
src/core/cpu/Cpu.cpp) -else() - add_definitions(/DXMRIG_NO_LIBCPUID) - set(SOURCES_CPUID src/common/cpu/BasicCpuInfo.h src/common/cpu/Cpu.cpp) - - if (XMRIG_ARM) - set(SOURCES_CPUID ${SOURCES_CPUID} src/common/cpu/BasicCpuInfo_arm.cpp) - else() - set(SOURCES_CPUID ${SOURCES_CPUID} src/common/cpu/BasicCpuInfo.cpp) - endif() -endif() - include(cmake/OpenSSL.cmake) include(cmake/asm.cmake) include(cmake/cn-gpu.cmake) diff --git a/src/App.cpp b/src/App.cpp index 082bbeef..6e42ac30 100644 --- a/src/App.cpp +++ b/src/App.cpp @@ -6,7 +6,7 @@ * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , * Copyright 2018 Lee Clagett - * Copyright 2018 SChernykh + * Copyright 2018-2019 SChernykh * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify @@ -30,10 +30,10 @@ #include "api/Api.h" #include "App.h" +#include "backend/cpu/Cpu.h" #include "base/io/Console.h" #include "base/io/log/Log.h" #include "base/kernel/Signals.h" -#include "common/cpu/Cpu.h" #include "common/Platform.h" #include "core/config/Config.h" #include "core/Controller.h" diff --git a/src/Summary.cpp b/src/Summary.cpp index a51f8f59..59e540d4 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -28,9 +28,9 @@ #include +#include "backend/cpu/Cpu.h" #include "base/io/log/Log.h" #include "base/net/stratum/Pool.h" -#include "common/cpu/Cpu.h" #include "core/config/Config.h" #include "core/Controller.h" #include "crypto/common/Assembly.h" @@ -76,7 +76,7 @@ static void print_cpu(xmrig::Config *) Cpu::info()->hasAES() ? GREEN_BOLD_S : RED_BOLD_S "-", Cpu::info()->hasAVX2() ? 
GREEN_BOLD_S : RED_BOLD_S "-" ); -# ifndef XMRIG_NO_LIBCPUID +# ifdef XMRIG_FEATURE_LIBCPUID Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%.1f MB/%.1f MB"), "CPU L2/L3", Cpu::info()->L2() / 1024.0, Cpu::info()->L3() / 1024.0); # endif } diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index d066b0b1..0f754e17 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -29,8 +29,8 @@ #include "api/interfaces/IApiRequest.h" #include "api/v1/ApiRouter.h" +#include "backend/cpu/Cpu.h" #include "base/kernel/Base.h" -#include "common/cpu/Cpu.h" #include "common/Platform.h" #include "core/config/Config.h" #include "interfaces/IThread.h" diff --git a/src/core/cpu/Cpu.cpp b/src/backend/cpu/Cpu.cpp similarity index 73% rename from src/core/cpu/Cpu.cpp rename to src/backend/cpu/Cpu.cpp index 773255d2..fdcad5a8 100644 --- a/src/core/cpu/Cpu.cpp +++ b/src/backend/cpu/Cpu.cpp @@ -4,8 +4,9 @@ * Copyright 2014 Lucas Jones * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee - * Copyright 2016-2017 XMRig - * + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,11 +26,13 @@ #include -#include "common/cpu/Cpu.h" +#include "backend/cpu/Cpu.h" -#ifndef XMRIG_NO_LIBCPUID -# include "core/cpu/AdvancedCpuInfo.h" +#ifdef XMRIG_FEATURE_LIBCPUID +# include "backend/cpu/platform/AdvancedCpuInfo.h" +#else +# include "backend/cpu/platform/BasicCpuInfo.h" #endif @@ -48,7 +51,11 @@ void xmrig::Cpu::init() { assert(cpuInfo == nullptr); +# ifdef XMRIG_FEATURE_LIBCPUID cpuInfo = new AdvancedCpuInfo(); +# else + cpuInfo = new BasicCpuInfo(); +# endif } diff --git a/src/common/cpu/Cpu.h b/src/backend/cpu/Cpu.h similarity index 88% rename from src/common/cpu/Cpu.h rename to src/backend/cpu/Cpu.h index 1d5a9fb1..9c8afced 100644 --- a/src/common/cpu/Cpu.h +++ 
b/src/backend/cpu/Cpu.h @@ -5,7 +5,8 @@ * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , - * Copyright 2016-2018 XMRig , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,7 +26,7 @@ #define XMRIG_CPU_H -#include "common/interfaces/ICpuInfo.h" +#include "backend/cpu/interfaces/ICpuInfo.h" namespace xmrig { diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index b3d780f4..5284d607 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -23,9 +23,9 @@ */ +#include "backend/cpu/Cpu.h" #include "backend/cpu/CpuConfig.h" #include "base/io/json/Json.h" -#include "common/cpu/Cpu.h" #include "rapidjson/document.h" diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index 88159893..03ca7075 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -1,7 +1,29 @@ set(HEADERS_CPU + src/backend/cpu/Cpu.h src/backend/cpu/CpuConfig.h + src/backend/cpu/interfaces/ICpuInfo.h ) set(SOURCES_CPU + src/backend/cpu/Cpu.cpp src/backend/cpu/CpuConfig.cpp ) + + +if (WITH_LIBCPUID) + add_subdirectory(src/3rdparty/libcpuid) + include_directories(src/3rdparty/libcpuid) + add_definitions(/DXMRIG_FEATURE_LIBCPUID) + + set(CPUID_LIB cpuid) + set(SOURCES_CPUID src/backend/cpu/platform/AdvancedCpuInfo.h src/backend/cpu/platform/AdvancedCpuInfo.cpp src/backend/cpu/Cpu.cpp) +else() + remove_definitions(/DXMRIG_FEATURE_LIBCPUID) + set(SOURCES_CPUID src/backend/cpu/platform/BasicCpuInfo.h src/backend/cpu/Cpu.cpp) + + if (XMRIG_ARM) + set(SOURCES_CPUID ${SOURCES_CPUID} src/backend/cpu/platform/BasicCpuInfo_arm.cpp) + else() + set(SOURCES_CPUID ${SOURCES_CPUID} src/backend/cpu/platform/BasicCpuInfo.cpp) + endif() +endif() diff --git a/src/common/interfaces/ICpuInfo.h b/src/backend/cpu/interfaces/ICpuInfo.h similarity 
index 91% rename from src/common/interfaces/ICpuInfo.h rename to src/backend/cpu/interfaces/ICpuInfo.h index 907f3f63..9618f489 100644 --- a/src/common/interfaces/ICpuInfo.h +++ b/src/backend/cpu/interfaces/ICpuInfo.h @@ -41,10 +41,15 @@ class ICpuInfo public: virtual ~ICpuInfo() = default; +# if defined(__x86_64__) || defined(_M_AMD64) || defined (__arm64__) || defined (__aarch64__) + inline constexpr bool isX64() const { return true; } +# else + inline constexpr bool isX64() const { return false; } +# endif + virtual bool hasAES() const = 0; virtual bool hasAVX2() const = 0; virtual bool isSupported() const = 0; - virtual bool isX64() const = 0; virtual const char *brand() const = 0; virtual int32_t cores() const = 0; virtual int32_t L2() const = 0; diff --git a/src/core/cpu/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp similarity index 98% rename from src/core/cpu/AdvancedCpuInfo.cpp rename to src/backend/cpu/platform/AdvancedCpuInfo.cpp index 922e8311..fc7f734d 100644 --- a/src/core/cpu/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -27,7 +27,7 @@ #include -#include "core/cpu/AdvancedCpuInfo.h" +#include "backend/cpu/platform/AdvancedCpuInfo.h" xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : diff --git a/src/core/cpu/AdvancedCpuInfo.h b/src/backend/cpu/platform/AdvancedCpuInfo.h similarity index 91% rename from src/core/cpu/AdvancedCpuInfo.h rename to src/backend/cpu/platform/AdvancedCpuInfo.h index 90152640..83c3d8e5 100644 --- a/src/core/cpu/AdvancedCpuInfo.h +++ b/src/backend/cpu/platform/AdvancedCpuInfo.h @@ -26,7 +26,7 @@ #define XMRIG_ADVANCEDCPUINFO_H -#include "common/interfaces/ICpuInfo.h" +#include "backend/cpu/interfaces/ICpuInfo.h" namespace xmrig { @@ -52,12 +52,6 @@ protected: inline int32_t sockets() const override { return m_sockets; } inline int32_t threads() const override { return m_threads; } -# if defined(__x86_64__) || defined(_M_AMD64) - inline bool isX64() const override { return true; } -# 
else - inline bool isX64() const override { return false; } -# endif - private: Assembly m_assembly; bool m_aes; diff --git a/src/common/cpu/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp similarity index 94% rename from src/common/cpu/BasicCpuInfo.cpp rename to src/backend/cpu/platform/BasicCpuInfo.cpp index c5b8ed0a..04ff589b 100644 --- a/src/common/cpu/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -45,7 +45,8 @@ #endif -#include "common/cpu/BasicCpuInfo.h" +#include "backend/cpu/platform/BasicCpuInfo.h" +#include "crypto/common/Assembly.h" #define VENDOR_ID (0) @@ -121,7 +122,7 @@ static inline bool has_ossave() xmrig::BasicCpuInfo::BasicCpuInfo() : - m_assembly(ASM_NONE), + m_assembly(Assembly::NONE), m_aes(has_aes_ni()), m_avx2(has_avx2() && has_ossave()), m_brand(), @@ -141,10 +142,10 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : memcpy(vendor + 8, &data[2], 4); if (memcmp(vendor, "GenuineIntel", 12) == 0) { - m_assembly = ASM_INTEL; + m_assembly = Assembly::INTEL; } else if (memcmp(vendor, "AuthenticAMD", 12) == 0) { - m_assembly = ASM_RYZEN; + m_assembly = Assembly::RYZEN; } } # endif diff --git a/src/common/cpu/BasicCpuInfo.h b/src/backend/cpu/platform/BasicCpuInfo.h similarity index 89% rename from src/common/cpu/BasicCpuInfo.h rename to src/backend/cpu/platform/BasicCpuInfo.h index f6daee54..4d4a5163 100644 --- a/src/common/cpu/BasicCpuInfo.h +++ b/src/backend/cpu/platform/BasicCpuInfo.h @@ -26,7 +26,7 @@ #define XMRIG_BASICCPUINFO_H -#include "common/interfaces/ICpuInfo.h" +#include "backend/cpu/interfaces/ICpuInfo.h" namespace xmrig { @@ -52,12 +52,6 @@ protected: inline int32_t sockets() const override { return 1; } inline int32_t threads() const override { return m_threads; } -# if defined(__x86_64__) || defined(_M_AMD64) || defined (__arm64__) || defined (__aarch64__) - inline bool isX64() const override { return true; } -# else - inline bool isX64() const override { return false; } -# endif - private: Assembly 
m_assembly; bool m_aes; diff --git a/src/common/cpu/BasicCpuInfo_arm.cpp b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp similarity index 100% rename from src/common/cpu/BasicCpuInfo_arm.cpp rename to src/backend/cpu/platform/BasicCpuInfo_arm.cpp diff --git a/src/common/cpu/Cpu.cpp b/src/common/cpu/Cpu.cpp deleted file mode 100644 index b1bb28ac..00000000 --- a/src/common/cpu/Cpu.cpp +++ /dev/null @@ -1,57 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2016-2017 XMRig - * - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - - -#include - - -#include "common/cpu/BasicCpuInfo.h" -#include "common/cpu/Cpu.h" - - -static xmrig::ICpuInfo *cpuInfo = nullptr; - - -xmrig::ICpuInfo *xmrig::Cpu::info() -{ - assert(cpuInfo != nullptr); - - return cpuInfo; -} - - -void xmrig::Cpu::init() -{ - assert(cpuInfo == nullptr); - - cpuInfo = new BasicCpuInfo(); -} - - -void xmrig::Cpu::release() -{ - assert(cpuInfo != nullptr); - - delete cpuInfo; - cpuInfo = nullptr; -} diff --git a/src/core/Controller.cpp b/src/core/Controller.cpp index 493b3e11..8e2e03a1 100644 --- a/src/core/Controller.cpp +++ b/src/core/Controller.cpp @@ -26,7 +26,7 @@ #include -#include "common/cpu/Cpu.h" +#include "backend/cpu/Cpu.h" #include "common/Platform.h" #include "core/Controller.h" #include "net/Network.h" diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 5b445c0f..33f4cc44 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -28,9 +28,9 @@ #include +#include "backend/cpu/Cpu.h" #include "base/io/log/Log.h" #include "base/kernel/interfaces/IJsonReader.h" -#include "common/cpu/Cpu.h" #include "core/config/Config.h" #include "crypto/common/Assembly.h" #include "rapidjson/document.h" diff --git a/src/crypto/cn/CnHash.cpp b/src/crypto/cn/CnHash.cpp index d17a8e2d..6582db10 100644 --- a/src/crypto/cn/CnHash.cpp +++ b/src/crypto/cn/CnHash.cpp @@ -26,7 +26,7 @@ #include -#include "common/cpu/Cpu.h" +#include "backend/cpu/Cpu.h" #include "crypto/cn/CnHash.h" #include "crypto/common/VirtualMemory.h" diff --git a/src/crypto/cn/CryptoNight_x86.h b/src/crypto/cn/CryptoNight_x86.h index b24dea57..ae51cd18 100644 --- a/src/crypto/cn/CryptoNight_x86.h +++ b/src/crypto/cn/CryptoNight_x86.h @@ -35,7 +35,7 @@ #endif -#include "common/cpu/Cpu.h" +#include "backend/cpu/Cpu.h" #include "crypto/cn/CnAlgo.h" #include "crypto/cn/CryptoNight_monero.h" #include "crypto/cn/CryptoNight.h" diff --git a/src/workers/CpuThread.cpp b/src/workers/CpuThread.cpp index 9f20a35a..7011da12 100644 --- 
a/src/workers/CpuThread.cpp +++ b/src/workers/CpuThread.cpp @@ -26,7 +26,6 @@ #include "base/io/log/Log.h" -#include "common/cpu/Cpu.h" #include "crypto/cn/CnHash.h" #include "crypto/common/Assembly.h" #include "crypto/common/VirtualMemory.h" diff --git a/src/workers/Worker.cpp b/src/workers/Worker.cpp index 234e7bfd..c6ea6d9a 100644 --- a/src/workers/Worker.cpp +++ b/src/workers/Worker.cpp @@ -24,7 +24,7 @@ #include -#include "common/cpu/Cpu.h" +#include "backend/cpu/Cpu.h" #include "common/Platform.h" #include "workers/CpuThread.h" #include "workers/ThreadHandle.h" From e10671fa5121c1af75a51ecf333db179f652a545 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 29 Jun 2019 10:25:06 +0700 Subject: [PATCH 010/172] Fixed ARM build. --- src/backend/cpu/interfaces/ICpuInfo.h | 4 ++-- src/backend/cpu/platform/BasicCpuInfo_arm.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/backend/cpu/interfaces/ICpuInfo.h b/src/backend/cpu/interfaces/ICpuInfo.h index 9618f489..abff7a6c 100644 --- a/src/backend/cpu/interfaces/ICpuInfo.h +++ b/src/backend/cpu/interfaces/ICpuInfo.h @@ -42,9 +42,9 @@ public: virtual ~ICpuInfo() = default; # if defined(__x86_64__) || defined(_M_AMD64) || defined (__arm64__) || defined (__aarch64__) - inline constexpr bool isX64() const { return true; } + inline constexpr static bool isX64() { return true; } # else - inline constexpr bool isX64() const { return false; } + inline constexpr static bool isX64() { return false; } # endif virtual bool hasAES() const = 0; diff --git a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp index dea8de73..49e300e4 100644 --- a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp @@ -32,7 +32,7 @@ #endif -#include "common/cpu/BasicCpuInfo.h" +#include "backend/cpu/platform/BasicCpuInfo.h" xmrig::BasicCpuInfo::BasicCpuInfo() : From 83fdbbf29cba31b1b238567f7040fe6db2c133c9 Mon Sep 17 00:00:00 2001 From: XMRig 
Date: Sat, 29 Jun 2019 10:57:05 +0700 Subject: [PATCH 011/172] Added "features" and "algorithms" fields to API summary response. --- src/api/Api.cpp | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/api/Api.cpp b/src/api/Api.cpp index caebcba7..a1aeb4c2 100644 --- a/src/api/Api.cpp +++ b/src/api/Api.cpp @@ -41,6 +41,7 @@ #include "base/tools/Chrono.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "crypto/common/Algorithm.h" #include "crypto/common/keccak.h" #include "version.h" @@ -126,6 +127,32 @@ void xmrig::Api::exec(IApiRequest &request) request.reply().AddMember("id", StringRef(m_id), allocator); request.reply().AddMember("worker_id", StringRef(m_workerId), allocator); request.reply().AddMember("uptime", (Chrono::steadyMSecs() - m_timestamp) / 1000, allocator); + + Value features(kArrayType); +# ifdef XMRIG_FEATURE_API + features.PushBack("api", allocator); +# endif +# ifdef XMRIG_FEATURE_ASM + features.PushBack("asm", allocator); +# endif +# ifdef XMRIG_FEATURE_HTTP + features.PushBack("http", allocator); +# endif +# ifdef XMRIG_FEATURE_LIBCPUID + features.PushBack("cpuid", allocator); +# endif +# ifdef XMRIG_FEATURE_TLS + features.PushBack("tls", allocator); +# endif + request.reply().AddMember("features", features, allocator); + + Value algorithms(kArrayType); + + for (int i = 0; i < Algorithm::MAX; ++i) { + algorithms.PushBack(StringRef(Algorithm(static_cast(i)).shortName()), allocator); + } + + request.reply().AddMember("algorithms", algorithms, allocator); } for (IApiListener *listener : m_listeners) { From b92807e8d83eddd6677cc811b6b97436b72c52ec Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 2 Jul 2019 22:56:28 +0700 Subject: [PATCH 012/172] Added support for multi-algorithm CPU threads settings. 
--- CMakeLists.txt | 10 +- src/backend/Threads.cpp | 145 ++++++++++++++++++ src/backend/Threads.h | 67 ++++++++ src/backend/backend.cmake | 12 ++ src/backend/cpu/CpuConfig.cpp | 52 ++++++- src/backend/cpu/CpuConfig.h | 14 +- src/backend/cpu/CpuThread.cpp | 71 +++++++++ src/backend/cpu/CpuThread.h | 63 ++++++++ src/backend/cpu/cpu.cmake | 8 +- src/backend/cpu/interfaces/ICpuInfo.h | 21 ++- src/backend/cpu/platform/AdvancedCpuInfo.cpp | 76 ++++++--- src/backend/cpu/platform/AdvancedCpuInfo.h | 29 ++-- src/backend/cpu/platform/BasicCpuInfo.cpp | 27 +++- src/backend/cpu/platform/BasicCpuInfo.h | 21 +-- src/core/config/Config.cpp | 10 +- src/core/config/Config.h | 6 +- src/crypto/common/Algorithm.cpp | 15 ++ src/crypto/common/Algorithm.h | 3 + src/crypto/common/Assembly.h | 4 +- .../{CpuThread.cpp => CpuThreadLegacy.cpp} | 26 ++-- .../{CpuThread.h => CpuThreadLegacy.h} | 14 +- src/workers/MultiWorker.cpp | 2 +- src/workers/Worker.cpp | 4 +- src/workers/Worker.h | 4 +- 24 files changed, 595 insertions(+), 109 deletions(-) create mode 100644 src/backend/Threads.cpp create mode 100644 src/backend/Threads.h create mode 100644 src/backend/backend.cmake create mode 100644 src/backend/cpu/CpuThread.cpp create mode 100644 src/backend/cpu/CpuThread.h rename src/workers/{CpuThread.cpp => CpuThreadLegacy.cpp} (78%) rename src/workers/{CpuThread.h => CpuThreadLegacy.h} (83%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7320b63d..ef4f8cee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,13 +18,13 @@ option(WITH_EMBEDDED_CONFIG "Enable internal embedded JSON config" OFF) include (CheckIncludeFile) include (cmake/cpu.cmake) include (src/base/base.cmake) -include (src/backend/cpu/cpu.cmake) +include (src/backend/backend.cmake) set(HEADERS "${HEADERS_BASE}" "${HEADERS_BASE_HTTP}" - "${HEADERS_CPU}" + "${HEADERS_BACKEND}" src/api/interfaces/IApiListener.h src/App.h src/common/Platform.h @@ -45,7 +45,7 @@ set(HEADERS src/net/strategies/DonateStrategy.h src/Summary.h 
src/version.h - src/workers/CpuThread.h + src/workers/CpuThreadLegacy.h src/workers/Hashrate.h src/workers/MultiWorker.h src/workers/ThreadHandle.h @@ -83,7 +83,7 @@ endif() set(SOURCES "${SOURCES_BASE}" "${SOURCES_BASE_HTTP}" - "${SOURCES_CPU}" + "${SOURCES_BACKEND}" src/App.cpp src/common/Platform.cpp src/core/config/Config.cpp @@ -94,7 +94,7 @@ set(SOURCES src/net/NetworkState.cpp src/net/strategies/DonateStrategy.cpp src/Summary.cpp - src/workers/CpuThread.cpp + src/workers/CpuThreadLegacy.cpp src/workers/Hashrate.cpp src/workers/MultiWorker.cpp src/workers/ThreadHandle.cpp diff --git a/src/backend/Threads.cpp b/src/backend/Threads.cpp new file mode 100644 index 00000000..11e1ec15 --- /dev/null +++ b/src/backend/Threads.cpp @@ -0,0 +1,145 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include "backend/cpu/CpuThread.h" +#include "backend/Threads.h" +#include "rapidjson/document.h" + + +template +const std::vector &xmrig::Threads::get(const String &profileName) const +{ + static std::vector empty; + if (profileName.isNull() || !has(profileName)) { + return empty; + } + + return m_profiles.at(profileName); +} + + +template +xmrig::String xmrig::Threads::profileName(const Algorithm &algorithm, bool strict) const +{ + if (isDisabled(algorithm)) { + return String(); + } + + const String name = algorithm.shortName(); + if (has(name)) { + return name; + } + + if (m_aliases.count(algorithm) > 0) { + return m_aliases.at(algorithm); + } + + if (!strict && name.contains("/")) { + const String base = name.split('/').at(0); + if (has(base)) { + return base; + } + } + + return String(); +} + + +template +void xmrig::Threads::read(const rapidjson::Value &value) +{ + using namespace rapidjson; + + for (auto &member : value.GetObject()) { + if (member.value.IsArray()) { + std::vector threads; + + for (auto &v : member.value.GetArray()) { + T thread(v); + if (thread.isValid()) { + threads.push_back(std::move(thread)); + } + } + + if (!threads.empty()) { + move(member.name.GetString(), std::move(threads)); + } + + continue; + } + + const Algorithm algo(member.name.GetString()); + if (!algo.isValid()) { + continue; + } + + if (member.value.IsBool() && member.value.IsFalse()) { + disable(algo); + continue; + } + + if (member.value.IsString()) { + if (has(member.value.GetString())) { + m_aliases.insert({ algo, member.value.GetString() }); + } + else { + m_disabled.insert(algo); + } + } + } +} + + +template +void xmrig::Threads::toJSON(rapidjson::Value &out, rapidjson::Document &doc) const +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + for (const auto &kv : m_profiles) { + Value arr(kArrayType); + + for (const T &thread : kv.second) { + arr.PushBack(thread.toJSON(doc), allocator); + } + + out.AddMember(kv.first.toJSON(), arr, 
allocator); + } + + for (const Algorithm &algo : m_disabled) { + out.AddMember(StringRef(algo.shortName()), false, allocator); + } + + for (const auto &kv : m_aliases) { + out.AddMember(StringRef(kv.first.shortName()), kv.second.toJSON(), allocator); + } +} + + +namespace xmrig { + +template class Threads; + +} // namespace xmrig diff --git a/src/backend/Threads.h b/src/backend/Threads.h new file mode 100644 index 00000000..70bc02a4 --- /dev/null +++ b/src/backend/Threads.h @@ -0,0 +1,67 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_THREADS_H +#define XMRIG_THREADS_H + + +#include +#include + + +#include "base/tools/String.h" +#include "crypto/common/Algorithm.h" +#include "rapidjson/fwd.h" + + +namespace xmrig { + + +template +class Threads +{ +public: + inline bool has(const char *profile) const { return m_profiles.count(profile) > 0; } + inline bool isDisabled(const Algorithm &algo) const { return m_disabled.count(algo) > 0; } + inline bool isExist(const Algorithm &algo) const { return isDisabled(algo) || m_aliases.count(algo) > 0 || has(algo.shortName()); } + inline const std::vector &get(const Algorithm &algo, bool strict = false) const { return get(profileName(algo, strict)); } + inline void disable(const Algorithm &algo) { m_disabled.insert(algo); } + inline void move(const char *profile, std::vector &&threads) { m_profiles.insert({ profile, threads }); } + + const std::vector &get(const String &profileName) const; + String profileName(const Algorithm &algorithm, bool strict = false) const; + void read(const rapidjson::Value &value); + void toJSON(rapidjson::Value &out, rapidjson::Document &doc) const; + +private: + std::map m_aliases; + std::map > m_profiles; + std::set m_disabled; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_THREADS_H */ diff --git a/src/backend/backend.cmake b/src/backend/backend.cmake new file mode 100644 index 00000000..750cc9cb --- /dev/null +++ b/src/backend/backend.cmake @@ -0,0 +1,12 @@ +include (src/backend/cpu/cpu.cmake) + + +set(HEADERS_BACKEND + "${HEADERS_CPU}" + src/backend/Threads.h + ) + +set(SOURCES_BACKEND + "${SOURCES_CPU}" + src/backend/Threads.cpp + ) diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index 5284d607..34dcff44 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -31,17 +31,34 @@ namespace xmrig { - +static const char *kCn = "cn"; static const char *kEnabled = "enabled"; static const char *kHugePages = "huge-pages"; static const char *kHwAes = 
"hw-aes"; static const char *kPriority = "priority"; - #ifdef XMRIG_FEATURE_ASM static const char *kAsm = "asm"; #endif +#ifdef XMRIG_ALGO_CN_GPU +static const char *kCnGPU = "cn/gpu"; +#endif + +#ifdef XMRIG_ALGO_CN_LITE +static const char *kCnLite = "cn-lite"; +#endif + +#ifdef XMRIG_ALGO_CN_HEAVY +static const char *kCnHeavy = "cn-heavy"; +#endif + +#ifdef XMRIG_ALGO_CN_PICO +static const char *kCnPico = "cn-pico"; +#endif + +extern template class Threads; + } @@ -59,7 +76,6 @@ bool xmrig::CpuConfig::isHwAES() const rapidjson::Value xmrig::CpuConfig::toJSON(rapidjson::Document &doc) const { using namespace rapidjson; - auto &allocator = doc.GetAllocator(); Value obj(kObjectType); @@ -73,6 +89,8 @@ rapidjson::Value xmrig::CpuConfig::toJSON(rapidjson::Document &doc) const obj.AddMember(StringRef(kAsm), m_assembly.toJSON(), allocator); # endif + m_threads.toJSON(obj, doc); + return obj; } @@ -89,6 +107,34 @@ void xmrig::CpuConfig::read(const rapidjson::Value &value) # ifdef XMRIG_FEATURE_ASM m_assembly = Json::getValue(value, kAsm); # endif + + m_threads.read(value); + } + else if (value.IsBool() && value.IsFalse()) { + m_enabled = false; + } + else { + m_shouldSave = true; + + m_threads.disable(Algorithm::CN_0); + m_threads.move(kCn, Cpu::info()->threads(Algorithm::CN_0)); + +# ifdef XMRIG_ALGO_CN_GPU + m_threads.move(kCnGPU, Cpu::info()->threads(Algorithm::CN_GPU)); +# endif + +# ifdef XMRIG_ALGO_CN_LITE + m_threads.disable(Algorithm::CN_LITE_0); + m_threads.move(kCnLite, Cpu::info()->threads(Algorithm::CN_LITE_1)); +# endif + +# ifdef XMRIG_ALGO_CN_HEAVY + m_threads.move(kCnHeavy, Cpu::info()->threads(Algorithm::CN_HEAVY_0)); +# endif + +# ifdef XMRIG_ALGO_CN_PICO + m_threads.move(kCnPico, Cpu::info()->threads(Algorithm::CN_PICO_0)); +# endif } } diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h index 04dd9175..66da3a5f 100644 --- a/src/backend/cpu/CpuConfig.h +++ b/src/backend/cpu/CpuConfig.h @@ -26,6 +26,8 @@ #define XMRIG_CPUCONFIG_H 
+#include "backend/cpu/CpuThread.h" +#include "backend/Threads.h" #include "crypto/common/Assembly.h" @@ -47,11 +49,12 @@ public: rapidjson::Value toJSON(rapidjson::Document &doc) const; void read(const rapidjson::Value &value); - inline bool isEnabled() const { return m_enabled; } - inline bool isHugePages() const { return m_hugePages; } - inline bool isShouldSave() const { return m_shouldSave; } - inline const Assembly &assembly() const { return m_assembly; } - inline int priority() const { return m_priority; } + inline bool isEnabled() const { return m_enabled; } + inline bool isHugePages() const { return m_hugePages; } + inline bool isShouldSave() const { return m_shouldSave; } + inline const Assembly &assembly() const { return m_assembly; } + inline const Threads &threads() const { return m_threads; } + inline int priority() const { return m_priority; } private: void setAesMode(const rapidjson::Value &aesMode); @@ -63,6 +66,7 @@ private: bool m_hugePages = true; bool m_shouldSave = false; int m_priority = -1; + Threads m_threads; }; diff --git a/src/backend/cpu/CpuThread.cpp b/src/backend/cpu/CpuThread.cpp new file mode 100644 index 00000000..e7132cfa --- /dev/null +++ b/src/backend/cpu/CpuThread.cpp @@ -0,0 +1,71 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#include "backend/cpu/CpuThread.h" +#include "base/io/json/Json.h" +#include "rapidjson/document.h" + + +namespace xmrig { + + +static const char *kAffinity = "affinity"; +static const char *kIntensity = "intensity"; + + +} + + + +xmrig::CpuThread::CpuThread(const rapidjson::Value &value) +{ + if (value.IsObject()) { + m_intensity = Json::getInt(value, kIntensity, -1); + m_affinity = Json::getInt(value, kAffinity, -1); + } + else if (value.IsInt()) { + m_intensity = 1; + m_affinity = value.GetInt(); + } +} + + +rapidjson::Value xmrig::CpuThread::toJSON(rapidjson::Document &doc) const +{ + using namespace rapidjson; + + if (intensity() > 1) { + auto &allocator = doc.GetAllocator(); + + Value obj(kObjectType); + + obj.AddMember(StringRef(kIntensity), m_intensity, allocator); + obj.AddMember(StringRef(kAffinity), m_affinity, allocator); + + return obj; + } + + return Value(m_affinity); +} diff --git a/src/backend/cpu/CpuThread.h b/src/backend/cpu/CpuThread.h new file mode 100644 index 00000000..444b2709 --- /dev/null +++ b/src/backend/cpu/CpuThread.h @@ -0,0 +1,63 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_CPUTHREADCONFIG_H +#define XMRIG_CPUTHREADCONFIG_H + + +#include + + +#include "rapidjson/fwd.h" + + +namespace xmrig { + + +class CpuThread +{ +public: + inline constexpr CpuThread(int intensity = 1, int affinity = -1) : m_affinity(affinity), m_intensity(intensity) {} + + CpuThread(const rapidjson::Value &value); + + inline bool isValid() const { return m_intensity >= 1 && m_intensity <= 5; } + inline int affinity() const { return m_affinity; } + inline int intensity() const { return m_intensity; } + + rapidjson::Value toJSON(rapidjson::Document &doc) const; + +private: + int m_affinity = -1; + int m_intensity = -1; +}; + + +typedef std::vector CpuThreads; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_CPUTHREADCONFIG_H */ diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index 03ca7075..df9b7cea 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -1,12 +1,14 @@ set(HEADERS_CPU - src/backend/cpu/Cpu.h - src/backend/cpu/CpuConfig.h - src/backend/cpu/interfaces/ICpuInfo.h + src/backend/cpu/Cpu.h + src/backend/cpu/CpuConfig.h + src/backend/cpu/CpuThread.h + src/backend/cpu/interfaces/ICpuInfo.h ) set(SOURCES_CPU src/backend/cpu/Cpu.cpp src/backend/cpu/CpuConfig.cpp + src/backend/cpu/CpuThread.cpp ) diff --git a/src/backend/cpu/interfaces/ICpuInfo.h b/src/backend/cpu/interfaces/ICpuInfo.h index abff7a6c..74f6baee 100644 --- a/src/backend/cpu/interfaces/ICpuInfo.h +++ b/src/backend/cpu/interfaces/ICpuInfo.h @@ -26,11 +26,9 @@ #define XMRIG_CPUINFO_H -#include -#include - - +#include "backend/cpu/CpuThread.h" #include "crypto/common/Assembly.h" +#include "crypto/common/Algorithm.h" namespace xmrig { @@ -47,18 +45,19 @@ public: inline constexpr static bool isX64() { return false; } # endif + virtual Assembly::Id assembly() const = 0; 
virtual bool hasAES() const = 0; virtual bool hasAVX2() const = 0; virtual bool isSupported() const = 0; virtual const char *brand() const = 0; - virtual int32_t cores() const = 0; - virtual int32_t L2() const = 0; - virtual int32_t L3() const = 0; - virtual int32_t nodes() const = 0; - virtual int32_t sockets() const = 0; - virtual int32_t threads() const = 0; + virtual CpuThreads threads(const Algorithm &algorithm) const = 0; + virtual size_t cores() const = 0; + virtual size_t L2() const = 0; + virtual size_t L3() const = 0; + virtual size_t nodes() const = 0; virtual size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const = 0; - virtual Assembly::Id assembly() const = 0; + virtual size_t sockets() const = 0; + virtual size_t threads() const = 0; }; diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp index fc7f734d..4c3b30ab 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -26,51 +26,43 @@ #include #include +#include + #include "backend/cpu/platform/AdvancedCpuInfo.h" xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : - m_aes(false), - m_avx2(false), - m_L2_exclusive(false), - m_brand(), - m_cores(0), - m_L2(0), - m_L3(0), - m_sockets(1), - m_threads(0) + m_brand() { - struct cpu_raw_data_t raw = { 0 }; - struct cpu_id_t data = { 0 }; + struct cpu_raw_data_t raw = {}; + struct cpu_id_t data = {}; cpuid_get_raw_data(&raw); cpu_identify(&raw, &data); strncpy(m_brand, data.brand_str, sizeof(m_brand)); - m_threads = data.total_logical_cpus; - m_sockets = threads() / data.num_logical_cpus; - if (m_sockets == 0) { - m_sockets = 1; - } + m_threads = static_cast(data.total_logical_cpus); + m_sockets = std::max(threads() / static_cast(data.num_logical_cpus), 1); + m_cores = static_cast(data.num_cores) * m_sockets; + m_L3 = data.l3_cache > 0 ? static_cast(data.l3_cache) * m_sockets : 0; - m_cores = data.num_cores * m_sockets; - m_L3 = data.l3_cache > 0 ? 
data.l3_cache * m_sockets : 0; + const size_t l2 = static_cast(data.l2_cache); // Workaround for AMD CPUs https://github.com/anrieff/libcpuid/issues/97 if (data.vendor == VENDOR_AMD && data.ext_family >= 0x15 && data.ext_family < 0x17) { - m_L2 = data.l2_cache * (cores() / 2) * m_sockets; + m_L2 = l2 * (cores() / 2) * m_sockets; m_L2_exclusive = true; } // Workaround for Intel Pentium Dual-Core, Core Duo, Core 2 Duo, Core 2 Quad and their Xeon homologue // These processors have L2 cache shared by 2 cores. else if (data.vendor == VENDOR_INTEL && data.ext_family == 0x06 && (data.ext_model == 0x0E || data.ext_model == 0x0F || data.ext_model == 0x17)) { - int l2_count_per_socket = cores() > 1 ? cores() / 2 : 1; - m_L2 = data.l2_cache > 0 ? data.l2_cache * l2_count_per_socket * m_sockets : 0; + size_t l2_count_per_socket = cores() > 1 ? cores() / 2 : 1; + m_L2 = data.l2_cache > 0 ? l2 * l2_count_per_socket * m_sockets : 0; } else{ - m_L2 = data.l2_cache > 0 ? data.l2_cache * cores() * m_sockets : 0; + m_L2 = data.l2_cache > 0 ? l2 * cores() * m_sockets : 0; } if (data.flags[CPU_FEATURE_AES]) { @@ -125,3 +117,43 @@ size_t xmrig::AdvancedCpuInfo::optimalThreadsCount(size_t memSize, int maxCpuUsa return count < 1 ? 1 : count; } + + +xmrig::CpuThreads xmrig::AdvancedCpuInfo::threads(const Algorithm &algorithm) const +{ + if (threads() == 1) { + return CpuThreads(1); + } + +# ifdef XMRIG_ALGO_CN_GPU + if (algorithm == Algorithm::CN_GPU) { + return CpuThreads(threads()); + } +# endif + + size_t cache = 0; + size_t count = 0; + + if (m_L3) { + cache = m_L2_exclusive ? 
(m_L2 + m_L3) : m_L3; + } + else { + cache = m_L2; + } + + if (cache) { + cache *= 1024; + const size_t memory = algorithm.memory(); + + count = cache / memory; + + if (cache % memory >= memory / 2) { + count++; + } + } + else { + count = threads() / 2; + } + + return CpuThreads(std::max(std::min(count, threads()), 1)); +} diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.h b/src/backend/cpu/platform/AdvancedCpuInfo.h index 83c3d8e5..9852f6bd 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.h +++ b/src/backend/cpu/platform/AdvancedCpuInfo.h @@ -39,30 +39,31 @@ public: protected: size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const override; + CpuThreads threads(const Algorithm &algorithm) const override; inline Assembly::Id assembly() const override { return m_assembly; } inline bool hasAES() const override { return m_aes; } inline bool hasAVX2() const override { return m_avx2; } inline bool isSupported() const override { return true; } inline const char *brand() const override { return m_brand; } - inline int32_t cores() const override { return m_cores; } - inline int32_t L2() const override { return m_L2; } - inline int32_t L3() const override { return m_L3; } - inline int32_t nodes() const override { return -1; } - inline int32_t sockets() const override { return m_sockets; } - inline int32_t threads() const override { return m_threads; } + inline size_t cores() const override { return m_cores; } + inline size_t L2() const override { return m_L2; } + inline size_t L3() const override { return m_L3; } + inline size_t nodes() const override { return 0; } + inline size_t sockets() const override { return m_sockets; } + inline size_t threads() const override { return m_threads; } private: Assembly m_assembly; - bool m_aes; - bool m_avx2; - bool m_L2_exclusive; + bool m_aes = false; + bool m_avx2 = false; + bool m_L2_exclusive = false; char m_brand[64]; - int32_t m_cores; - int32_t m_L2; - int32_t m_L3; - int32_t m_sockets; - int32_t m_threads; 
+ size_t m_cores = 0; + size_t m_L2 = 0; + size_t m_L3 = 0; + size_t m_sockets = 1; + size_t m_threads = 0; }; diff --git a/src/backend/cpu/platform/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp index 04ff589b..26237468 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -22,6 +22,7 @@ * along with this program. If not, see . */ +#include #include #include @@ -123,9 +124,9 @@ static inline bool has_ossave() xmrig::BasicCpuInfo::BasicCpuInfo() : m_assembly(Assembly::NONE), + m_brand(), m_aes(has_aes_ni()), m_avx2(has_avx2() && has_ossave()), - m_brand(), m_threads(std::thread::hardware_concurrency()) { cpu_brand_string(m_brand); @@ -158,3 +159,27 @@ size_t xmrig::BasicCpuInfo::optimalThreadsCount(size_t memSize, int maxCpuUsage) return count < 1 ? 1 : count; } + + +xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &algorithm) const +{ + if (threads() == 1) { + return CpuThreads(1); + } + +# ifdef XMRIG_ALGO_CN_GPU + if (algorithm == Algorithm::CN_GPU) { + return CpuThreads(threads()); + } +# endif + + if (algorithm.family() == Algorithm::CN_LITE || algorithm.family() == Algorithm::CN_PICO) { + return CpuThreads(threads()); + } + + if (algorithm.family() == Algorithm::CN_HEAVY) { + return CpuThreads(std::max(threads() / 4, 1)); + } + + return CpuThreads(std::max(threads() / 2, 1)); +} diff --git a/src/backend/cpu/platform/BasicCpuInfo.h b/src/backend/cpu/platform/BasicCpuInfo.h index 4d4a5163..12c275dd 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.h +++ b/src/backend/cpu/platform/BasicCpuInfo.h @@ -39,25 +39,26 @@ public: protected: size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const override; + CpuThreads threads(const Algorithm &algorithm) const override; inline Assembly::Id assembly() const override { return m_assembly; } inline bool hasAES() const override { return m_aes; } inline bool hasAVX2() const override { return m_avx2; } inline bool isSupported() const 
override { return true; } inline const char *brand() const override { return m_brand; } - inline int32_t cores() const override { return -1; } - inline int32_t L2() const override { return -1; } - inline int32_t L3() const override { return -1; } - inline int32_t nodes() const override { return -1; } - inline int32_t sockets() const override { return 1; } - inline int32_t threads() const override { return m_threads; } + inline size_t cores() const override { return 0; } + inline size_t L2() const override { return 0; } + inline size_t L3() const override { return 0; } + inline size_t nodes() const override { return 0; } + inline size_t sockets() const override { return 1; } + inline size_t threads() const override { return m_threads; } private: Assembly m_assembly; - bool m_aes; - bool m_avx2; - char m_brand[64]; - int32_t m_threads; + char m_brand[64 + 6]; + const bool m_aes; + const bool m_avx2; + const size_t m_threads; }; diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 33f4cc44..87abbb91 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -36,7 +36,7 @@ #include "rapidjson/document.h" #include "rapidjson/filewritestream.h" #include "rapidjson/prettywriter.h" -#include "workers/CpuThread.h" +#include "workers/CpuThreadLegacy.h" xmrig::Config::Config() : @@ -125,7 +125,7 @@ bool xmrig::Config::finalize() m_threads.mode = Advanced; for (size_t i = 0; i < m_threads.cpu.size(); ++i) { - m_threads.list.push_back(CpuThread::createFromData(i, algorithm, m_threads.cpu[i], m_cpu.priority(), !m_cpu.isHwAES())); + m_threads.list.push_back(CpuThreadLegacy::createFromData(i, algorithm, m_threads.cpu[i], m_cpu.priority(), !m_cpu.isHwAES())); } return true; @@ -134,7 +134,7 @@ bool xmrig::Config::finalize() const AlgoVariant av = getAlgoVariant(); m_threads.mode = m_threads.count ? 
Simple : Automatic; - const size_t size = CpuThread::multiway(av) * CnAlgo<>::memory(algorithm) / 1024; // FIXME MEMORY + const size_t size = CpuThreadLegacy::multiway(av) * CnAlgo<>::memory(algorithm) / 1024; // FIXME MEMORY if (!m_threads.count) { m_threads.count = Cpu::info()->optimalThreadsCount(size, 100); @@ -147,7 +147,7 @@ bool xmrig::Config::finalize() // } for (size_t i = 0; i < m_threads.count; ++i) { - m_threads.list.push_back(CpuThread::createFromAV(i, algorithm, av, m_threads.mask, m_cpu.priority(), m_cpu.assembly())); + m_threads.list.push_back(CpuThreadLegacy::createFromAV(i, algorithm, av, m_threads.mask, m_cpu.priority(), m_cpu.assembly())); } m_shouldSave = m_threads.mode == Automatic; @@ -175,7 +175,7 @@ void xmrig::Config::setThreads(const rapidjson::Value &threads) } if (value.HasMember("low_power_mode")) { - auto data = CpuThread::parse(value); + auto data = CpuThreadLegacy::parse(value); if (data.valid) { m_threads.cpu.push_back(std::move(data)); diff --git a/src/core/config/Config.h b/src/core/config/Config.h index 76720889..7b765892 100644 --- a/src/core/config/Config.h +++ b/src/core/config/Config.h @@ -34,7 +34,7 @@ #include "base/kernel/config/BaseConfig.h" #include "common/xmrig.h" #include "rapidjson/fwd.h" -#include "workers/CpuThread.h" +#include "workers/CpuThreadLegacy.h" namespace xmrig { @@ -59,7 +59,7 @@ public: void getJSON(rapidjson::Document &doc) const override; inline AlgoVariant algoVariant() const { return m_algoVariant; } - inline bool isShouldSave() const { return (m_shouldSave || m_upgrade) && isAutoSave(); } + inline bool isShouldSave() const { return (m_shouldSave || m_upgrade || m_cpu.isShouldSave()) && isAutoSave(); } inline const CpuConfig &cpu() const { return m_cpu; } inline const std::vector &threads() const { return m_threads.list; } inline int threadsCount() const { return static_cast(m_threads.list.size()); } @@ -81,7 +81,7 @@ private: int64_t mask; size_t count; - std::vector cpu; + std::vector cpu; 
std::vector list; ThreadsMode mode; }; diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index 66b3ddda..81ee6655 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -30,6 +30,7 @@ #include +#include "crypto/cn/CnAlgo.h" #include "crypto/common/Algorithm.h" #include "rapidjson/document.h" @@ -123,6 +124,20 @@ rapidjson::Value xmrig::Algorithm::toJSON() const } +size_t xmrig::Algorithm::memory() const +{ + if (family() < RANDOM_X) { + return CnAlgo<>::memory(m_id); + } + + if (m_id == RX_WOW) { + return 0x100000; + } + + return 0; +} + + xmrig::Algorithm::Family xmrig::Algorithm::family(Id id) { switch (id) { diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index 92c6f405..08d1c4cd 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -94,11 +94,14 @@ public: inline Family family() const { return family(m_id); } inline Id id() const { return m_id; } + inline bool operator!=(Algorithm::Id id) const { return m_id != id; } inline bool operator!=(const Algorithm &other) const { return !isEqual(other); } + inline bool operator==(Algorithm::Id id) const { return m_id == id; } inline bool operator==(const Algorithm &other) const { return isEqual(other); } inline operator Algorithm::Id() const { return m_id; } rapidjson::Value toJSON() const; + size_t memory() const; static Family family(Id id); static Id parse(const char *name); diff --git a/src/crypto/common/Assembly.h b/src/crypto/common/Assembly.h index 0b3f29b3..afd8a536 100644 --- a/src/crypto/common/Assembly.h +++ b/src/crypto/common/Assembly.h @@ -59,10 +59,10 @@ public: inline bool isEqual(const Assembly &other) const { return m_id == other.m_id; } + inline bool operator!=(Assembly::Id id) const { return m_id != id; } inline bool operator!=(const Assembly &other) const { return !isEqual(other); } - inline bool operator!=(const Assembly::Id &id) const { return m_id != id; } + inline bool 
operator==(Assembly::Id id) const { return m_id == id; } inline bool operator==(const Assembly &other) const { return isEqual(other); } - inline bool operator==(const Assembly::Id &id) const { return m_id == id; } inline operator Assembly::Id() const { return m_id; } private: diff --git a/src/workers/CpuThread.cpp b/src/workers/CpuThreadLegacy.cpp similarity index 78% rename from src/workers/CpuThread.cpp rename to src/workers/CpuThreadLegacy.cpp index 7011da12..df9b9904 100644 --- a/src/workers/CpuThread.cpp +++ b/src/workers/CpuThreadLegacy.cpp @@ -31,14 +31,14 @@ #include "crypto/common/VirtualMemory.h" #include "Mem.h" #include "rapidjson/document.h" -#include "workers/CpuThread.h" +#include "workers/CpuThreadLegacy.h" static const xmrig::CnHash cnHash; -xmrig::CpuThread::CpuThread(size_t index, Algorithm algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly) : +xmrig::CpuThreadLegacy::CpuThreadLegacy(size_t index, Algorithm algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly) : m_algorithm(algorithm), m_av(av), m_assembly(assembly), @@ -52,20 +52,20 @@ xmrig::CpuThread::CpuThread(size_t index, Algorithm algorithm, AlgoVariant av, M } -xmrig::cn_hash_fun xmrig::CpuThread::fn(const Algorithm &algorithm) const +xmrig::cn_hash_fun xmrig::CpuThreadLegacy::fn(const Algorithm &algorithm) const { return cnHash.fn(algorithm, m_av, m_assembly); } -bool xmrig::CpuThread::isSoftAES(AlgoVariant av) +bool xmrig::CpuThreadLegacy::isSoftAES(AlgoVariant av) { return av == AV_SINGLE_SOFT || av == AV_DOUBLE_SOFT || av > AV_PENTA; } -xmrig::CpuThread *xmrig::CpuThread::createFromAV(size_t index, const Algorithm &algorithm, AlgoVariant av, int64_t affinity, int priority, Assembly assembly) +xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromAV(size_t index, const Algorithm &algorithm, AlgoVariant av, int64_t affinity, int priority, 
Assembly assembly) { assert(av > AV_AUTO && av < AV_MAX); @@ -88,11 +88,11 @@ xmrig::CpuThread *xmrig::CpuThread::createFromAV(size_t index, const Algorithm & } } - return new CpuThread(index, algorithm, av, multiway(av), cpuId, priority, isSoftAES(av), false, assembly); + return new CpuThreadLegacy(index, algorithm, av, multiway(av), cpuId, priority, isSoftAES(av), false, assembly); } -xmrig::CpuThread *xmrig::CpuThread::createFromData(size_t index, const Algorithm &algorithm, const CpuThread::Data &data, int priority, bool softAES) +xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromData(size_t index, const Algorithm &algorithm, const CpuThreadLegacy::Data &data, int priority, bool softAES) { int av = AV_AUTO; const Multiway multiway = data.multiway; @@ -106,11 +106,11 @@ xmrig::CpuThread *xmrig::CpuThread::createFromData(size_t index, const Algorithm assert(av > AV_AUTO && av < AV_MAX); - return new CpuThread(index, algorithm, static_cast(av), multiway, data.affinity, priority, softAES, false, data.assembly); + return new CpuThreadLegacy(index, algorithm, static_cast(av), multiway, data.affinity, priority, softAES, false, data.assembly); } -xmrig::CpuThread::Data xmrig::CpuThread::parse(const rapidjson::Value &object) +xmrig::CpuThreadLegacy::Data xmrig::CpuThreadLegacy::parse(const rapidjson::Value &object) { Data data; @@ -140,7 +140,7 @@ xmrig::CpuThread::Data xmrig::CpuThread::parse(const rapidjson::Value &object) } -xmrig::IThread::Multiway xmrig::CpuThread::multiway(AlgoVariant av) +xmrig::IThread::Multiway xmrig::CpuThreadLegacy::multiway(AlgoVariant av) { switch (av) { case AV_SINGLE: @@ -172,7 +172,7 @@ xmrig::IThread::Multiway xmrig::CpuThread::multiway(AlgoVariant av) #ifdef APP_DEBUG -void xmrig::CpuThread::print() const +void xmrig::CpuThreadLegacy::print() const { LOG_DEBUG(GREEN_BOLD("CPU thread: ") " index " WHITE_BOLD("%zu") ", multiway " WHITE_BOLD("%d") ", av " WHITE_BOLD("%d") ",", index(), static_cast(multiway()), static_cast(m_av)); 
@@ -187,7 +187,7 @@ void xmrig::CpuThread::print() const #ifdef XMRIG_FEATURE_API -rapidjson::Value xmrig::CpuThread::toAPI(rapidjson::Document &doc) const +rapidjson::Value xmrig::CpuThreadLegacy::toAPI(rapidjson::Document &doc) const { using namespace rapidjson; @@ -206,7 +206,7 @@ rapidjson::Value xmrig::CpuThread::toAPI(rapidjson::Document &doc) const #endif -rapidjson::Value xmrig::CpuThread::toConfig(rapidjson::Document &doc) const +rapidjson::Value xmrig::CpuThreadLegacy::toConfig(rapidjson::Document &doc) const { using namespace rapidjson; diff --git a/src/workers/CpuThread.h b/src/workers/CpuThreadLegacy.h similarity index 83% rename from src/workers/CpuThread.h rename to src/workers/CpuThreadLegacy.h index a43a0c09..ed69d8ac 100644 --- a/src/workers/CpuThread.h +++ b/src/workers/CpuThreadLegacy.h @@ -22,8 +22,8 @@ * along with this program. If not, see . */ -#ifndef XMRIG_CPUTHREAD_H -#define XMRIG_CPUTHREAD_H +#ifndef XMRIG_CPUTHREADLEGACY_H +#define XMRIG_CPUTHREADLEGACY_H #include "common/xmrig.h" @@ -37,7 +37,7 @@ struct cryptonight_ctx; namespace xmrig { -class CpuThread : public IThread +class CpuThreadLegacy : public IThread { public: struct Data @@ -59,13 +59,13 @@ public: }; - CpuThread(size_t index, Algorithm algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly); + CpuThreadLegacy(size_t index, Algorithm algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly); cn_hash_fun fn(const Algorithm &algorithm) const; static bool isSoftAES(AlgoVariant av); - static CpuThread *createFromAV(size_t index, const Algorithm &algorithm, AlgoVariant av, int64_t affinity, int priority, Assembly assembly); - static CpuThread *createFromData(size_t index, const Algorithm &algorithm, const CpuThread::Data &data, int priority, bool softAES); + static CpuThreadLegacy *createFromAV(size_t index, const Algorithm &algorithm, 
AlgoVariant av, int64_t affinity, int priority, Assembly assembly); + static CpuThreadLegacy *createFromData(size_t index, const Algorithm &algorithm, const CpuThreadLegacy::Data &data, int priority, bool softAES); static Data parse(const rapidjson::Value &object); static Multiway multiway(AlgoVariant av); @@ -106,4 +106,4 @@ private: } /* namespace xmrig */ -#endif /* XMRIG_CPUTHREAD_H */ +#endif /* XMRIG_CPUTHREADLEGACY_H */ diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index 52e98e0a..0ac026bc 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/workers/MultiWorker.cpp @@ -28,7 +28,7 @@ #include "crypto/cn/CryptoNight_test.h" -#include "workers/CpuThread.h" +#include "workers/CpuThreadLegacy.h" #include "workers/MultiWorker.h" #include "workers/Workers.h" diff --git a/src/workers/Worker.cpp b/src/workers/Worker.cpp index c6ea6d9a..4f69d905 100644 --- a/src/workers/Worker.cpp +++ b/src/workers/Worker.cpp @@ -26,7 +26,7 @@ #include "backend/cpu/Cpu.h" #include "common/Platform.h" -#include "workers/CpuThread.h" +#include "workers/CpuThreadLegacy.h" #include "workers/ThreadHandle.h" #include "workers/Worker.h" @@ -39,7 +39,7 @@ Worker::Worker(ThreadHandle *handle) : m_timestamp(0), m_count(0), m_sequence(0), - m_thread(static_cast(handle->config())) + m_thread(static_cast(handle->config())) { if (xmrig::Cpu::info()->threads() > 1 && m_thread->affinity() != -1L) { Platform::setThreadAffinity(m_thread->affinity()); diff --git a/src/workers/Worker.h b/src/workers/Worker.h index 4710bcc5..3d40257d 100644 --- a/src/workers/Worker.h +++ b/src/workers/Worker.h @@ -37,7 +37,7 @@ class ThreadHandle; namespace xmrig { - class CpuThread; + class CpuThreadLegacy; } @@ -62,7 +62,7 @@ protected: std::atomic m_timestamp; uint64_t m_count; uint64_t m_sequence; - xmrig::CpuThread *m_thread; + xmrig::CpuThreadLegacy *m_thread; }; From 9bf4c2c98f2d24bb673014d3166bbbf3bbd3bfa2 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 6 Jul 2019 11:31:12 +0700 Subject: 
[PATCH 013/172] Generate "rx" and "rx/wow" sections of CPU threads. --- src/backend/cpu/CpuConfig.cpp | 10 ++++++++++ src/backend/cpu/platform/AdvancedCpuInfo.cpp | 4 ++-- src/crypto/common/Algorithm.cpp | 14 ++++++++++---- 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index 34dcff44..b4a9c363 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -57,6 +57,11 @@ static const char *kCnHeavy = "cn-heavy"; static const char *kCnPico = "cn-pico"; #endif +#ifdef XMRIG_ALGO_RANDOMX +static const char *kRx = "rx"; +static const char *kRxWOW = "rx/wow"; +#endif + extern template class Threads; } @@ -135,6 +140,11 @@ void xmrig::CpuConfig::read(const rapidjson::Value &value) # ifdef XMRIG_ALGO_CN_PICO m_threads.move(kCnPico, Cpu::info()->threads(Algorithm::CN_PICO_0)); # endif + +# ifdef XMRIG_ALGO_RANDOMX + m_threads.move(kRx, Cpu::info()->threads(Algorithm::RX_0)); + m_threads.move(kRxWOW, Cpu::info()->threads(Algorithm::RX_WOW)); +# endif } } diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp index 4c3b30ab..b5b2fe91 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -22,12 +22,11 @@ * along with this program. If not, see . 
*/ +#include #include #include #include -#include - #include "backend/cpu/platform/AdvancedCpuInfo.h" @@ -144,6 +143,7 @@ xmrig::CpuThreads xmrig::AdvancedCpuInfo::threads(const Algorithm &algorithm) co if (cache) { cache *= 1024; const size_t memory = algorithm.memory(); + assert(memory > 0); count = cache / memory; diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index 4af3fd41..7680ef02 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -130,7 +130,10 @@ rapidjson::Value xmrig::Algorithm::toJSON() const size_t xmrig::Algorithm::memory() const { - if (family() < RANDOM_X) { + const Family f = family(); + assert(f != UNKNOWN); + + if (f < RANDOM_X) { return CnAlgo<>::memory(m_id); } @@ -138,7 +141,7 @@ size_t xmrig::Algorithm::memory() const return 0x100000; } - return 0; + return 0x200000; } @@ -181,12 +184,15 @@ xmrig::Algorithm::Family xmrig::Algorithm::family(Id id) # endif # ifdef XMRIG_ALGO_RANDOMX + case RX_0: case RX_WOW: + case RX_LOKI: return RANDOM_X; # endif - default: - break; + case INVALID: + case MAX: + return UNKNOWN; } return UNKNOWN; From ea1149a971bdf57482649c5ff180b6b6e7a4b87b Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 6 Jul 2019 15:22:19 +0700 Subject: [PATCH 014/172] Added class JobResults. 
--- CMakeLists.txt | 4 +- src/crypto/common/Algorithm.cpp | 2 + src/net/JobResults.cpp | 139 ++++++++++++++++++ src/net/JobResults.h | 49 ++++++ src/net/Network.cpp | 5 +- src/{ => net}/interfaces/IJobResultListener.h | 0 src/workers/MultiWorker.cpp | 3 +- src/workers/Workers.cpp | 37 ----- src/workers/Workers.h | 7 - 9 files changed, 199 insertions(+), 47 deletions(-) create mode 100644 src/net/JobResults.cpp create mode 100644 src/net/JobResults.h rename src/{ => net}/interfaces/IJobResultListener.h (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index d252af5d..33d3ad70 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,11 +35,12 @@ set(HEADERS src/core/config/ConfigTransform.h src/core/config/usage.h src/core/Controller.h - src/interfaces/IJobResultListener.h src/interfaces/IThread.h src/interfaces/IWorker.h src/Mem.h + src/net/interfaces/IJobResultListener.h src/net/JobResult.h + src/net/JobResults.h src/net/Network.h src/net/NetworkState.h src/net/strategies/DonateStrategy.h @@ -90,6 +91,7 @@ set(SOURCES src/core/config/ConfigTransform.cpp src/core/Controller.cpp src/Mem.cpp + src/net/JobResults.cpp src/net/Network.cpp src/net/NetworkState.cpp src/net/strategies/DonateStrategy.cpp diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index 7680ef02..ab63204f 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -137,9 +137,11 @@ size_t xmrig::Algorithm::memory() const return CnAlgo<>::memory(m_id); } +# ifdef XMRIG_ALGO_RANDOMX if (m_id == RX_WOW) { return 0x100000; } +# endif return 0x200000; } diff --git a/src/net/JobResults.cpp b/src/net/JobResults.cpp new file mode 100644 index 00000000..8c65b5e4 --- /dev/null +++ b/src/net/JobResults.cpp @@ -0,0 +1,139 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * 
Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#include +#include +#include + + +#include "base/tools/Handle.h" +#include "net/interfaces/IJobResultListener.h" +#include "net/JobResult.h" +#include "net/JobResults.h" + + +namespace xmrig { + + +class JobResultsPrivate +{ +public: + inline JobResultsPrivate() + { + uv_mutex_init(&m_mutex); + + m_async = new uv_async_t; + m_async->data = this; + + uv_async_init(uv_default_loop(), m_async, JobResultsPrivate::onResult); + } + + + inline ~JobResultsPrivate() + { + Handle::close(m_async); + } + + + void setListener(IJobResultListener *listener) + { + m_listener = listener; + } + + + void submit(const JobResult &result) + { + uv_mutex_lock(&m_mutex); + m_queue.push_back(result); + uv_mutex_unlock(&m_mutex); + + uv_async_send(m_async); + } + + +private: + static void onResult(uv_async_t *handle) + { + static_cast(handle->data)->submit(); + } + + + inline void submit() + { + std::list results; + + uv_mutex_lock(&m_mutex); + while (!m_queue.empty()) { + results.push_back(std::move(m_queue.front())); + m_queue.pop_front(); + } + uv_mutex_unlock(&m_mutex); + + for (auto result : results) { + m_listener->onJobResult(result); + } + + results.clear(); + } + + + IJobResultListener *m_listener = nullptr; + std::list m_queue; + uv_async_t *m_async; + uv_mutex_t m_mutex; +}; + + +static JobResultsPrivate *handler = new 
JobResultsPrivate(); + + +} // namespace xmrig + + + +void xmrig::JobResults::setListener(IJobResultListener *listener) +{ + assert(handler != nullptr && listener != nullptr); + + handler->setListener(listener); +} + + +void xmrig::JobResults::stop() +{ + delete handler; + + handler = nullptr; +} + + +void xmrig::JobResults::submit(const JobResult &result) +{ + assert(handler != nullptr); + + if (handler) { + handler->submit(result); + } +} diff --git a/src/net/JobResults.h b/src/net/JobResults.h new file mode 100644 index 00000000..e7082acb --- /dev/null +++ b/src/net/JobResults.h @@ -0,0 +1,49 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_JOBRESULTS_H +#define XMRIG_JOBRESULTS_H + + +namespace xmrig { + + +class IJobResultListener; +class JobResult; + + +class JobResults +{ +public: + static void setListener(IJobResultListener *listener); + static void stop(); + static void submit(const JobResult &result); +}; + + +} // namespace xmrig + + +#endif /* XMRIG_JOBRESULTS_H */ diff --git a/src/net/Network.cpp b/src/net/Network.cpp index 16669f52..c08facb9 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -40,6 +40,7 @@ #include "base/tools/Timer.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "net/JobResults.h" #include "net/Network.h" #include "net/strategies/DonateStrategy.h" #include "rapidjson/document.h" @@ -57,7 +58,7 @@ xmrig::Network::Network(Controller *controller) : m_donate(nullptr), m_timer(nullptr) { - Workers::setListener(this); + JobResults::setListener(this); controller->addListener(this); # ifdef XMRIG_FEATURE_API @@ -77,6 +78,8 @@ xmrig::Network::Network(Controller *controller) : xmrig::Network::~Network() { + JobResults::stop(); + delete m_timer; if (m_donate) { diff --git a/src/interfaces/IJobResultListener.h b/src/net/interfaces/IJobResultListener.h similarity index 100% rename from src/interfaces/IJobResultListener.h rename to src/net/interfaces/IJobResultListener.h diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index dc1292a3..2565e7c4 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/workers/MultiWorker.cpp @@ -28,6 +28,7 @@ #include "crypto/cn/CryptoNight_test.h" +#include "net/JobResults.h" #include "workers/CpuThreadLegacy.h" #include "workers/MultiWorker.h" #include "workers/Workers.h" @@ -170,7 +171,7 @@ void xmrig::MultiWorker::start() for (size_t i = 0; i < N; ++i) { if (*reinterpret_cast(m_hash + (i * 32) + 24) < m_state.job.target()) { - Workers::submit(JobResult(m_state.job.poolId(), m_state.job.id(), m_state.job.clientId(), *nonce(i), m_hash + (i * 32), m_state.job.diff(), 
m_state.job.algorithm())); + JobResults::submit(JobResult(m_state.job.poolId(), m_state.job.id(), m_state.job.clientId(), *nonce(i), m_hash + (i * 32), m_state.job.diff(), m_state.job.algorithm())); } *nonce(i) += 1; diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index 28590d36..88d73a0b 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -32,7 +32,6 @@ #include "base/tools/Handle.h" #include "core/config/Config.h" #include "core/Controller.h" -#include "interfaces/IJobResultListener.h" #include "interfaces/IThread.h" #include "Mem.h" #include "rapidjson/document.h" @@ -45,15 +44,12 @@ bool Workers::m_active = false; bool Workers::m_enabled = true; Hashrate *Workers::m_hashrate = nullptr; -xmrig::IJobResultListener *Workers::m_listener = nullptr; xmrig::Job Workers::m_job; Workers::LaunchStatus Workers::m_status; std::atomic Workers::m_paused; std::atomic Workers::m_sequence; -std::list Workers::m_queue; std::vector Workers::m_workers; uint64_t Workers::m_ticks = 0; -uv_async_t *Workers::m_async = nullptr; uv_mutex_t Workers::m_mutex; uv_rwlock_t Workers::m_rwlock; uv_timer_t *Workers::m_timer = nullptr; @@ -199,9 +195,6 @@ void Workers::start(xmrig::Controller *controller) m_sequence = 1; m_paused = 1; - m_async = new uv_async_t; - uv_async_init(uv_default_loop(), m_async, Workers::onResult); - m_timer = new uv_timer_t; uv_timer_init(uv_default_loop(), m_timer); uv_timer_start(m_timer, Workers::onTick, 500, 500); @@ -221,7 +214,6 @@ void Workers::start(xmrig::Controller *controller) void Workers::stop() { xmrig::Handle::close(m_timer); - xmrig::Handle::close(m_async); m_hashrate->stop(); m_paused = 0; @@ -233,16 +225,6 @@ void Workers::stop() } -void Workers::submit(const xmrig::JobResult &result) -{ - uv_mutex_lock(&m_mutex); - m_queue.push_back(result); - uv_mutex_unlock(&m_mutex); - - uv_async_send(m_async); -} - - #ifdef XMRIG_FEATURE_API void Workers::threadsSummary(rapidjson::Document &doc) { @@ -306,25 +288,6 @@ void 
Workers::onReady(void *arg) } -void Workers::onResult(uv_async_t *) -{ - std::list results; - - uv_mutex_lock(&m_mutex); - while (!m_queue.empty()) { - results.push_back(std::move(m_queue.front())); - m_queue.pop_front(); - } - uv_mutex_unlock(&m_mutex); - - for (auto result : results) { - m_listener->onJobResult(result); - } - - results.clear(); -} - - void Workers::onTick(uv_timer_t *) { for (ThreadHandle *handle : m_workers) { diff --git a/src/workers/Workers.h b/src/workers/Workers.h index 96191309..39e872b5 100644 --- a/src/workers/Workers.h +++ b/src/workers/Workers.h @@ -47,7 +47,6 @@ class ThreadHandle; namespace xmrig { class Controller; - class IJobResultListener; } @@ -62,7 +61,6 @@ public: static void setJob(const xmrig::Job &job, bool donate); static void start(xmrig::Controller *controller); static void stop(); - static void submit(const xmrig::JobResult &result); static inline bool isEnabled() { return m_enabled; } static inline bool isOutdated(uint64_t sequence) { return m_sequence.load(std::memory_order_relaxed) != sequence; } @@ -70,7 +68,6 @@ public: static inline Hashrate *hashrate() { return m_hashrate; } static inline uint64_t sequence() { return m_sequence.load(std::memory_order_relaxed); } static inline void pause() { m_active = false; m_paused = 1; m_sequence++; } - static inline void setListener(xmrig::IJobResultListener *listener) { m_listener = listener; } # ifdef XMRIG_FEATURE_API static void threadsSummary(rapidjson::Document &doc); @@ -83,7 +80,6 @@ public: private: static void onReady(void *arg); - static void onResult(uv_async_t *handle); static void onTick(uv_timer_t *handle); static void start(IWorker *worker); @@ -109,15 +105,12 @@ private: static bool m_active; static bool m_enabled; static Hashrate *m_hashrate; - static xmrig::IJobResultListener *m_listener; static xmrig::Job m_job; static LaunchStatus m_status; static std::atomic m_paused; static std::atomic m_sequence; - static std::list m_queue; static std::vector m_workers; 
static uint64_t m_ticks; - static uv_async_t *m_async; static uv_mutex_t m_mutex; static uv_rwlock_t m_rwlock; static uv_timer_t *m_timer; From f42adafee0b73d4112a7e7362399fe850b629a40 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 10 Jul 2019 01:53:05 +0700 Subject: [PATCH 015/172] Added classes Rx, RxAlgo, RxCache, RxDataset. --- CMakeLists.txt | 30 ++++-- src/base/io/log/Log.h | 6 ++ src/core/config/Config.cpp | 2 +- src/crypto/common/Algorithm.cpp | 7 +- src/crypto/common/VirtualMemory.h | 2 + src/crypto/common/VirtualMemory_win.cpp | 9 -- src/crypto/rx/Rx.cpp | 134 ++++++++++++++++++++++++ src/crypto/rx/Rx.h | 53 ++++++++++ src/crypto/rx/RxAlgo.cpp | 69 ++++++++++++ src/crypto/rx/RxAlgo.h | 56 ++++++++++ src/crypto/rx/RxCache.cpp | 81 ++++++++++++++ src/crypto/rx/RxCache.h | 70 +++++++++++++ src/crypto/rx/RxDataset.cpp | 124 ++++++++++++++++++++++ src/crypto/rx/RxDataset.h | 72 +++++++++++++ src/net/JobResults.cpp | 2 + src/workers/MultiWorker.cpp | 9 +- src/workers/Workers.cpp | 108 +------------------ src/workers/Workers.h | 14 --- 18 files changed, 704 insertions(+), 144 deletions(-) create mode 100644 src/crypto/rx/Rx.cpp create mode 100644 src/crypto/rx/Rx.h create mode 100644 src/crypto/rx/RxAlgo.cpp create mode 100644 src/crypto/rx/RxAlgo.h create mode 100644 src/crypto/rx/RxCache.cpp create mode 100644 src/crypto/rx/RxCache.h create mode 100644 src/crypto/rx/RxDataset.cpp create mode 100644 src/crypto/rx/RxDataset.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 33d3ad70..c315e1cf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -171,24 +171,32 @@ if (WITH_RANDOMX) set(SOURCES_CRYPTO "${SOURCES_CRYPTO}" src/crypto/randomx/aes_hash.cpp + src/crypto/randomx/allocator.cpp + src/crypto/randomx/argon2_core.c src/crypto/randomx/argon2_ref.c + src/crypto/randomx/blake2_generator.cpp + src/crypto/randomx/blake2/blake2b.c src/crypto/randomx/bytecode_machine.cpp src/crypto/randomx/dataset.cpp - src/crypto/randomx/soft_aes.cpp - 
src/crypto/randomx/virtual_memory.cpp - src/crypto/randomx/vm_interpreted.cpp - src/crypto/randomx/allocator.cpp + src/crypto/randomx/instructions_portable.cpp src/crypto/randomx/randomx.cpp + src/crypto/randomx/reciprocal.c + src/crypto/randomx/soft_aes.cpp src/crypto/randomx/superscalar.cpp + src/crypto/randomx/virtual_machine.cpp + src/crypto/randomx/virtual_memory.cpp + src/crypto/randomx/vm_compiled_light.cpp src/crypto/randomx/vm_compiled.cpp src/crypto/randomx/vm_interpreted_light.cpp - src/crypto/randomx/argon2_core.c - src/crypto/randomx/blake2_generator.cpp - src/crypto/randomx/instructions_portable.cpp - src/crypto/randomx/reciprocal.c - src/crypto/randomx/virtual_machine.cpp - src/crypto/randomx/vm_compiled_light.cpp - src/crypto/randomx/blake2/blake2b.c + src/crypto/randomx/vm_interpreted.cpp + src/crypto/rx/Rx.cpp + src/crypto/rx/Rx.h + src/crypto/rx/RxAlgo.cpp + src/crypto/rx/RxAlgo.h + src/crypto/rx/RxCache.cpp + src/crypto/rx/RxCache.h + src/crypto/rx/RxDataset.cpp + src/crypto/rx/RxDataset.h ) if (NOT ARCH_ID) set(ARCH_ID ${CMAKE_HOST_SYSTEM_PROCESSOR}) diff --git a/src/base/io/log/Log.h b/src/base/io/log/Log.h index a14ffded..962d1dba 100644 --- a/src/base/io/log/Log.h +++ b/src/base/io/log/Log.h @@ -81,6 +81,9 @@ private: #define WHITE_S CSI "0;37m" // another name for LT.GRAY #define WHITE_BOLD_S CSI "1;37m" // actually white +#define BLUE_BG_S CSI "44m" +#define BLUE_BG_BOLD_S CSI "44;1m" + //color wrappings #define BLACK(x) BLACK_S x CLEAR #define BLACK_BOLD(x) BLACK_BOLD_S x CLEAR @@ -99,6 +102,9 @@ private: #define WHITE(x) WHITE_S x CLEAR #define WHITE_BOLD(x) WHITE_BOLD_S x CLEAR +#define BLUE_BG(x) BLUE_BG_S x CLEAR +#define BLUE_BG_BOLD(x) BLUE_BG_BOLD_S x CLEAR + #define LOG_EMERG(x, ...) xmrig::Log::print(xmrig::Log::EMERG, x, ##__VA_ARGS__) #define LOG_ALERT(x, ...) 
xmrig::Log::print(xmrig::Log::ALERT, x, ##__VA_ARGS__) diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 87abbb91..784e171c 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -119,7 +119,7 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const bool xmrig::Config::finalize() { - Algorithm algorithm(Algorithm::CN_0); // FIXME algo + Algorithm algorithm(Algorithm::RX_WOW); // FIXME algo if (!m_threads.cpu.empty()) { m_threads.mode = Advanced; diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index ab63204f..2c259d32 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -32,6 +32,7 @@ #include "crypto/cn/CnAlgo.h" #include "crypto/common/Algorithm.h" +#include "crypto/rx/RxAlgo.h" #include "rapidjson/document.h" @@ -138,12 +139,12 @@ size_t xmrig::Algorithm::memory() const } # ifdef XMRIG_ALGO_RANDOMX - if (m_id == RX_WOW) { - return 0x100000; + if (f == RANDOM_X) { + return RxAlgo::l3(m_id); } # endif - return 0x200000; + return 0; } diff --git a/src/crypto/common/VirtualMemory.h b/src/crypto/common/VirtualMemory.h index e8acb017..a83c35ed 100644 --- a/src/crypto/common/VirtualMemory.h +++ b/src/crypto/common/VirtualMemory.h @@ -44,6 +44,8 @@ public: static void freeLargePagesMemory(void *p, size_t size); static void protectExecutableMemory(void *p, size_t size); static void unprotectExecutableMemory(void *p, size_t size); + + static inline constexpr size_t align(size_t pos, size_t align = 2097152) { return ((pos - 1) / align + 1) * align; } }; diff --git a/src/crypto/common/VirtualMemory_win.cpp b/src/crypto/common/VirtualMemory_win.cpp index dd6be14f..7f1d6f43 100644 --- a/src/crypto/common/VirtualMemory_win.cpp +++ b/src/crypto/common/VirtualMemory_win.cpp @@ -32,15 +32,6 @@ #include "crypto/common/VirtualMemory.h" -namespace xmrig { - -constexpr size_t align(size_t pos, size_t align) { - return ((pos - 1) / align + 1) * align; -} - -} - - void 
*xmrig::VirtualMemory::allocateExecutableMemory(size_t size) { return VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE); diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp new file mode 100644 index 00000000..735169e2 --- /dev/null +++ b/src/crypto/rx/Rx.cpp @@ -0,0 +1,134 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include +#include + + +#include "backend/cpu/Cpu.h" +#include "base/io/log/Log.h" +#include "base/tools/Buffer.h" +#include "base/tools/Chrono.h" +#include "crypto/rx/Rx.h" +#include "crypto/rx/RxCache.h" +#include "crypto/rx/RxDataset.h" + + +namespace xmrig { + + +class RxPrivate +{ +public: + inline RxPrivate() + { + uv_mutex_init(&mutex); + } + + + inline ~RxPrivate() + { + delete dataset; + uv_mutex_destroy(&mutex); + } + + + inline void lock() { uv_mutex_lock(&mutex); } + inline void unlock() { uv_mutex_unlock(&mutex); } + + + RxDataset *dataset = nullptr; + uint32_t initThreads = std::thread::hardware_concurrency(); + uv_mutex_t mutex; +}; + + +static RxPrivate *d_ptr = new RxPrivate(); +static const char *tag = BLUE_BG(" rx "); + + +} // namespace xmrig + + +xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algorithm, bool hugePages) +{ + d_ptr->lock(); + + if (!d_ptr->dataset) { + const uint64_t ts = Chrono::steadyMSecs(); + + LOG_INFO("%s" MAGENTA_BOLD(" allocate") CYAN_BOLD(" %zu MiB") BLACK_BOLD(" (%zu+%zu) for RandomX dataset & cache"), + tag, + (RxDataset::size() + RxCache::size()) / 1024 / 1024, + RxDataset::size() / 1024 / 1024, + RxCache::size() / 1024 / 1024 + ); + + d_ptr->dataset = new RxDataset(hugePages); + + const auto hugePages = d_ptr->dataset->hugePages(); + const double percent = hugePages.first == 0 ? 0.0 : static_cast(hugePages.first) / hugePages.second * 100.0; + + LOG_INFO("%s" GREEN(" allocate done") " huge pages %s%u/%u %1.0f%%" CLEAR " %sJIT" BLACK_BOLD(" (%" PRIu64 " ms)"), + tag, + (hugePages.first == hugePages.second ? GREEN_BOLD_S : (hugePages.first == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), + hugePages.first, + hugePages.second, + percent, + d_ptr->dataset->cache()->isJIT() ? 
GREEN_BOLD_S "+" : RED_BOLD_S "-", + Chrono::steadyMSecs() - ts + ); + } + + if (!d_ptr->dataset->isReady(seed, algorithm)) { + const uint64_t ts = Chrono::steadyMSecs(); + + LOG_INFO("%s" MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s") " threads " WHITE_BOLD("%u") BLACK_BOLD(" seed %s..."), + tag, + algorithm.shortName(), + d_ptr->initThreads, + Buffer::toHex(seed, 8).data() + ); + + d_ptr->dataset->init(seed, algorithm, d_ptr->initThreads); + + LOG_INFO("%s" GREEN(" init done") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, Chrono::steadyMSecs() - ts); + } + + RxDataset *dataset = d_ptr->dataset; + d_ptr->unlock(); + + return dataset; +} + + +void xmrig::Rx::stop() +{ + delete d_ptr; + + d_ptr = nullptr; +} diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h new file mode 100644 index 00000000..c9d068c6 --- /dev/null +++ b/src/crypto/rx/Rx.h @@ -0,0 +1,53 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_RX_H +#define XMRIG_RX_H + + +#include + + +namespace xmrig +{ + + +class Algorithm; +class RxDataset; + + +class Rx +{ +public: + static RxDataset *dataset(const uint8_t *seed, const Algorithm &algorithm, bool hugePages = true); + static void stop(); +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_RX_H */ diff --git a/src/crypto/rx/RxAlgo.cpp b/src/crypto/rx/RxAlgo.cpp new file mode 100644 index 00000000..b0e92e6e --- /dev/null +++ b/src/crypto/rx/RxAlgo.cpp @@ -0,0 +1,69 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include "crypto/randomx/randomx.h" +#include "crypto/rx/RxAlgo.h" + + +xmrig::Algorithm::Id xmrig::RxAlgo::apply(Algorithm::Id algorithm) +{ + switch (algorithm) { + case Algorithm::RX_WOW: + randomx_apply_config(RandomX_WowneroConfig); + break; + + case Algorithm::RX_LOKI: + randomx_apply_config(RandomX_LokiConfig); + break; + + default: + randomx_apply_config(RandomX_MoneroConfig); + break; + } + + return algorithm; +} + + +size_t xmrig::RxAlgo::l3(Algorithm::Id algorithm) +{ + switch (algorithm) { + case Algorithm::RX_0: + return RandomX_MoneroConfig.ScratchpadL3_Size; + + case Algorithm::RX_WOW: + return RandomX_WowneroConfig.ScratchpadL3_Size; + + case Algorithm::RX_LOKI: + return RandomX_LokiConfig.ScratchpadL3_Size; + + default: + break; + } + + return 0; +} diff --git a/src/crypto/rx/RxAlgo.h b/src/crypto/rx/RxAlgo.h new file mode 100644 index 00000000..dd3f0aa7 --- /dev/null +++ b/src/crypto/rx/RxAlgo.h @@ -0,0 +1,56 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_RX_ALGO_H +#define XMRIG_RX_ALGO_H + + +#include +#include + + +#include "crypto/common/Algorithm.h" + + +struct RandomX_ConfigurationBase; + + +namespace xmrig +{ + + +class RxAlgo +{ +public: + static Algorithm::Id apply(Algorithm::Id algorithm); + static size_t l3(Algorithm::Id algorithm); +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_RX_ALGO_H */ diff --git a/src/crypto/rx/RxCache.cpp b/src/crypto/rx/RxCache.cpp new file mode 100644 index 00000000..a5e9efb3 --- /dev/null +++ b/src/crypto/rx/RxCache.cpp @@ -0,0 +1,81 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include "crypto/randomx/randomx.h" +#include "crypto/rx/RxCache.h" + + +static_assert(RANDOMX_FLAG_JIT == 8, "RANDOMX_FLAG_JIT flag mismatch"); +static_assert(RANDOMX_FLAG_LARGE_PAGES == 1, "RANDOMX_FLAG_LARGE_PAGES flag mismatch"); + + + +xmrig::RxCache::RxCache(bool hugePages) : + m_seed() +{ + if (hugePages) { + m_flags = RANDOMX_FLAG_JIT | RANDOMX_FLAG_LARGE_PAGES; + m_cache = randomx_alloc_cache(static_cast(m_flags)); + } + + if (!m_cache) { + m_flags = RANDOMX_FLAG_JIT; + m_cache = randomx_alloc_cache(static_cast(m_flags)); + } + + if (!m_cache) { + m_flags = RANDOMX_FLAG_DEFAULT; + m_cache = randomx_alloc_cache(static_cast(m_flags)); + } +} + + +xmrig::RxCache::~RxCache() +{ + if (m_cache) { + randomx_release_cache(m_cache); + } +} + + +bool xmrig::RxCache::init(const void *seed) +{ + if (isReady(seed)) { + return false; + } + + memcpy(m_seed, seed, sizeof(m_seed)); + randomx_init_cache(m_cache, m_seed, sizeof(m_seed)); + + return true; +} + + +bool xmrig::RxCache::isReady(const void *seed) const +{ + return memcmp(m_seed, seed, sizeof(m_seed)) == 0; +} diff --git a/src/crypto/rx/RxCache.h b/src/crypto/rx/RxCache.h new file mode 100644 index 00000000..893ebf06 --- /dev/null +++ b/src/crypto/rx/RxCache.h @@ -0,0 +1,70 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_RX_CACHE_H +#define XMRIG_RX_CACHE_H + + +#include + + +#include "crypto/randomx/configuration.h" + + +struct randomx_cache; + + +namespace xmrig +{ + + +class RxCache +{ +public: + RxCache(bool hugePages = true); + ~RxCache(); + + inline bool isHugePages() const { return m_flags & 1; } + inline bool isJIT() const { return m_flags & 8; } + inline const uint8_t *seed() const { return m_seed; } + inline randomx_cache *get() const { return m_cache; } + + bool init(const void *seed); + bool isReady(const void *seed) const; + + static inline constexpr size_t size() { return RANDOMX_CACHE_MAX_SIZE; } + +private: + int m_flags = 0; + randomx_cache *m_cache = nullptr; + uint8_t m_seed[32]; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_RX_CACHE_H */ diff --git a/src/crypto/rx/RxDataset.cpp b/src/crypto/rx/RxDataset.cpp new file mode 100644 index 00000000..5c3b9f37 --- /dev/null +++ b/src/crypto/rx/RxDataset.cpp @@ -0,0 +1,124 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#include + + +#include "crypto/common/VirtualMemory.h" +#include "crypto/randomx/randomx.h" +#include "crypto/rx/RxAlgo.h" +#include "crypto/rx/RxCache.h" +#include "crypto/rx/RxDataset.h" + + +static_assert(RANDOMX_FLAG_LARGE_PAGES == 1, "RANDOMX_FLAG_LARGE_PAGES flag mismatch"); + + +xmrig::RxDataset::RxDataset(bool hugePages) +{ + if (hugePages) { + m_flags = RANDOMX_FLAG_LARGE_PAGES; + m_dataset = randomx_alloc_dataset(static_cast(m_flags)); + } + + if (!m_dataset) { + m_flags = RANDOMX_FLAG_DEFAULT; + m_dataset = randomx_alloc_dataset(static_cast(m_flags)); + } + + m_cache = new RxCache(hugePages); +} + + +xmrig::RxDataset::~RxDataset() +{ + if (m_dataset) { + randomx_release_dataset(m_dataset); + } + + delete m_cache; +} + + +bool xmrig::RxDataset::init(const void *seed, const Algorithm &algorithm, uint32_t numThreads) +{ + if (isReady(seed, algorithm)) { + return false; + } + + if (m_algorithm != algorithm) { + m_algorithm = RxAlgo::apply(algorithm); + } + + cache()->init(seed); + + const uint32_t datasetItemCount = randomx_dataset_item_count(); + + if (numThreads > 1) { + std::vector threads; + threads.reserve(numThreads); + + for (uint32_t i = 0; i < numThreads; ++i) { + const uint32_t a = (datasetItemCount * i) / numThreads; + const uint32_t b = (datasetItemCount * (i + 1)) / numThreads; + threads.emplace_back(randomx_init_dataset, m_dataset, m_cache->get(), a, b - a); + } + + for (uint32_t i = 0; i < numThreads; ++i) { + threads[i].join(); + } + } + else { + randomx_init_dataset(m_dataset, m_cache->get(), 0, datasetItemCount); + } + + return true; +} + + +bool 
xmrig::RxDataset::isReady(const void *seed, const Algorithm &algorithm) const +{ + return algorithm == m_algorithm && cache()->isReady(seed); +} + + +std::pair xmrig::RxDataset::hugePages() const +{ + constexpr size_t twoMiB = 2u * 1024u * 1024u; + constexpr const size_t total = (VirtualMemory::align(size(), twoMiB) + VirtualMemory::align(RxCache::size(), twoMiB)) / twoMiB; + + size_t count = 0; + if (isHugePages()) { + count += VirtualMemory::align(size(), twoMiB) / twoMiB; + } + + if (m_cache->isHugePages()) { + count += VirtualMemory::align(RxCache::size(), twoMiB) / twoMiB; + } + + return std::pair(count, total); +} diff --git a/src/crypto/rx/RxDataset.h b/src/crypto/rx/RxDataset.h new file mode 100644 index 00000000..7944d52c --- /dev/null +++ b/src/crypto/rx/RxDataset.h @@ -0,0 +1,72 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_RX_DATASET_H +#define XMRIG_RX_DATASET_H + + +#include "crypto/common/Algorithm.h" +#include "crypto/randomx/configuration.h" + + +struct randomx_dataset; + + +namespace xmrig +{ + + +class RxCache; + + +class RxDataset +{ +public: + RxDataset(bool hugePages = true); + ~RxDataset(); + + inline bool isHugePages() const { return m_flags & 1; } + inline randomx_dataset *get() const { return m_dataset; } + inline RxCache *cache() const { return m_cache; } + + bool init(const void *seed, const Algorithm &algorithm, uint32_t numThreads); + bool isReady(const void *seed, const Algorithm &algorithm) const; + std::pair hugePages() const; + + static inline constexpr size_t size() { return RANDOMX_DATASET_MAX_SIZE; } + +private: + Algorithm m_algorithm; + int m_flags = 0; + randomx_dataset *m_dataset = nullptr; + RxCache *m_cache = nullptr; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_RX_DATASET_H */ diff --git a/src/net/JobResults.cpp b/src/net/JobResults.cpp index 8c65b5e4..bf0b5e86 100644 --- a/src/net/JobResults.cpp +++ b/src/net/JobResults.cpp @@ -54,6 +54,8 @@ public: inline ~JobResultsPrivate() { Handle::close(m_async); + + uv_mutex_destroy(&m_mutex); } diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index 2565e7c4..059a7171 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/workers/MultiWorker.cpp @@ -28,6 +28,8 @@ #include "crypto/cn/CryptoNight_test.h" +#include "crypto/rx/Rx.h" +#include "crypto/rx/RxDataset.h" #include "net/JobResults.h" #include "workers/CpuThreadLegacy.h" #include "workers/MultiWorker.h" @@ -67,9 +69,11 @@ void xmrig::MultiWorker::allocateRandomX_VM() flags |= RANDOMX_FLAG_HARD_AES; } - m_rx_vm = randomx_create_vm(static_cast(flags), nullptr, Workers::getDataset()); + RxDataset *dataset = Rx::dataset(m_state.job.seedHash(), m_state.job.algorithm()); + + m_rx_vm = randomx_create_vm(static_cast(flags), nullptr, dataset->get()); if (!m_rx_vm) { - m_rx_vm = randomx_create_vm(static_cast(flags - 
RANDOMX_FLAG_LARGE_PAGES), nullptr, Workers::getDataset()); + m_rx_vm = randomx_create_vm(static_cast(flags - RANDOMX_FLAG_LARGE_PAGES), nullptr, dataset->get()); } } } @@ -160,7 +164,6 @@ void xmrig::MultiWorker::start() # ifdef XMRIG_ALGO_RANDOMX if (m_state.job.algorithm().family() == Algorithm::RANDOM_X) { allocateRandomX_VM(); - Workers::updateDataset(m_state.job.seedHash(), m_totalWays, m_state.job.algorithm()); randomx_calculate_hash(m_rx_vm, m_state.blob, m_state.job.size(), m_hash); } else diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index 88d73a0b..58cccd9e 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -29,9 +29,13 @@ #include "api/Api.h" #include "base/io/log/Log.h" +#include "base/tools/Chrono.h" #include "base/tools/Handle.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "crypto/rx/RxAlgo.h" +#include "crypto/rx/RxCache.h" +#include "crypto/rx/RxDataset.h" #include "interfaces/IThread.h" #include "Mem.h" #include "rapidjson/document.h" @@ -55,15 +59,6 @@ uv_rwlock_t Workers::m_rwlock; uv_timer_t *Workers::m_timer = nullptr; xmrig::Controller *Workers::m_controller = nullptr; -#ifdef XMRIG_ALGO_RANDOMX -uv_rwlock_t Workers::m_rx_dataset_lock; -randomx_cache *Workers::m_rx_cache = nullptr; -randomx_dataset *Workers::m_rx_dataset = nullptr; -uint8_t Workers::m_rx_seed_hash[32] = {}; -xmrig::Algorithm Workers::m_rx_algo; -std::atomic Workers::m_rx_dataset_init_thread_counter = {}; -#endif - xmrig::Job Workers::job() { @@ -176,7 +171,7 @@ void Workers::start(xmrig::Controller *controller) m_controller = controller; const std::vector &threads = controller->config()->threads(); - m_status.algo = xmrig::Algorithm::CN_0; // FIXME algo + m_status.algo = xmrig::Algorithm::RX_WOW; // FIXME algo m_status.threads = threads.size(); for (const xmrig::IThread *thread : threads) { @@ -188,10 +183,6 @@ void Workers::start(xmrig::Controller *controller) uv_mutex_init(&m_mutex); uv_rwlock_init(&m_rwlock); 
-# ifdef XMRIG_ALGO_RANDOMX - uv_rwlock_init(&m_rx_dataset_lock); -# endif - m_sequence = 1; m_paused = 1; @@ -335,92 +326,3 @@ void Workers::start(IWorker *worker) worker->start(); } - - -#ifdef XMRIG_ALGO_RANDOMX -void Workers::updateDataset(const uint8_t* seed_hash, const uint32_t num_threads, const xmrig::Algorithm &algorithm) -{ - // Check if we need to update cache and dataset - if ((memcmp(m_rx_seed_hash, seed_hash, sizeof(m_rx_seed_hash)) == 0) && (m_rx_algo == algorithm)) - return; - - const uint32_t thread_id = m_rx_dataset_init_thread_counter++; - LOG_DEBUG("Thread %u started updating RandomX dataset", thread_id); - - // Wait for all threads to get here - do { - if (m_sequence.load(std::memory_order_relaxed) == 0) { - // Exit immediately if workers were stopped - return; - } - std::this_thread::yield(); - } while (m_rx_dataset_init_thread_counter.load() != num_threads); - - // One of the threads updates cache - uv_rwlock_wrlock(&m_rx_dataset_lock); - - if (m_rx_algo != algorithm) { - switch (algorithm) { - case xmrig::Algorithm::RX_WOW: - randomx_apply_config(RandomX_WowneroConfig); - break; - - case xmrig::Algorithm::RX_LOKI: - randomx_apply_config(RandomX_LokiConfig); - break; - - default: - randomx_apply_config(RandomX_MoneroConfig); - break; - } - - m_rx_algo = algorithm; - } - - if (memcmp(m_rx_seed_hash, seed_hash, sizeof(m_rx_seed_hash)) != 0) { - memcpy(m_rx_seed_hash, seed_hash, sizeof(m_rx_seed_hash)); - randomx_init_cache(m_rx_cache, m_rx_seed_hash, sizeof(m_rx_seed_hash)); - } - - uv_rwlock_wrunlock(&m_rx_dataset_lock); - - // All threads update dataset - const uint32_t a = (randomx_dataset_item_count() * thread_id) / num_threads; - const uint32_t b = (randomx_dataset_item_count() * (thread_id + 1)) / num_threads; - randomx_init_dataset(m_rx_dataset, m_rx_cache, a, b - a); - - LOG_DEBUG("Thread %u finished updating RandomX dataset", thread_id); - - // Wait for all threads to complete - --m_rx_dataset_init_thread_counter; - do { - if 
(m_sequence.load(std::memory_order_relaxed) == 0) { - // Exit immediately if workers were stopped - return; - } - std::this_thread::yield(); - } while (m_rx_dataset_init_thread_counter.load() != 0); -} - -randomx_dataset* Workers::getDataset() -{ - if (m_rx_dataset) - return m_rx_dataset; - - uv_rwlock_wrlock(&m_rx_dataset_lock); - if (!m_rx_dataset) { - randomx_dataset* dataset = randomx_alloc_dataset(RANDOMX_FLAG_LARGE_PAGES); - if (!dataset) { - dataset = randomx_alloc_dataset(RANDOMX_FLAG_DEFAULT); - } - m_rx_cache = randomx_alloc_cache(static_cast(RANDOMX_FLAG_JIT | RANDOMX_FLAG_LARGE_PAGES)); - if (!m_rx_cache) { - m_rx_cache = randomx_alloc_cache(RANDOMX_FLAG_JIT); - } - m_rx_dataset = dataset; - } - uv_rwlock_wrunlock(&m_rx_dataset_lock); - - return m_rx_dataset; -} -#endif diff --git a/src/workers/Workers.h b/src/workers/Workers.h index 39e872b5..8619f973 100644 --- a/src/workers/Workers.h +++ b/src/workers/Workers.h @@ -73,11 +73,6 @@ public: static void threadsSummary(rapidjson::Document &doc); # endif -# ifdef XMRIG_ALGO_RANDOMX - static void updateDataset(const uint8_t* seed_hash, uint32_t num_threads, const xmrig::Algorithm &algorithm); - static randomx_dataset* getDataset(); -# endif - private: static void onReady(void *arg); static void onTick(uv_timer_t *handle); @@ -115,15 +110,6 @@ private: static uv_rwlock_t m_rwlock; static uv_timer_t *m_timer; static xmrig::Controller *m_controller; - -# ifdef XMRIG_ALGO_RANDOMX - static uv_rwlock_t m_rx_dataset_lock; - static randomx_cache *m_rx_cache; - static randomx_dataset *m_rx_dataset; - static uint8_t m_rx_seed_hash[32]; - static xmrig::Algorithm m_rx_algo; - static std::atomic m_rx_dataset_init_thread_counter; -# endif }; From 3bebf778da034d974718b0258ea7b74c3534d649 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 10 Jul 2019 02:28:45 +0700 Subject: [PATCH 016/172] Fixed build. 
--- src/api/v1/ApiRouter.cpp | 4 ++-- src/backend/cpu/platform/AdvancedCpuInfo.cpp | 1 + src/backend/cpu/platform/BasicCpuInfo.cpp | 2 +- src/backend/cpu/platform/BasicCpuInfo.h | 2 +- src/backend/cpu/platform/BasicCpuInfo_arm.cpp | 8 +++++++- 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index 0f754e17..ff1ef404 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -22,7 +22,7 @@ * along with this program. If not, see . */ -#include +#include #include #include @@ -44,7 +44,7 @@ static inline rapidjson::Value normalize(double d) { using namespace rapidjson; - if (!isnormal(d)) { + if (!std::isnormal(d)) { return Value(kNullType); } diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp index b5b2fe91..f3c4ed23 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -22,6 +22,7 @@ * along with this program. If not, see . 
*/ +#include #include #include #include diff --git a/src/backend/cpu/platform/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp index 26237468..369392b6 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -124,8 +124,8 @@ static inline bool has_ossave() xmrig::BasicCpuInfo::BasicCpuInfo() : m_assembly(Assembly::NONE), - m_brand(), m_aes(has_aes_ni()), + m_brand(), m_avx2(has_avx2() && has_ossave()), m_threads(std::thread::hardware_concurrency()) { diff --git a/src/backend/cpu/platform/BasicCpuInfo.h b/src/backend/cpu/platform/BasicCpuInfo.h index 12c275dd..886d59c3 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.h +++ b/src/backend/cpu/platform/BasicCpuInfo.h @@ -55,8 +55,8 @@ protected: private: Assembly m_assembly; + bool m_aes; char m_brand[64 + 6]; - const bool m_aes; const bool m_avx2; const size_t m_threads; }; diff --git a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp index 49e300e4..6702f6f0 100644 --- a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp @@ -37,8 +37,8 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : m_aes(false), - m_avx2(false), m_brand(), + m_avx2(false), m_threads(std::thread::hardware_concurrency()) { # ifdef XMRIG_ARMv8 @@ -61,3 +61,9 @@ size_t xmrig::BasicCpuInfo::optimalThreadsCount(size_t memSize, int maxCpuUsage) { return threads(); } + + +xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &algorithm) const +{ + return CpuThreads(threads()); +} From 270d3ba6a2bed53d132a4d8960ba867d35776137 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 10 Jul 2019 10:14:33 +0700 Subject: [PATCH 017/172] Added class RxVm. 
--- CMakeLists.txt | 2 ++ src/crypto/rx/Rx.cpp | 48 ++++++++++++++++---------- src/crypto/rx/RxCache.h | 2 +- src/crypto/rx/RxDataset.cpp | 4 +++ src/crypto/rx/RxVm.cpp | 68 +++++++++++++++++++++++++++++++++++++ src/crypto/rx/RxVm.h | 61 +++++++++++++++++++++++++++++++++ src/workers/MultiWorker.cpp | 21 +++--------- src/workers/MultiWorker.h | 10 +++--- 8 files changed, 176 insertions(+), 40 deletions(-) create mode 100644 src/crypto/rx/RxVm.cpp create mode 100644 src/crypto/rx/RxVm.h diff --git a/CMakeLists.txt b/CMakeLists.txt index c315e1cf..9094d381 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -197,6 +197,8 @@ if (WITH_RANDOMX) src/crypto/rx/RxCache.h src/crypto/rx/RxDataset.cpp src/crypto/rx/RxDataset.h + src/crypto/rx/RxVm.cpp + src/crypto/rx/RxVm.h ) if (NOT ARCH_ID) set(ARCH_ID ${CMAKE_HOST_SYSTEM_PROCESSOR}) diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 735169e2..630dd45a 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -90,29 +90,43 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor d_ptr->dataset = new RxDataset(hugePages); - const auto hugePages = d_ptr->dataset->hugePages(); - const double percent = hugePages.first == 0 ? 0.0 : static_cast(hugePages.first) / hugePages.second * 100.0; + if (d_ptr->dataset->get() != nullptr) { + const auto hugePages = d_ptr->dataset->hugePages(); + const double percent = hugePages.first == 0 ? 0.0 : static_cast(hugePages.first) / hugePages.second * 100.0; - LOG_INFO("%s" GREEN(" allocate done") " huge pages %s%u/%u %1.0f%%" CLEAR " %sJIT" BLACK_BOLD(" (%" PRIu64 " ms)"), - tag, - (hugePages.first == hugePages.second ? GREEN_BOLD_S : (hugePages.first == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), - hugePages.first, - hugePages.second, - percent, - d_ptr->dataset->cache()->isJIT() ? 
GREEN_BOLD_S "+" : RED_BOLD_S "-", - Chrono::steadyMSecs() - ts - ); + LOG_INFO("%s" GREEN(" allocate done") " huge pages %s%u/%u %1.0f%%" CLEAR " %sJIT" BLACK_BOLD(" (%" PRIu64 " ms)"), + tag, + (hugePages.first == hugePages.second ? GREEN_BOLD_S : (hugePages.first == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), + hugePages.first, + hugePages.second, + percent, + d_ptr->dataset->cache()->isJIT() ? GREEN_BOLD_S "+" : RED_BOLD_S "-", + Chrono::steadyMSecs() - ts + ); + } + else { + LOG_WARN(CLEAR "%s" YELLOW_BOLD_S " failed to allocate RandomX dataset, switching to slow mode", tag); + } } if (!d_ptr->dataset->isReady(seed, algorithm)) { const uint64_t ts = Chrono::steadyMSecs(); - LOG_INFO("%s" MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s") " threads " WHITE_BOLD("%u") BLACK_BOLD(" seed %s..."), - tag, - algorithm.shortName(), - d_ptr->initThreads, - Buffer::toHex(seed, 8).data() - ); + if (d_ptr->dataset->get() != nullptr) { + LOG_INFO("%s" MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s") " threads " WHITE_BOLD("%u") BLACK_BOLD(" seed %s..."), + tag, + algorithm.shortName(), + d_ptr->initThreads, + Buffer::toHex(seed, 8).data() + ); + } + else { + LOG_INFO("%s" MAGENTA_BOLD(" init cache") " algo " WHITE_BOLD("%s") BLACK_BOLD(" seed %s..."), + tag, + algorithm.shortName(), + Buffer::toHex(seed, 8).data() + ); + } d_ptr->dataset->init(seed, algorithm, d_ptr->initThreads); diff --git a/src/crypto/rx/RxCache.h b/src/crypto/rx/RxCache.h index 893ebf06..c48924a1 100644 --- a/src/crypto/rx/RxCache.h +++ b/src/crypto/rx/RxCache.h @@ -58,7 +58,7 @@ public: static inline constexpr size_t size() { return RANDOMX_CACHE_MAX_SIZE; } private: - int m_flags = 0; + int m_flags = 0; randomx_cache *m_cache = nullptr; uint8_t m_seed[32]; }; diff --git a/src/crypto/rx/RxDataset.cpp b/src/crypto/rx/RxDataset.cpp index 5c3b9f37..603cf578 100644 --- a/src/crypto/rx/RxDataset.cpp +++ b/src/crypto/rx/RxDataset.cpp @@ -76,6 +76,10 @@ bool xmrig::RxDataset::init(const void *seed, const 
Algorithm &algorithm, uint32 cache()->init(seed); + if (!get()) { + return true; + } + const uint32_t datasetItemCount = randomx_dataset_item_count(); if (numThreads > 1) { diff --git a/src/crypto/rx/RxVm.cpp b/src/crypto/rx/RxVm.cpp new file mode 100644 index 00000000..3ee0f859 --- /dev/null +++ b/src/crypto/rx/RxVm.cpp @@ -0,0 +1,68 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include "crypto/randomx/randomx.h" +#include "crypto/rx/RxCache.h" +#include "crypto/rx/RxDataset.h" +#include "crypto/rx/RxVm.h" + + +xmrig::RxVm::RxVm(RxDataset *dataset, bool hugePages, bool softAes) +{ + m_flags = RANDOMX_FLAG_JIT; + if (hugePages) { + m_flags |= RANDOMX_FLAG_LARGE_PAGES; + } + + if (!softAes) { + m_flags |= RANDOMX_FLAG_HARD_AES; + } + + if (dataset->get()) { + m_flags |= RANDOMX_FLAG_FULL_MEM; + } + + m_vm = randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get()); + + if (!m_vm) { + m_flags &= ~RANDOMX_FLAG_LARGE_PAGES; + m_vm = randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get()); + } + + if (!m_vm) { + m_flags &= ~RANDOMX_FLAG_JIT; + m_vm = randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get()); + } +} + + +xmrig::RxVm::~RxVm() +{ + if (m_vm) { + randomx_destroy_vm(m_vm); + } +} diff --git a/src/crypto/rx/RxVm.h b/src/crypto/rx/RxVm.h new file mode 100644 index 00000000..90af8187 --- /dev/null +++ b/src/crypto/rx/RxVm.h @@ -0,0 +1,61 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 tevador + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_RX_VM_H +#define XMRIG_RX_VM_H + + +#include + + +struct randomx_vm; + + +namespace xmrig +{ + + +class RxDataset; + + +class RxVm +{ +public: + RxVm(RxDataset *dataset, bool hugePages, bool softAes); + ~RxVm(); + + inline randomx_vm *get() const { return m_vm; } + +private: + int m_flags = 0; + randomx_vm *m_vm = nullptr; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_RX_CACHE_H */ diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index 059a7171..684d92f9 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/workers/MultiWorker.cpp @@ -29,7 +29,7 @@ #include "crypto/cn/CryptoNight_test.h" #include "crypto/rx/Rx.h" -#include "crypto/rx/RxDataset.h" +#include "crypto/rx/RxVm.h" #include "net/JobResults.h" #include "workers/CpuThreadLegacy.h" #include "workers/MultiWorker.h" @@ -52,9 +52,7 @@ xmrig::MultiWorker::~MultiWorker() Mem::release(m_ctx, N, m_memory); # ifdef XMRIG_ALGO_RANDOMX - if (m_rx_vm) { - randomx_destroy_vm(m_rx_vm); - } + delete m_vm; # endif } @@ -63,18 +61,9 @@ xmrig::MultiWorker::~MultiWorker() template void xmrig::MultiWorker::allocateRandomX_VM() { - if (!m_rx_vm) { - int flags = RANDOMX_FLAG_LARGE_PAGES | RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_JIT; - if (!m_thread->isSoftAES()) { - flags |= RANDOMX_FLAG_HARD_AES; - } - + if (!m_vm) { RxDataset *dataset = Rx::dataset(m_state.job.seedHash(), m_state.job.algorithm()); - - m_rx_vm = randomx_create_vm(static_cast(flags), nullptr, dataset->get()); - if (!m_rx_vm) { - m_rx_vm = randomx_create_vm(static_cast(flags - RANDOMX_FLAG_LARGE_PAGES), nullptr, dataset->get()); - } + m_vm = new RxVm(dataset, true, m_thread->isSoftAES()); } } #endif @@ -164,7 +153,7 @@ void xmrig::MultiWorker::start() # ifdef XMRIG_ALGO_RANDOMX if (m_state.job.algorithm().family() == Algorithm::RANDOM_X) { allocateRandomX_VM(); - randomx_calculate_hash(m_rx_vm, 
m_state.blob, m_state.job.size(), m_hash); + randomx_calculate_hash(m_vm->get(), m_state.blob, m_state.job.size(), m_hash); } else # endif diff --git a/src/workers/MultiWorker.h b/src/workers/MultiWorker.h index d695e030..0502ad84 100644 --- a/src/workers/MultiWorker.h +++ b/src/workers/MultiWorker.h @@ -27,11 +27,6 @@ #define XMRIG_MULTIWORKER_H -#ifdef XMRIG_ALGO_RANDOMX -# include -#endif - - #include "base/net/stratum/Job.h" #include "Mem.h" #include "net/JobResult.h" @@ -41,6 +36,9 @@ namespace xmrig { +class RxVm; + + template class MultiWorker : public Worker { @@ -81,7 +79,7 @@ private: uint8_t m_hash[N * 32]; # ifdef XMRIG_ALGO_RANDOMX - randomx_vm *m_rx_vm = nullptr; + RxVm *m_vm = nullptr; # endif }; From 8e2219b7c481d8cd692b1103d317254e03626bf7 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 10 Jul 2019 10:26:10 +0700 Subject: [PATCH 018/172] Fixed RandomX VM creation in some cases. --- src/crypto/rx/RxVm.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/crypto/rx/RxVm.cpp b/src/crypto/rx/RxVm.cpp index 3ee0f859..b02f708e 100644 --- a/src/crypto/rx/RxVm.cpp +++ b/src/crypto/rx/RxVm.cpp @@ -33,7 +33,6 @@ xmrig::RxVm::RxVm(RxDataset *dataset, bool hugePages, bool softAes) { - m_flags = RANDOMX_FLAG_JIT; if (hugePages) { m_flags |= RANDOMX_FLAG_LARGE_PAGES; } @@ -46,6 +45,10 @@ xmrig::RxVm::RxVm(RxDataset *dataset, bool hugePages, bool softAes) m_flags |= RANDOMX_FLAG_FULL_MEM; } + if (dataset->cache()->isJIT()) { + m_flags |= RANDOMX_FLAG_JIT; + } + m_vm = randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get()); if (!m_vm) { @@ -54,7 +57,7 @@ xmrig::RxVm::RxVm(RxDataset *dataset, bool hugePages, bool softAes) } if (!m_vm) { - m_flags &= ~RANDOMX_FLAG_JIT; + m_flags &= ~RANDOMX_FLAG_HARD_AES; m_vm = randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get()); } } From 6f27037f0759133aea66a366093744e983b49d9b Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 11 Jul 2019 16:15:51 
+0700 Subject: [PATCH 019/172] Added new nonce allocation method for dynamic/variable threads. --- CMakeLists.txt | 3 + src/core/WorkerJob.h | 143 ++++++++++++++++++++++++++++++++++++ src/crypto/common/Nonce.cpp | 83 +++++++++++++++++++++ src/crypto/common/Nonce.h | 57 ++++++++++++++ src/workers/MultiWorker.cpp | 88 ++++++---------------- src/workers/MultiWorker.h | 19 +---- src/workers/Worker.cpp | 5 +- src/workers/Worker.h | 5 +- src/workers/Workers.cpp | 21 ++++-- src/workers/Workers.h | 7 +- 10 files changed, 336 insertions(+), 95 deletions(-) create mode 100644 src/core/WorkerJob.h create mode 100644 src/crypto/common/Nonce.cpp create mode 100644 src/crypto/common/Nonce.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 9094d381..13f787f7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -35,6 +35,7 @@ set(HEADERS src/core/config/ConfigTransform.h src/core/config/usage.h src/core/Controller.h + src/core/WorkerJob.h src/interfaces/IThread.h src/interfaces/IWorker.h src/Mem.h @@ -71,6 +72,7 @@ set(HEADERS_CRYPTO src/crypto/cn/soft_aes.h src/crypto/common/Algorithm.h src/crypto/common/keccak.h + src/crypto/common/Nonce.h src/crypto/common/portable/mm_malloc.h src/crypto/common/VirtualMemory.h ) @@ -113,6 +115,7 @@ set(SOURCES_CRYPTO src/crypto/cn/CnHash.cpp src/crypto/common/Algorithm.cpp src/crypto/common/keccak.cpp + src/crypto/common/Nonce.cpp ) if (WIN32) diff --git a/src/core/WorkerJob.h b/src/core/WorkerJob.h new file mode 100644 index 00000000..004c5533 --- /dev/null +++ b/src/core/WorkerJob.h @@ -0,0 +1,143 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, 
either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_WORKERJOB_H +#define XMRIG_WORKERJOB_H + + +#include +#include + + +#include "base/net/stratum/Job.h" +#include "crypto/common/Nonce.h" + + +namespace xmrig { + + +template +class WorkerJob +{ +public: + inline const Job ¤tJob() const { return m_jobs[index()]; } + inline uint32_t *nonce(size_t i = 0) { return reinterpret_cast(blob() + (i * currentJob().size()) + 39); } + inline uint64_t sequence() const { return m_sequence; } + inline uint8_t *blob() { return m_blobs[index()]; } + inline uint8_t index() const { return m_index; } + + + inline void add(const Job &job, uint64_t sequence, uint32_t reserveCount) + { + m_sequence = sequence; + + if (currentJob() == job) { + return; + } + + if (index() == 1 && job.poolId() >= 0 && job == m_jobs[0]) { + return; + } + + save(job, reserveCount); + } + + + inline void nextRound(uint32_t reserveCount) + { + m_rounds[index()]++; + + if ((m_rounds[index()] % reserveCount) == 0) { + for (size_t i = 0; i < N; ++i) { + *nonce(i) = Nonce::next(index(), *nonce(i), reserveCount, currentJob().isNicehash()); + } + } + else { + for (size_t i = 0; i < N; ++i) { + *nonce(i) += 1; + } + } + } + + +private: + inline void save(const Job &job, uint32_t reserveCount) + { + m_index = job.poolId() == -1 ? 
1 : 0; + const size_t size = job.size(); + m_jobs[index()] = job; + m_rounds[index()] = 0; + + for (size_t i = 0; i < N; ++i) { + memcpy(m_blobs[index()] + (i * size), job.blob(), size); + *nonce(i) = Nonce::next(index(), *nonce(i), reserveCount, job.isNicehash()); + } + } + + + alignas(16) uint8_t m_blobs[2][Job::kMaxBlobSize * N]; + Job m_jobs[2]; + uint32_t m_rounds[2] = { 0, 0 }; + uint64_t m_sequence = 0; + uint8_t m_index = 0; +}; + + +template<> +inline uint32_t *xmrig::WorkerJob<1>::nonce(size_t) +{ + return reinterpret_cast(blob() + 39); +} + + +template<> +inline void xmrig::WorkerJob<1>::nextRound(uint32_t reserveCount) +{ + m_rounds[index()]++; + + if ((m_rounds[index()] % reserveCount) == 0) { + *nonce() = Nonce::next(index(), *nonce(), reserveCount, currentJob().isNicehash()); + } + else { + *nonce() += 1; + } +} + + +template<> +inline void xmrig::WorkerJob<1>::save(const Job &job, uint32_t reserveCount) +{ + m_index = job.poolId() == -1 ? 1 : 0; + m_jobs[index()] = job; + m_rounds[index()] = 0; + + memcpy(blob(), job.blob(), job.size()); + *nonce() = Nonce::next(index(), *nonce(), reserveCount, currentJob().isNicehash()); +} + + +} // namespace xmrig + + +#endif /* XMRIG_WORKERJOB_H */ diff --git a/src/crypto/common/Nonce.cpp b/src/crypto/common/Nonce.cpp new file mode 100644 index 00000000..6670308a --- /dev/null +++ b/src/crypto/common/Nonce.cpp @@ -0,0 +1,83 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#include + + +#include "crypto/common/Nonce.h" + + +namespace xmrig { + + +std::atomic Nonce::m_sequence; +uint32_t Nonce::m_nonces[2] = { 0, 0 }; + + +static uv_mutex_t mutex; +static Nonce nonce; + + +} // namespace xmrig + + +xmrig::Nonce::Nonce() +{ + m_sequence = 1; + + uv_mutex_init(&mutex); +} + + +uint32_t xmrig::Nonce::next(uint8_t index, uint32_t nonce, uint32_t reserveCount, bool nicehash) +{ + uint32_t next; + + uv_mutex_lock(&mutex); + + if (nicehash) { + next = (nonce & 0xFF000000) | m_nonces[index]; + } + else { + next = m_nonces[index]; + } + + m_nonces[index] += reserveCount; + + uv_mutex_unlock(&mutex); + + return next; +} + + +void xmrig::Nonce::reset(uint8_t index) +{ + uv_mutex_lock(&mutex); + + m_nonces[index] = 0; + m_sequence++; + + uv_mutex_unlock(&mutex); +} diff --git a/src/crypto/common/Nonce.h b/src/crypto/common/Nonce.h new file mode 100644 index 00000000..ea843bc9 --- /dev/null +++ b/src/crypto/common/Nonce.h @@ -0,0 +1,57 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_NONCE_H +#define XMRIG_NONCE_H + + +#include + + +namespace xmrig { + + +class Nonce +{ +public: + Nonce(); + + static inline bool isOutdated(uint64_t sequence) { return m_sequence.load(std::memory_order_relaxed) != sequence; } + static inline uint64_t sequence() { return m_sequence.load(std::memory_order_relaxed); } + static inline void stop() { m_sequence = 0; } + static inline void touch() { m_sequence++; } + + static uint32_t next(uint8_t index, uint32_t nonce, uint32_t reserveCount, bool nicehash); + static void reset(uint8_t index); + +private: + static uint32_t m_nonces[2]; + static std::atomic m_sequence; +}; + + +} // namespace xmrig + + +#endif /* XMRIG_NONCE_H */ diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index 684d92f9..daae9230 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/workers/MultiWorker.cpp @@ -28,6 +28,7 @@ #include "crypto/cn/CryptoNight_test.h" +#include "crypto/common/Nonce.h" #include "crypto/rx/Rx.h" #include "crypto/rx/RxVm.h" #include "net/JobResults.h" @@ -36,6 +37,13 @@ #include "workers/Workers.h" +namespace xmrig { + +static constexpr uint32_t kReserveCount = 4096; + +} // namespace xmrig + + template xmrig::MultiWorker::MultiWorker(ThreadHandle *handle) : Worker(handle) @@ -62,7 +70,7 @@ template void xmrig::MultiWorker::allocateRandomX_VM() { if (!m_vm) { - RxDataset *dataset = Rx::dataset(m_state.job.seedHash(), m_state.job.algorithm()); + RxDataset *dataset = Rx::dataset(m_job.currentJob().seedHash(), m_job.currentJob().algorithm()); m_vm = new RxVm(dataset, true, m_thread->isSoftAES()); } } @@ -131,44 +139,45 @@ 
bool xmrig::MultiWorker::selfTest() template void xmrig::MultiWorker::start() { - while (Workers::sequence() > 0) { + while (Nonce::sequence() > 0) { if (Workers::isPaused()) { do { std::this_thread::sleep_for(std::chrono::milliseconds(200)); } while (Workers::isPaused()); - if (Workers::sequence() == 0) { + if (Nonce::sequence() == 0) { break; } consumeJob(); } - while (!Workers::isOutdated(m_sequence)) { + while (!Nonce::isOutdated(m_job.sequence())) { if ((m_count & 0x7) == 0) { storeStats(); } + const Job &job = m_job.currentJob(); + # ifdef XMRIG_ALGO_RANDOMX - if (m_state.job.algorithm().family() == Algorithm::RANDOM_X) { + if (job.algorithm().family() == Algorithm::RANDOM_X) { allocateRandomX_VM(); - randomx_calculate_hash(m_vm->get(), m_state.blob, m_state.job.size(), m_hash); + randomx_calculate_hash(m_vm->get(), m_job.blob(), job.size(), m_hash); } else # endif { - m_thread->fn(m_state.job.algorithm())(m_state.blob, m_state.job.size(), m_hash, m_ctx, m_state.job.height()); + m_thread->fn(job.algorithm())(m_job.blob(), job.size(), m_hash, m_ctx, job.height()); } for (size_t i = 0; i < N; ++i) { - if (*reinterpret_cast(m_hash + (i * 32) + 24) < m_state.job.target()) { - JobResults::submit(JobResult(m_state.job.poolId(), m_state.job.id(), m_state.job.clientId(), *nonce(i), m_hash + (i * 32), m_state.job.diff(), m_state.job.algorithm())); + if (*reinterpret_cast(m_hash + (i * 32) + 24) < job.target()) { + JobResults::submit(JobResult(job.poolId(), job.id(), job.clientId(), *m_job.nonce(i), m_hash + (i * 32), job.diff(), job.algorithm())); } - - *nonce(i) += 1; } + m_job.nextRound(kReserveCount); m_count += N; std::this_thread::yield(); @@ -179,18 +188,6 @@ void xmrig::MultiWorker::start() } -template -bool xmrig::MultiWorker::resume(const xmrig::Job &job) -{ - if (m_state.job.poolId() == -1 && job.poolId() >= 0 && job.id() == m_pausedState.job.id()) { - m_state = m_pausedState; - return true; - } - - return false; -} - - template bool 
xmrig::MultiWorker::verify(const Algorithm &algorithm, const uint8_t *referenceValue) { @@ -215,10 +212,10 @@ bool xmrig::MultiWorker::verify2(const Algorithm &algorithm, const uint8_t *r for (size_t i = 0; i < (sizeof(cn_r_test_input) / sizeof(cn_r_test_input[0])); ++i) { const size_t size = cn_r_test_input[i].size; for (size_t k = 0; k < N; ++k) { - memcpy(m_state.blob + (k * size), cn_r_test_input[i].data, size); + memcpy(m_job.blob() + (k * size), cn_r_test_input[i].data, size); } - func(m_state.blob, size, m_hash, m_ctx, cn_r_test_input[i].height); + func(m_job.blob(), size, m_hash, m_ctx, cn_r_test_input[i].height); for (size_t k = 0; k < N; ++k) { if (memcmp(m_hash + k * 32, referenceValue + i * 32, sizeof m_hash / N) != 0) { @@ -258,46 +255,7 @@ bool MultiWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenc template void xmrig::MultiWorker::consumeJob() { - Job job = Workers::job(); - m_sequence = Workers::sequence(); - if (m_state.job == job) { - return; - } - - save(job); - - if (resume(job)) { - return; - } - - m_state.job = job; - - const size_t size = m_state.job.size(); - memcpy(m_state.blob, m_state.job.blob(), m_state.job.size()); - - if (N > 1) { - for (size_t i = 1; i < N; ++i) { - memcpy(m_state.blob + (i * size), m_state.blob, size); - } - } - - for (size_t i = 0; i < N; ++i) { - if (m_state.job.isNicehash()) { - *nonce(i) = (*nonce(i) & 0xff000000U) + (0xffffffU / m_totalWays * (m_offset + i)); - } - else { - *nonce(i) = 0xffffffffU / m_totalWays * (m_offset + i); - } - } -} - - -template -void xmrig::MultiWorker::save(const Job &job) -{ - if (job.poolId() == -1 && m_state.job.poolId() >= 0) { - m_pausedState = m_state; - } + m_job.add(Workers::job(), Nonce::sequence(), kReserveCount); } diff --git a/src/workers/MultiWorker.h b/src/workers/MultiWorker.h index 0502ad84..2bcb2333 100644 --- a/src/workers/MultiWorker.h +++ b/src/workers/MultiWorker.h @@ -28,6 +28,7 @@ #include "base/net/stratum/Job.h" +#include "core/WorkerJob.h" 
#include "Mem.h" #include "net/JobResult.h" #include "workers/Worker.h" @@ -55,29 +56,15 @@ private: void allocateRandomX_VM(); # endif - bool resume(const Job &job); bool verify(const Algorithm &algorithm, const uint8_t *referenceValue); bool verify2(const Algorithm &algorithm, const uint8_t *referenceValue); void consumeJob(); - void save(const Job &job); - - inline uint32_t *nonce(size_t index) - { - return reinterpret_cast(m_state.blob + (index * m_state.job.size()) + 39); - } - - struct State - { - alignas(16) uint8_t blob[Job::kMaxBlobSize * N]; - Job job; - }; - cryptonight_ctx *m_ctx[N]; - State m_pausedState; - State m_state; uint8_t m_hash[N * 32]; + WorkerJob m_job; + # ifdef XMRIG_ALGO_RANDOMX RxVm *m_vm = nullptr; # endif diff --git a/src/workers/Worker.cpp b/src/workers/Worker.cpp index 4f69d905..0c61b3cb 100644 --- a/src/workers/Worker.cpp +++ b/src/workers/Worker.cpp @@ -5,7 +5,9 @@ * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , - * Copyright 2016-2018 XMRig , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -38,7 +40,6 @@ Worker::Worker(ThreadHandle *handle) : m_hashCount(0), m_timestamp(0), m_count(0), - m_sequence(0), m_thread(static_cast(handle->config())) { if (xmrig::Cpu::info()->threads() > 1 && m_thread->affinity() != -1L) { diff --git a/src/workers/Worker.h b/src/workers/Worker.h index 3d40257d..13e437d3 100644 --- a/src/workers/Worker.h +++ b/src/workers/Worker.h @@ -5,7 +5,9 @@ * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , - * Copyright 2016-2018 XMRig , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General 
Public License as published by @@ -61,7 +63,6 @@ protected: std::atomic m_hashCount; std::atomic m_timestamp; uint64_t m_count; - uint64_t m_sequence; xmrig::CpuThreadLegacy *m_thread; }; diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index 58cccd9e..1ed27c40 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -33,6 +33,7 @@ #include "base/tools/Handle.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "crypto/common/Nonce.h" #include "crypto/rx/RxAlgo.h" #include "crypto/rx/RxCache.h" #include "crypto/rx/RxDataset.h" @@ -51,7 +52,6 @@ Hashrate *Workers::m_hashrate = nullptr; xmrig::Job Workers::m_job; Workers::LaunchStatus Workers::m_status; std::atomic Workers::m_paused; -std::atomic Workers::m_sequence; std::vector Workers::m_workers; uint64_t Workers::m_ticks = 0; uv_mutex_t Workers::m_mutex; @@ -90,6 +90,15 @@ size_t Workers::threads() } +void Workers::pause() +{ + m_active = false; + m_paused = 1; + + xmrig::Nonce::touch(); +} + + void Workers::printHashrate(bool detail) { assert(m_controller != nullptr); @@ -134,7 +143,7 @@ void Workers::setEnabled(bool enabled) } m_paused = enabled ? 0 : 1; - m_sequence++; + xmrig::Nonce::touch(); } @@ -146,6 +155,9 @@ void Workers::setJob(const xmrig::Job &job, bool donate) if (donate) { m_job.setPoolId(-1); } + + xmrig::Nonce::reset(donate ? 
1 : 0); + uv_rwlock_wrunlock(&m_rwlock); m_active = true; @@ -153,7 +165,6 @@ void Workers::setJob(const xmrig::Job &job, bool donate) return; } - m_sequence++; m_paused = 0; } @@ -183,7 +194,6 @@ void Workers::start(xmrig::Controller *controller) uv_mutex_init(&m_mutex); uv_rwlock_init(&m_rwlock); - m_sequence = 1; m_paused = 1; m_timer = new uv_timer_t; @@ -208,7 +218,8 @@ void Workers::stop() m_hashrate->stop(); m_paused = 0; - m_sequence = 0; + + xmrig::Nonce::stop(); for (size_t i = 0; i < m_workers.size(); ++i) { m_workers[i]->join(); diff --git a/src/workers/Workers.h b/src/workers/Workers.h index 8619f973..83777d0d 100644 --- a/src/workers/Workers.h +++ b/src/workers/Workers.h @@ -53,21 +53,19 @@ namespace xmrig { class Workers { public: - static xmrig::Job job(); static size_t hugePages(); static size_t threads(); + static void pause(); static void printHashrate(bool detail); static void setEnabled(bool enabled); static void setJob(const xmrig::Job &job, bool donate); static void start(xmrig::Controller *controller); static void stop(); + static xmrig::Job job(); static inline bool isEnabled() { return m_enabled; } - static inline bool isOutdated(uint64_t sequence) { return m_sequence.load(std::memory_order_relaxed) != sequence; } static inline bool isPaused() { return m_paused.load(std::memory_order_relaxed) == 1; } static inline Hashrate *hashrate() { return m_hashrate; } - static inline uint64_t sequence() { return m_sequence.load(std::memory_order_relaxed); } - static inline void pause() { m_active = false; m_paused = 1; m_sequence++; } # ifdef XMRIG_FEATURE_API static void threadsSummary(rapidjson::Document &doc); @@ -103,7 +101,6 @@ private: static xmrig::Job m_job; static LaunchStatus m_status; static std::atomic m_paused; - static std::atomic m_sequence; static std::vector m_workers; static uint64_t m_ticks; static uv_mutex_t m_mutex; From be7ff62c48adcf2644a48c3c494132bd13a913f9 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 12 Jul 2019 02:25:07 
+0700 Subject: [PATCH 020/172] Removed no longer required code. --- src/workers/ThreadHandle.cpp | 4 +--- src/workers/ThreadHandle.h | 6 +----- src/workers/Worker.cpp | 2 -- src/workers/Worker.h | 2 -- src/workers/Workers.cpp | 5 +---- 5 files changed, 3 insertions(+), 16 deletions(-) diff --git a/src/workers/ThreadHandle.cpp b/src/workers/ThreadHandle.cpp index 43ff950c..ced5f326 100644 --- a/src/workers/ThreadHandle.cpp +++ b/src/workers/ThreadHandle.cpp @@ -26,10 +26,8 @@ #include "workers/ThreadHandle.h" -ThreadHandle::ThreadHandle(xmrig::IThread *config, uint32_t offset, size_t totalWays) : +ThreadHandle::ThreadHandle(xmrig::IThread *config) : m_worker(nullptr), - m_totalWays(totalWays), - m_offset(offset), m_config(config) { } diff --git a/src/workers/ThreadHandle.h b/src/workers/ThreadHandle.h index f3e09ce5..c32aabf0 100644 --- a/src/workers/ThreadHandle.h +++ b/src/workers/ThreadHandle.h @@ -40,21 +40,17 @@ class IWorker; class ThreadHandle { public: - ThreadHandle(xmrig::IThread *config, uint32_t offset, size_t totalWays); + ThreadHandle(xmrig::IThread *config); void join(); void start(void (*callback) (void *)); inline IWorker *worker() const { return m_worker; } inline size_t threadId() const { return m_config->index(); } - inline size_t totalWays() const { return m_totalWays; } - inline uint32_t offset() const { return m_offset; } inline void setWorker(IWorker *worker) { assert(worker != nullptr); m_worker = worker; } inline xmrig::IThread *config() const { return m_config; } private: IWorker *m_worker; - size_t m_totalWays; - uint32_t m_offset; uv_thread_t m_thread; xmrig::IThread *m_config; }; diff --git a/src/workers/Worker.cpp b/src/workers/Worker.cpp index 0c61b3cb..3a9b693d 100644 --- a/src/workers/Worker.cpp +++ b/src/workers/Worker.cpp @@ -35,8 +35,6 @@ Worker::Worker(ThreadHandle *handle) : m_id(handle->threadId()), - m_totalWays(handle->totalWays()), - m_offset(handle->offset()), m_hashCount(0), m_timestamp(0), m_count(0), diff --git 
a/src/workers/Worker.h b/src/workers/Worker.h index 13e437d3..997771b0 100644 --- a/src/workers/Worker.h +++ b/src/workers/Worker.h @@ -57,8 +57,6 @@ protected: void storeStats(); const size_t m_id; - const size_t m_totalWays; - const uint32_t m_offset; MemInfo m_memory; std::atomic m_hashCount; std::atomic m_timestamp; diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index 1ed27c40..72d9a1d1 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -200,11 +200,8 @@ void Workers::start(xmrig::Controller *controller) uv_timer_init(uv_default_loop(), m_timer); uv_timer_start(m_timer, Workers::onTick, 500, 500); - uint32_t offset = 0; - for (xmrig::IThread *thread : threads) { - ThreadHandle *handle = new ThreadHandle(thread, offset, m_status.ways); - offset += thread->multiway(); + ThreadHandle *handle = new ThreadHandle(thread); m_workers.push_back(handle); handle->start(Workers::onReady); From 4643742d13c08d2e95934cf68223976011f655fe Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 13 Jul 2019 00:49:17 +0700 Subject: [PATCH 021/172] Refactoring --- src/base/net/stratum/Client.cpp | 4 +-- src/base/net/stratum/DaemonClient.cpp | 2 +- src/base/net/stratum/Job.cpp | 13 +------ src/base/net/stratum/Job.h | 18 +++++----- src/core/WorkerJob.h | 6 ++-- src/net/JobResult.h | 50 ++++++++++----------------- src/net/Network.cpp | 2 +- src/workers/MultiWorker.cpp | 2 +- src/workers/Workers.cpp | 6 ++-- 9 files changed, 39 insertions(+), 64 deletions(-) diff --git a/src/base/net/stratum/Client.cpp b/src/base/net/stratum/Client.cpp index 63123720..c1519573 100644 --- a/src/base/net/stratum/Client.cpp +++ b/src/base/net/stratum/Client.cpp @@ -162,7 +162,7 @@ int64_t xmrig::Client::submit(const JobResult &result) Buffer::toHex(reinterpret_cast(&result.nonce), 4, nonce); nonce[8] = '\0'; - Buffer::toHex(result.result, 32, data); + Buffer::toHex(result.result(), 32, data); data[64] = '\0'; # endif @@ -313,7 +313,7 @@ bool xmrig::Client::parseJob(const 
rapidjson::Value ¶ms, int *code) return false; } - Job job(m_id, has(), m_pool.algorithm(), m_rpcId); + Job job(has(), m_pool.algorithm(), m_rpcId); if (!job.setId(params["job_id"].GetString())) { *code = 3; diff --git a/src/base/net/stratum/DaemonClient.cpp b/src/base/net/stratum/DaemonClient.cpp index 70cc9151..0c141c7d 100644 --- a/src/base/net/stratum/DaemonClient.cpp +++ b/src/base/net/stratum/DaemonClient.cpp @@ -212,7 +212,7 @@ bool xmrig::DaemonClient::isOutdated(uint64_t height, const char *hash) const bool xmrig::DaemonClient::parseJob(const rapidjson::Value ¶ms, int *code) { - Job job(m_id, false, m_pool.algorithm(), String()); + Job job(false, m_pool.algorithm(), String()); String blocktemplate = Json::getString(params, kBlocktemplateBlob); if (blocktemplate.isNull() || !job.setBlob(Json::getString(params, "blockhashing_blob"))) { diff --git a/src/base/net/stratum/Job.cpp b/src/base/net/stratum/Job.cpp index 7e846b3b..a383bbf7 100644 --- a/src/base/net/stratum/Job.cpp +++ b/src/base/net/stratum/Job.cpp @@ -34,27 +34,16 @@ xmrig::Job::Job() : - m_nicehash(false), - m_poolId(-2), - m_size(0), - m_diff(0), - m_height(0), - m_target(0), m_blob(), m_seedHash() { } -xmrig::Job::Job(int poolId, bool nicehash, const Algorithm &algorithm, const String &clientId) : +xmrig::Job::Job(bool nicehash, const Algorithm &algorithm, const String &clientId) : m_algorithm(algorithm), m_nicehash(nicehash), - m_poolId(poolId), - m_size(0), m_clientId(clientId), - m_diff(0), - m_height(0), - m_target(0), m_blob(), m_seedHash() { diff --git a/src/base/net/stratum/Job.h b/src/base/net/stratum/Job.h index c229f95c..06d1be79 100644 --- a/src/base/net/stratum/Job.h +++ b/src/base/net/stratum/Job.h @@ -47,7 +47,7 @@ public: static constexpr const size_t kMaxBlobSize = 128; Job(); - Job(int poolId, bool nicehash, const Algorithm &algorithm, const String &clientId); + Job(bool nicehash, const Algorithm &algorithm, const String &clientId); ~Job(); bool isEqual(const Job &other) const; 
@@ -65,18 +65,18 @@ public: inline const uint32_t *nonce() const { return reinterpret_cast(m_blob + 39); } inline const uint8_t *blob() const { return m_blob; } inline const uint8_t *seedHash() const { return m_seedHash; } - inline int poolId() const { return m_poolId; } inline size_t size() const { return m_size; } inline uint32_t *nonce() { return reinterpret_cast(m_blob + 39); } inline uint64_t diff() const { return m_diff; } inline uint64_t height() const { return m_height; } inline uint64_t target() const { return m_target; } inline uint8_t fixedByte() const { return *(m_blob + 42); } + inline uint8_t index() const { return m_index; } inline void reset() { m_size = 0; m_diff = 0; } inline void setAlgorithm(const char *algo) { m_algorithm = algo; } inline void setClientId(const String &id) { m_clientId = id; } inline void setHeight(uint64_t height) { m_height = height; } - inline void setPoolId(int poolId) { m_poolId = poolId; } + inline void setIndex(uint8_t index) { m_index = index; } # ifdef XMRIG_PROXY_PROJECT inline char *rawBlob() { return m_rawBlob; } @@ -93,15 +93,15 @@ public: private: Algorithm m_algorithm; - bool m_nicehash; - int m_poolId; - size_t m_size; + bool m_nicehash = false; + size_t m_size = 0; String m_clientId; String m_id; - uint64_t m_diff; - uint64_t m_height; - uint64_t m_target; + uint64_t m_diff = 0; + uint64_t m_height = 0; + uint64_t m_target = 0; uint8_t m_blob[kMaxBlobSize]; + uint8_t m_index = 0; uint8_t m_seedHash[32]; # ifdef XMRIG_PROXY_PROJECT diff --git a/src/core/WorkerJob.h b/src/core/WorkerJob.h index 004c5533..7b598ee6 100644 --- a/src/core/WorkerJob.h +++ b/src/core/WorkerJob.h @@ -56,7 +56,7 @@ public: return; } - if (index() == 1 && job.poolId() >= 0 && job == m_jobs[0]) { + if (index() == 1 && job.index() == 0 && job == m_jobs[0]) { return; } @@ -84,7 +84,7 @@ public: private: inline void save(const Job &job, uint32_t reserveCount) { - m_index = job.poolId() == -1 ? 
1 : 0; + m_index = job.index(); const size_t size = job.size(); m_jobs[index()] = job; m_rounds[index()] = 0; @@ -128,7 +128,7 @@ inline void xmrig::WorkerJob<1>::nextRound(uint32_t reserveCount) template<> inline void xmrig::WorkerJob<1>::save(const Job &job, uint32_t reserveCount) { - m_index = job.poolId() == -1 ? 1 : 0; + m_index = job.index(); m_jobs[index()] = job; m_rounds[index()] = 0; diff --git a/src/net/JobResult.h b/src/net/JobResult.h index 9fe1238e..2c2fded5 100644 --- a/src/net/JobResult.h +++ b/src/net/JobResult.h @@ -41,43 +41,31 @@ namespace xmrig { class JobResult { public: - inline JobResult() : poolId(0), nonce(0), diff(0) {} - inline JobResult(int poolId, const String &jobId, const String &clientId, uint32_t nonce, const uint8_t *result, uint64_t diff, const Algorithm &algorithm) : - algorithm(algorithm), - poolId(poolId), - clientId(clientId), - jobId(jobId), + inline JobResult() {} + + inline JobResult(const Job &job, uint32_t nonce, const uint8_t *result) : + algorithm(job.algorithm()), + clientId(job.clientId()), + jobId(job.id()), nonce(nonce), - diff(diff) + diff(job.diff()), + index(job.index()) { - memcpy(this->result, result, sizeof(this->result)); + memcpy(m_result, result, sizeof(m_result)); } + inline const uint8_t *result() const { return m_result; } + inline uint64_t actualDiff() const { return Job::toDiff(reinterpret_cast(m_result)[3]); } - inline JobResult(const Job &job) : poolId(0), nonce(0), diff(0) - { - jobId = job.id(); - clientId = job.clientId(); - poolId = job.poolId(); - diff = job.diff(); - nonce = *job.nonce(); - algorithm = job.algorithm(); - } + const Algorithm algorithm; + const String clientId; + const String jobId; + const uint32_t nonce = 0; + const uint64_t diff = 0; + const uint8_t index = 0; - - inline uint64_t actualDiff() const - { - return Job::toDiff(reinterpret_cast(result)[3]); - } - - - Algorithm algorithm; - int poolId; - String clientId; - String jobId; - uint32_t nonce; - uint64_t diff; - uint8_t 
result[32]; +private: + uint8_t m_result[32]; }; diff --git a/src/net/Network.cpp b/src/net/Network.cpp index c08facb9..d40bebd1 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -144,7 +144,7 @@ void xmrig::Network::onJob(IStrategy *strategy, IClient *client, const Job &job) void xmrig::Network::onJobResult(const JobResult &result) { - if (result.poolId == -1 && m_donate) { + if (result.index == 1 && m_donate) { m_donate->submit(result); return; } diff --git a/src/workers/MultiWorker.cpp b/src/workers/MultiWorker.cpp index daae9230..1f06455a 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/workers/MultiWorker.cpp @@ -173,7 +173,7 @@ void xmrig::MultiWorker::start() for (size_t i = 0; i < N; ++i) { if (*reinterpret_cast(m_hash + (i * 32) + 24) < job.target()) { - JobResults::submit(JobResult(job.poolId(), job.id(), job.clientId(), *m_job.nonce(i), m_hash + (i * 32), job.diff(), job.algorithm())); + JobResults::submit(JobResult(job, *m_job.nonce(i), m_hash + (i * 32))); } } diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index 72d9a1d1..53a8b712 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -150,11 +150,9 @@ void Workers::setEnabled(bool enabled) void Workers::setJob(const xmrig::Job &job, bool donate) { uv_rwlock_wrlock(&m_rwlock); - m_job = job; - if (donate) { - m_job.setPoolId(-1); - } + m_job = job; + m_job.setIndex(donate ? 1 : 0); xmrig::Nonce::reset(donate ? 1 : 0); From 8b3f2d8fff01b230b47f54dfcbd12ef66beab274 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 13 Jul 2019 16:48:14 +0700 Subject: [PATCH 022/172] Move Platform. 
--- CMakeLists.txt | 5 ----- src/App.cpp | 1 - src/api/v1/ApiRouter.cpp | 2 +- src/base/base.cmake | 17 +++++++++++++++-- src/base/kernel/Base.cpp | 2 +- src/{common => base/kernel}/Platform.cpp | 0 src/{common => base/kernel}/Platform.h | 9 +++++++++ src/{common => base/kernel}/Platform_mac.cpp | 0 src/{common => base/kernel}/Platform_unix.cpp | 0 src/{common => base/kernel}/Platform_win.cpp | 0 src/base/net/http/HttpClient.cpp | 2 +- .../net/stratum/strategies/FailoverStrategy.cpp | 2 +- .../stratum/strategies/SinglePoolStrategy.cpp | 2 +- src/core/Controller.cpp | 1 - src/net/strategies/DonateStrategy.cpp | 2 +- src/workers/Worker.cpp | 2 +- 16 files changed, 31 insertions(+), 16 deletions(-) rename src/{common => base/kernel}/Platform.cpp (100%) rename src/{common => base/kernel}/Platform.h (90%) rename src/{common => base/kernel}/Platform_mac.cpp (100%) rename src/{common => base/kernel}/Platform_unix.cpp (100%) rename src/{common => base/kernel}/Platform_win.cpp (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 13f787f7..a6adda76 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,7 +27,6 @@ set(HEADERS "${HEADERS_BACKEND}" src/api/interfaces/IApiListener.h src/App.h - src/common/Platform.h src/common/xmrig.h src/core/config/Config_default.h src/core/config/Config_platform.h @@ -88,7 +87,6 @@ set(SOURCES "${SOURCES_BASE_HTTP}" "${SOURCES_BACKEND}" src/App.cpp - src/common/Platform.cpp src/core/config/Config.cpp src/core/config/ConfigTransform.cpp src/core/Controller.cpp @@ -123,7 +121,6 @@ if (WIN32) "${SOURCES_OS}" res/app.rc src/App_win.cpp - src/common/Platform_win.cpp src/Mem_win.cpp src/crypto/common/VirtualMemory_win.cpp ) @@ -134,7 +131,6 @@ elseif (APPLE) set(SOURCES_OS "${SOURCES_OS}" src/App_unix.cpp - src/common/Platform_mac.cpp src/Mem_unix.cpp src/crypto/common/VirtualMemory_unix.cpp ) @@ -142,7 +138,6 @@ else() set(SOURCES_OS "${SOURCES_OS}" src/App_unix.cpp - src/common/Platform_unix.cpp src/Mem_unix.cpp 
src/crypto/common/VirtualMemory_unix.cpp ) diff --git a/src/App.cpp b/src/App.cpp index 6e42ac30..5b2178ac 100644 --- a/src/App.cpp +++ b/src/App.cpp @@ -34,7 +34,6 @@ #include "base/io/Console.h" #include "base/io/log/Log.h" #include "base/kernel/Signals.h" -#include "common/Platform.h" #include "core/config/Config.h" #include "core/Controller.h" #include "Mem.h" diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index ff1ef404..18d97fda 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -31,7 +31,7 @@ #include "api/v1/ApiRouter.h" #include "backend/cpu/Cpu.h" #include "base/kernel/Base.h" -#include "common/Platform.h" +#include "base/kernel/Platform.h" #include "core/config/Config.h" #include "interfaces/IThread.h" #include "rapidjson/document.h" diff --git a/src/base/base.cmake b/src/base/base.cmake index dcc10495..b25d9743 100644 --- a/src/base/base.cmake +++ b/src/base/base.cmake @@ -26,6 +26,7 @@ set(HEADERS_BASE src/base/kernel/interfaces/IStrategyListener.h src/base/kernel/interfaces/ITimerListener.h src/base/kernel/interfaces/IWatcherListener.h + src/base/kernel/Platform.h src/base/kernel/Process.h src/base/kernel/Signals.h src/base/net/dns/Dns.h @@ -63,6 +64,7 @@ set(SOURCES_BASE src/base/kernel/config/BaseConfig.cpp src/base/kernel/config/BaseTransform.cpp src/base/kernel/Entry.cpp + src/base/kernel/Platform.cpp src/base/kernel/Process.cpp src/base/kernel/Signals.cpp src/base/net/dns/Dns.cpp @@ -83,9 +85,20 @@ set(SOURCES_BASE if (WIN32) - set(SOURCES_OS src/base/io/json/Json_win.cpp) + set(SOURCES_OS + src/base/io/json/Json_win.cpp + src/base/kernel/Platform_win.cpp + ) +elseif (APPLE) + set(SOURCES_OS + src/base/io/json/Json_unix.cpp + src/base/kernel/Platform_mac.cpp + ) else() - set(SOURCES_OS src/base/io/json/Json_unix.cpp) + set(SOURCES_OS + src/base/io/json/Json_unix.cpp + src/base/kernel//Platform_unix.cpp + ) endif() diff --git a/src/base/kernel/Base.cpp b/src/base/kernel/Base.cpp index 031daed7..46f32684 
100644 --- a/src/base/kernel/Base.cpp +++ b/src/base/kernel/Base.cpp @@ -35,8 +35,8 @@ #include "base/io/Watcher.h" #include "base/kernel/Base.h" #include "base/kernel/interfaces/IBaseListener.h" +#include "base/kernel/Platform.h" #include "base/kernel/Process.h" -#include "common/Platform.h" #include "core/config/Config.h" #include "core/config/ConfigTransform.h" diff --git a/src/common/Platform.cpp b/src/base/kernel/Platform.cpp similarity index 100% rename from src/common/Platform.cpp rename to src/base/kernel/Platform.cpp diff --git a/src/common/Platform.h b/src/base/kernel/Platform.h similarity index 90% rename from src/common/Platform.h rename to src/base/kernel/Platform.h index 85f08a2e..f3c2c719 100644 --- a/src/common/Platform.h +++ b/src/base/kernel/Platform.h @@ -35,6 +35,15 @@ class Platform { public: + static inline bool trySetThreadAffinity(int64_t cpu_id) + { + if (cpu_id < 0) { + return false; + } + + return setThreadAffinity(static_cast(cpu_id)); + } + static bool setThreadAffinity(uint64_t cpu_id); static uint32_t setTimerResolution(uint32_t resolution); static void init(const char *userAgent); diff --git a/src/common/Platform_mac.cpp b/src/base/kernel/Platform_mac.cpp similarity index 100% rename from src/common/Platform_mac.cpp rename to src/base/kernel/Platform_mac.cpp diff --git a/src/common/Platform_unix.cpp b/src/base/kernel/Platform_unix.cpp similarity index 100% rename from src/common/Platform_unix.cpp rename to src/base/kernel/Platform_unix.cpp diff --git a/src/common/Platform_win.cpp b/src/base/kernel/Platform_win.cpp similarity index 100% rename from src/common/Platform_win.cpp rename to src/base/kernel/Platform_win.cpp diff --git a/src/base/net/http/HttpClient.cpp b/src/base/net/http/HttpClient.cpp index 319bb4dd..113e2f13 100644 --- a/src/base/net/http/HttpClient.cpp +++ b/src/base/net/http/HttpClient.cpp @@ -29,10 +29,10 @@ #include "3rdparty/http-parser/http_parser.h" #include "base/io/log/Log.h" +#include "base/kernel/Platform.h" 
#include "base/net/dns/Dns.h" #include "base/net/http/HttpClient.h" #include "base/tools/Baton.h" -#include "common/Platform.h" namespace xmrig { diff --git a/src/base/net/stratum/strategies/FailoverStrategy.cpp b/src/base/net/stratum/strategies/FailoverStrategy.cpp index d5247229..9545e9e1 100644 --- a/src/base/net/stratum/strategies/FailoverStrategy.cpp +++ b/src/base/net/stratum/strategies/FailoverStrategy.cpp @@ -24,9 +24,9 @@ #include "base/kernel/interfaces/IStrategyListener.h" +#include "base/kernel/Platform.h" #include "base/net/stratum/Client.h" #include "base/net/stratum/strategies/FailoverStrategy.h" -#include "common/Platform.h" #ifdef XMRIG_FEATURE_HTTP diff --git a/src/base/net/stratum/strategies/SinglePoolStrategy.cpp b/src/base/net/stratum/strategies/SinglePoolStrategy.cpp index f432514e..6c6a6fc1 100644 --- a/src/base/net/stratum/strategies/SinglePoolStrategy.cpp +++ b/src/base/net/stratum/strategies/SinglePoolStrategy.cpp @@ -24,9 +24,9 @@ #include "base/kernel/interfaces/IStrategyListener.h" +#include "base/kernel/Platform.h" #include "base/net/stratum/Client.h" #include "base/net/stratum/strategies/SinglePoolStrategy.h" -#include "common/Platform.h" #ifdef XMRIG_FEATURE_HTTP diff --git a/src/core/Controller.cpp b/src/core/Controller.cpp index 8e2e03a1..81c67d7c 100644 --- a/src/core/Controller.cpp +++ b/src/core/Controller.cpp @@ -27,7 +27,6 @@ #include "backend/cpu/Cpu.h" -#include "common/Platform.h" #include "core/Controller.h" #include "net/Network.h" diff --git a/src/net/strategies/DonateStrategy.cpp b/src/net/strategies/DonateStrategy.cpp index 9669db9a..78c7acc5 100644 --- a/src/net/strategies/DonateStrategy.cpp +++ b/src/net/strategies/DonateStrategy.cpp @@ -26,13 +26,13 @@ #include +#include "base/kernel/Platform.h" #include "base/net/stratum/Client.h" #include "base/net/stratum/Job.h" #include "base/net/stratum/strategies/FailoverStrategy.h" #include "base/net/stratum/strategies/SinglePoolStrategy.h" #include "base/tools/Buffer.h" 
#include "base/tools/Timer.h" -#include "common/Platform.h" #include "common/xmrig.h" #include "core/config/Config.h" #include "core/Controller.h" diff --git a/src/workers/Worker.cpp b/src/workers/Worker.cpp index 3a9b693d..d85858af 100644 --- a/src/workers/Worker.cpp +++ b/src/workers/Worker.cpp @@ -27,7 +27,7 @@ #include "backend/cpu/Cpu.h" -#include "common/Platform.h" +#include "base/kernel/Platform.h" #include "workers/CpuThreadLegacy.h" #include "workers/ThreadHandle.h" #include "workers/Worker.h" From dc87ef60620b48ef9b3482583380850ff32bcc2d Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 13 Jul 2019 19:10:17 +0700 Subject: [PATCH 023/172] Removed xmrig.h. --- CMakeLists.txt | 1 - src/Mem_unix.cpp | 1 - src/Mem_win.cpp | 1 - src/base/kernel/config/BaseConfig.h | 1 - src/common/xmrig.h | 78 --------------------------- src/core/config/Config.cpp | 20 +++---- src/core/config/Config.h | 9 ++-- src/crypto/cn/CnHash.h | 16 +++++- src/crypto/cn/CryptoNight_monero.h | 1 - src/crypto/common/Assembly.h | 1 - src/net/strategies/DonateStrategy.cpp | 1 - src/workers/CpuThreadLegacy.cpp | 38 ++++++------- src/workers/CpuThreadLegacy.h | 11 ++-- 13 files changed, 53 insertions(+), 126 deletions(-) delete mode 100644 src/common/xmrig.h diff --git a/CMakeLists.txt b/CMakeLists.txt index a6adda76..48946c7e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,7 +27,6 @@ set(HEADERS "${HEADERS_BACKEND}" src/api/interfaces/IApiListener.h src/App.h - src/common/xmrig.h src/core/config/Config_default.h src/core/config/Config_platform.h src/core/config/Config.h diff --git a/src/Mem_unix.cpp b/src/Mem_unix.cpp index 9bdce0f5..4dc13e93 100644 --- a/src/Mem_unix.cpp +++ b/src/Mem_unix.cpp @@ -29,7 +29,6 @@ #include "base/io/log/Log.h" -#include "common/xmrig.h" #include "crypto/common/portable/mm_malloc.h" #include "crypto/common/VirtualMemory.h" #include "crypto/cn/CryptoNight.h" diff --git a/src/Mem_win.cpp b/src/Mem_win.cpp index 34460e9d..56b4521d 100644 --- a/src/Mem_win.cpp 
+++ b/src/Mem_win.cpp @@ -31,7 +31,6 @@ #include "base/io/log/Log.h" -#include "common/xmrig.h" #include "crypto/common/portable/mm_malloc.h" #include "crypto/common/VirtualMemory.h" #include "crypto/cn/CryptoNight.h" diff --git a/src/base/kernel/config/BaseConfig.h b/src/base/kernel/config/BaseConfig.h index 48d7c2cf..c5cf29a3 100644 --- a/src/base/kernel/config/BaseConfig.h +++ b/src/base/kernel/config/BaseConfig.h @@ -29,7 +29,6 @@ #include "base/kernel/interfaces/IConfig.h" #include "base/net/http/Http.h" #include "base/net/stratum/Pools.h" -#include "common/xmrig.h" struct option; diff --git a/src/common/xmrig.h b/src/common/xmrig.h deleted file mode 100644 index 169c4c1f..00000000 --- a/src/common/xmrig.h +++ /dev/null @@ -1,78 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef XMRIG_XMRIG_H -#define XMRIG_XMRIG_H - - -namespace xmrig -{ - - -//--av=1 For CPUs with hardware AES. -//--av=2 Lower power mode (double hash) of 1. -//--av=3 Software AES implementation. -//--av=4 Lower power mode (double hash) of 3. -enum AlgoVariant { - AV_AUTO, // --av=0 Automatic mode. 
- AV_SINGLE, // --av=1 Single hash mode - AV_DOUBLE, // --av=2 Double hash mode - AV_SINGLE_SOFT, // --av=3 Single hash mode (Software AES) - AV_DOUBLE_SOFT, // --av=4 Double hash mode (Software AES) - AV_TRIPLE, // --av=5 Triple hash mode - AV_QUAD, // --av=6 Quard hash mode - AV_PENTA, // --av=7 Penta hash mode - AV_TRIPLE_SOFT, // --av=8 Triple hash mode (Software AES) - AV_QUAD_SOFT, // --av=9 Quard hash mode (Software AES) - AV_PENTA_SOFT, // --av=10 Penta hash mode (Software AES) - AV_MAX -}; - - -enum AlgoVerify { - VERIFY_HW_AES = 1, - VERIFY_SOFT_AES = 2 -}; - - -enum AesMode { - AES_AUTO, - AES_HW, - AES_SOFT -}; - - -enum OclVendor { - OCL_VENDOR_UNKNOWN = -2, - OCL_VENDOR_MANUAL = -1, - OCL_VENDOR_AMD = 0, - OCL_VENDOR_NVIDIA = 1, - OCL_VENDOR_INTEL = 2 -}; - - -} /* namespace xmrig */ - - -#endif /* XMRIG_XMRIG_H */ diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 784e171c..02ef9c90 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -40,7 +40,7 @@ xmrig::Config::Config() : - m_algoVariant(AV_AUTO), + m_algoVariant(CnHash::AV_AUTO), m_shouldSave(false) { } @@ -131,7 +131,7 @@ bool xmrig::Config::finalize() return true; } - const AlgoVariant av = getAlgoVariant(); + const CnHash::AlgoVariant av = getAlgoVariant(); m_threads.mode = m_threads.count ? 
Simple : Automatic; const size_t size = CpuThreadLegacy::multiway(av) * CnAlgo<>::memory(algorithm) / 1024; // FIXME MEMORY @@ -158,8 +158,8 @@ bool xmrig::Config::finalize() void xmrig::Config::setAlgoVariant(int av) { - if (av >= AV_AUTO && av < AV_MAX) { - m_algoVariant = static_cast(av); + if (av >= CnHash::AV_AUTO && av < CnHash::AV_MAX) { + m_algoVariant = static_cast(av); } } @@ -192,7 +192,7 @@ void xmrig::Config::setThreads(const rapidjson::Value &threads) } -xmrig::AlgoVariant xmrig::Config::getAlgoVariant() const +xmrig::CnHash::AlgoVariant xmrig::Config::getAlgoVariant() const { # ifdef XMRIG_ALGO_CN_LITE // if (m_algorithm.algo() == xmrig::CRYPTONIGHT_LITE) { // FIXME @@ -200,8 +200,8 @@ xmrig::AlgoVariant xmrig::Config::getAlgoVariant() const // } # endif - if (m_algoVariant <= AV_AUTO || m_algoVariant >= AV_MAX) { - return Cpu::info()->hasAES() ? AV_SINGLE : AV_SINGLE_SOFT; + if (m_algoVariant <= CnHash::AV_AUTO || m_algoVariant >= CnHash::AV_MAX) { + return Cpu::info()->hasAES() ? CnHash::AV_SINGLE : CnHash::AV_SINGLE_SOFT; } // if (m_safe && !Cpu::info()->hasAES() && m_algoVariant <= AV_DOUBLE) { @@ -213,10 +213,10 @@ xmrig::AlgoVariant xmrig::Config::getAlgoVariant() const #ifdef XMRIG_ALGO_CN_LITE -xmrig::AlgoVariant xmrig::Config::getAlgoVariantLite() const +xmrig::CnHash::AlgoVariant xmrig::Config::getAlgoVariantLite() const { - if (m_algoVariant <= AV_AUTO || m_algoVariant >= AV_MAX) { - return Cpu::info()->hasAES() ? AV_DOUBLE : AV_DOUBLE_SOFT; + if (m_algoVariant <= CnHash::AV_AUTO || m_algoVariant >= CnHash::AV_MAX) { + return Cpu::info()->hasAES() ? 
CnHash::AV_DOUBLE : CnHash::AV_DOUBLE_SOFT; } // if (m_safe && !Cpu::info()->hasAES() && m_algoVariant <= AV_DOUBLE) { diff --git a/src/core/config/Config.h b/src/core/config/Config.h index 7b765892..aa547796 100644 --- a/src/core/config/Config.h +++ b/src/core/config/Config.h @@ -32,7 +32,6 @@ #include "backend/cpu/CpuConfig.h" #include "base/kernel/config/BaseConfig.h" -#include "common/xmrig.h" #include "rapidjson/fwd.h" #include "workers/CpuThreadLegacy.h" @@ -58,7 +57,7 @@ public: bool read(const IJsonReader &reader, const char *fileName) override; void getJSON(rapidjson::Document &doc) const override; - inline AlgoVariant algoVariant() const { return m_algoVariant; } + inline CnHash::AlgoVariant algoVariant() const { return m_algoVariant; } inline bool isShouldSave() const { return (m_shouldSave || m_upgrade || m_cpu.isShouldSave()) && isAutoSave(); } inline const CpuConfig &cpu() const { return m_cpu; } inline const std::vector &threads() const { return m_threads.list; } @@ -70,9 +69,9 @@ private: void setAlgoVariant(int av); void setThreads(const rapidjson::Value &threads); - AlgoVariant getAlgoVariant() const; + CnHash::AlgoVariant getAlgoVariant() const; # ifdef XMRIG_ALGO_CN_LITE - AlgoVariant getAlgoVariantLite() const; + CnHash::AlgoVariant getAlgoVariantLite() const; # endif struct Threads @@ -87,7 +86,7 @@ private: }; - AlgoVariant m_algoVariant; + CnHash::AlgoVariant m_algoVariant; bool m_shouldSave; CpuConfig m_cpu; Threads m_threads; diff --git a/src/crypto/cn/CnHash.h b/src/crypto/cn/CnHash.h index b57bff4c..fdfcc9f3 100644 --- a/src/crypto/cn/CnHash.h +++ b/src/crypto/cn/CnHash.h @@ -31,7 +31,6 @@ #include -#include "common/xmrig.h" #include "crypto/cn/CnAlgo.h" #include "crypto/common/Assembly.h" @@ -49,6 +48,21 @@ typedef void (*cn_mainloop_fun)(cryptonight_ctx **ctx); class CnHash { public: + enum AlgoVariant { + AV_AUTO, // --av=0 Automatic mode. 
+ AV_SINGLE, // --av=1 Single hash mode + AV_DOUBLE, // --av=2 Double hash mode + AV_SINGLE_SOFT, // --av=3 Single hash mode (Software AES) + AV_DOUBLE_SOFT, // --av=4 Double hash mode (Software AES) + AV_TRIPLE, // --av=5 Triple hash mode + AV_QUAD, // --av=6 Quard hash mode + AV_PENTA, // --av=7 Penta hash mode + AV_TRIPLE_SOFT, // --av=8 Triple hash mode (Software AES) + AV_QUAD_SOFT, // --av=9 Quard hash mode (Software AES) + AV_PENTA_SOFT, // --av=10 Penta hash mode (Software AES) + AV_MAX + }; + CnHash(); cn_hash_fun fn(const Algorithm &algorithm, AlgoVariant av, Assembly::Id assembly) const; diff --git a/src/crypto/cn/CryptoNight_monero.h b/src/crypto/cn/CryptoNight_monero.h index 13948dcd..dc012bdc 100644 --- a/src/crypto/cn/CryptoNight_monero.h +++ b/src/crypto/cn/CryptoNight_monero.h @@ -178,7 +178,6 @@ #endif #endif -#include "common/xmrig.h" #include "crypto/cn/r/variant4_random_math.h" #define VARIANT4_RANDOM_MATH_INIT(part) \ diff --git a/src/crypto/common/Assembly.h b/src/crypto/common/Assembly.h index afd8a536..5ea29e11 100644 --- a/src/crypto/common/Assembly.h +++ b/src/crypto/common/Assembly.h @@ -26,7 +26,6 @@ #define XMRIG_ASSEMBLY_H -#include "common/xmrig.h" #include "rapidjson/fwd.h" diff --git a/src/net/strategies/DonateStrategy.cpp b/src/net/strategies/DonateStrategy.cpp index 78c7acc5..2d0a5b43 100644 --- a/src/net/strategies/DonateStrategy.cpp +++ b/src/net/strategies/DonateStrategy.cpp @@ -33,7 +33,6 @@ #include "base/net/stratum/strategies/SinglePoolStrategy.h" #include "base/tools/Buffer.h" #include "base/tools/Timer.h" -#include "common/xmrig.h" #include "core/config/Config.h" #include "core/Controller.h" #include "crypto/common/keccak.h" diff --git a/src/workers/CpuThreadLegacy.cpp b/src/workers/CpuThreadLegacy.cpp index df9b9904..b8e33839 100644 --- a/src/workers/CpuThreadLegacy.cpp +++ b/src/workers/CpuThreadLegacy.cpp @@ -38,7 +38,7 @@ static const xmrig::CnHash cnHash; -xmrig::CpuThreadLegacy::CpuThreadLegacy(size_t index, 
Algorithm algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly) : +xmrig::CpuThreadLegacy::CpuThreadLegacy(size_t index, Algorithm algorithm, CnHash::AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly) : m_algorithm(algorithm), m_av(av), m_assembly(assembly), @@ -59,15 +59,15 @@ xmrig::cn_hash_fun xmrig::CpuThreadLegacy::fn(const Algorithm &algorithm) const -bool xmrig::CpuThreadLegacy::isSoftAES(AlgoVariant av) +bool xmrig::CpuThreadLegacy::isSoftAES(CnHash::AlgoVariant av) { - return av == AV_SINGLE_SOFT || av == AV_DOUBLE_SOFT || av > AV_PENTA; + return av == CnHash::AV_SINGLE_SOFT || av == CnHash::AV_DOUBLE_SOFT || av > CnHash::AV_PENTA; } -xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromAV(size_t index, const Algorithm &algorithm, AlgoVariant av, int64_t affinity, int priority, Assembly assembly) +xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromAV(size_t index, const Algorithm &algorithm, CnHash::AlgoVariant av, int64_t affinity, int priority, Assembly assembly) { - assert(av > AV_AUTO && av < AV_MAX); + assert(av > CnHash::AV_AUTO && av < CnHash::AV_MAX); int64_t cpuId = -1L; @@ -94,7 +94,7 @@ xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromAV(size_t index, const xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromData(size_t index, const Algorithm &algorithm, const CpuThreadLegacy::Data &data, int priority, bool softAES) { - int av = AV_AUTO; + int av = CnHash::AV_AUTO; const Multiway multiway = data.multiway; if (multiway <= DoubleWay) { @@ -104,9 +104,9 @@ xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromData(size_t index, con av = softAES ? 
(multiway + 5) : (multiway + 2); } - assert(av > AV_AUTO && av < AV_MAX); + assert(av > CnHash::AV_AUTO && av < CnHash::AV_MAX); - return new CpuThreadLegacy(index, algorithm, static_cast(av), multiway, data.affinity, priority, softAES, false, data.assembly); + return new CpuThreadLegacy(index, algorithm, static_cast(av), multiway, data.affinity, priority, softAES, false, data.assembly); } @@ -140,27 +140,27 @@ xmrig::CpuThreadLegacy::Data xmrig::CpuThreadLegacy::parse(const rapidjson::Valu } -xmrig::IThread::Multiway xmrig::CpuThreadLegacy::multiway(AlgoVariant av) +xmrig::IThread::Multiway xmrig::CpuThreadLegacy::multiway(CnHash::AlgoVariant av) { switch (av) { - case AV_SINGLE: - case AV_SINGLE_SOFT: + case CnHash::AV_SINGLE: + case CnHash::AV_SINGLE_SOFT: return SingleWay; - case AV_DOUBLE_SOFT: - case AV_DOUBLE: + case CnHash::AV_DOUBLE_SOFT: + case CnHash::AV_DOUBLE: return DoubleWay; - case AV_TRIPLE_SOFT: - case AV_TRIPLE: + case CnHash::AV_TRIPLE_SOFT: + case CnHash::AV_TRIPLE: return TripleWay; - case AV_QUAD_SOFT: - case AV_QUAD: + case CnHash::AV_QUAD_SOFT: + case CnHash::AV_QUAD: return QuadWay; - case AV_PENTA_SOFT: - case AV_PENTA: + case CnHash::AV_PENTA_SOFT: + case CnHash::AV_PENTA: return PentaWay; default: diff --git a/src/workers/CpuThreadLegacy.h b/src/workers/CpuThreadLegacy.h index ed69d8ac..4553295c 100644 --- a/src/workers/CpuThreadLegacy.h +++ b/src/workers/CpuThreadLegacy.h @@ -26,7 +26,6 @@ #define XMRIG_CPUTHREADLEGACY_H -#include "common/xmrig.h" #include "crypto/cn/CnHash.h" #include "interfaces/IThread.h" @@ -59,15 +58,15 @@ public: }; - CpuThreadLegacy(size_t index, Algorithm algorithm, AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly); + CpuThreadLegacy(size_t index, Algorithm algorithm, CnHash::AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly); cn_hash_fun fn(const Algorithm &algorithm) const; - static 
bool isSoftAES(AlgoVariant av); - static CpuThreadLegacy *createFromAV(size_t index, const Algorithm &algorithm, AlgoVariant av, int64_t affinity, int priority, Assembly assembly); + static bool isSoftAES(CnHash::AlgoVariant av); + static CpuThreadLegacy *createFromAV(size_t index, const Algorithm &algorithm, CnHash::AlgoVariant av, int64_t affinity, int priority, Assembly assembly); static CpuThreadLegacy *createFromData(size_t index, const Algorithm &algorithm, const CpuThreadLegacy::Data &data, int priority, bool softAES); static Data parse(const rapidjson::Value &object); - static Multiway multiway(AlgoVariant av); + static Multiway multiway(CnHash::AlgoVariant av); inline bool isPrefetch() const { return m_prefetch; } inline bool isSoftAES() const { return m_softAES; } @@ -92,7 +91,7 @@ protected: private: const Algorithm m_algorithm; - const AlgoVariant m_av; + const CnHash::AlgoVariant m_av; const Assembly m_assembly; const bool m_prefetch; const bool m_softAES; From ee434a57081b7c61b13b54ab49518a2fc5f23b0b Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 13 Jul 2019 22:15:53 +0700 Subject: [PATCH 024/172] Move files. 
--- CMakeLists.txt | 7 ----- src/api/v1/ApiRouter.cpp | 2 +- src/backend/backend.cmake | 9 +++--- src/backend/{ => common}/Threads.cpp | 2 +- src/backend/{ => common}/Threads.h | 0 src/{workers => backend/common}/Worker.cpp | 2 +- src/{workers => backend/common}/Worker.h | 2 +- src/{core => backend/common}/WorkerJob.h | 0 src/backend/common/common.cmake | 12 ++++++++ src/{ => backend/common}/interfaces/IThread.h | 0 src/{ => backend/common}/interfaces/IWorker.h | 0 src/backend/cpu/CpuConfig.h | 2 +- .../cpu/CpuWorker.cpp} | 30 +++++++++---------- .../MultiWorker.h => backend/cpu/CpuWorker.h} | 27 ++++++++++++----- src/backend/cpu/cpu.cmake | 12 ++++---- src/workers/CpuThreadLegacy.h | 2 +- src/workers/ThreadHandle.h | 2 +- src/workers/Workers.cpp | 16 ++++------ 18 files changed, 71 insertions(+), 56 deletions(-) rename src/backend/{ => common}/Threads.cpp (99%) rename src/backend/{ => common}/Threads.h (100%) rename src/{workers => backend/common}/Worker.cpp (98%) rename src/{workers => backend/common}/Worker.h (97%) rename src/{core => backend/common}/WorkerJob.h (100%) create mode 100644 src/backend/common/common.cmake rename src/{ => backend/common}/interfaces/IThread.h (100%) rename src/{ => backend/common}/interfaces/IWorker.h (100%) rename src/{workers/MultiWorker.cpp => backend/cpu/CpuWorker.cpp} (90%) rename src/{workers/MultiWorker.h => backend/cpu/CpuWorker.h} (78%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 48946c7e..8291f606 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,9 +33,6 @@ set(HEADERS src/core/config/ConfigTransform.h src/core/config/usage.h src/core/Controller.h - src/core/WorkerJob.h - src/interfaces/IThread.h - src/interfaces/IWorker.h src/Mem.h src/net/interfaces/IJobResultListener.h src/net/JobResult.h @@ -47,9 +44,7 @@ set(HEADERS src/version.h src/workers/CpuThreadLegacy.h src/workers/Hashrate.h - src/workers/MultiWorker.h src/workers/ThreadHandle.h - src/workers/Worker.h src/workers/Workers.h ) @@ -97,9 +92,7 @@ 
set(SOURCES src/Summary.cpp src/workers/CpuThreadLegacy.cpp src/workers/Hashrate.cpp - src/workers/MultiWorker.cpp src/workers/ThreadHandle.cpp - src/workers/Worker.cpp src/workers/Workers.cpp src/xmrig.cpp ) diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index 18d97fda..5ed94c4b 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -29,11 +29,11 @@ #include "api/interfaces/IApiRequest.h" #include "api/v1/ApiRouter.h" +#include "backend/common/interfaces/IThread.h" #include "backend/cpu/Cpu.h" #include "base/kernel/Base.h" #include "base/kernel/Platform.h" #include "core/config/Config.h" -#include "interfaces/IThread.h" #include "rapidjson/document.h" #include "version.h" #include "workers/Hashrate.h" diff --git a/src/backend/backend.cmake b/src/backend/backend.cmake index 750cc9cb..c37cf262 100644 --- a/src/backend/backend.cmake +++ b/src/backend/backend.cmake @@ -1,12 +1,13 @@ include (src/backend/cpu/cpu.cmake) +include (src/backend/common/common.cmake) set(HEADERS_BACKEND - "${HEADERS_CPU}" - src/backend/Threads.h + "${HEADERS_BACKEND_COMMON}" + "${HEADERS_BACKEND_CPU}" ) set(SOURCES_BACKEND - "${SOURCES_CPU}" - src/backend/Threads.cpp + "${SOURCES_BACKEND_COMMON}" + "${SOURCES_BACKEND_CPU}" ) diff --git a/src/backend/Threads.cpp b/src/backend/common/Threads.cpp similarity index 99% rename from src/backend/Threads.cpp rename to src/backend/common/Threads.cpp index 11e1ec15..4cb9d4c6 100644 --- a/src/backend/Threads.cpp +++ b/src/backend/common/Threads.cpp @@ -23,8 +23,8 @@ */ +#include "backend/common/Threads.h" #include "backend/cpu/CpuThread.h" -#include "backend/Threads.h" #include "rapidjson/document.h" diff --git a/src/backend/Threads.h b/src/backend/common/Threads.h similarity index 100% rename from src/backend/Threads.h rename to src/backend/common/Threads.h diff --git a/src/workers/Worker.cpp b/src/backend/common/Worker.cpp similarity index 98% rename from src/workers/Worker.cpp rename to src/backend/common/Worker.cpp 
index d85858af..f0457bbb 100644 --- a/src/workers/Worker.cpp +++ b/src/backend/common/Worker.cpp @@ -26,11 +26,11 @@ #include +#include "backend/common/Worker.h" #include "backend/cpu/Cpu.h" #include "base/kernel/Platform.h" #include "workers/CpuThreadLegacy.h" #include "workers/ThreadHandle.h" -#include "workers/Worker.h" Worker::Worker(ThreadHandle *handle) : diff --git a/src/workers/Worker.h b/src/backend/common/Worker.h similarity index 97% rename from src/workers/Worker.h rename to src/backend/common/Worker.h index 997771b0..3e1d202f 100644 --- a/src/workers/Worker.h +++ b/src/backend/common/Worker.h @@ -31,7 +31,7 @@ #include -#include "interfaces/IWorker.h" +#include "backend/common/interfaces/IWorker.h" #include "Mem.h" diff --git a/src/core/WorkerJob.h b/src/backend/common/WorkerJob.h similarity index 100% rename from src/core/WorkerJob.h rename to src/backend/common/WorkerJob.h diff --git a/src/backend/common/common.cmake b/src/backend/common/common.cmake new file mode 100644 index 00000000..e7caa593 --- /dev/null +++ b/src/backend/common/common.cmake @@ -0,0 +1,12 @@ +set(HEADERS_BACKEND_COMMON + src/backend/common/interfaces/IThread.h + src/backend/common/interfaces/IWorker.h + src/backend/common/Threads.h + src/backend/common/Worker.h + src/backend/common/WorkerJob.h + ) + +set(SOURCES_BACKEND_COMMON + src/backend/common/Threads.cpp + src/backend/common/Worker.cpp + ) diff --git a/src/interfaces/IThread.h b/src/backend/common/interfaces/IThread.h similarity index 100% rename from src/interfaces/IThread.h rename to src/backend/common/interfaces/IThread.h diff --git a/src/interfaces/IWorker.h b/src/backend/common/interfaces/IWorker.h similarity index 100% rename from src/interfaces/IWorker.h rename to src/backend/common/interfaces/IWorker.h diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h index 66da3a5f..88222ab1 100644 --- a/src/backend/cpu/CpuConfig.h +++ b/src/backend/cpu/CpuConfig.h @@ -26,8 +26,8 @@ #define XMRIG_CPUCONFIG_H 
+#include "backend/common/Threads.h" #include "backend/cpu/CpuThread.h" -#include "backend/Threads.h" #include "crypto/common/Assembly.h" diff --git a/src/workers/MultiWorker.cpp b/src/backend/cpu/CpuWorker.cpp similarity index 90% rename from src/workers/MultiWorker.cpp rename to src/backend/cpu/CpuWorker.cpp index 1f06455a..fc98048d 100644 --- a/src/workers/MultiWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -27,13 +27,13 @@ #include +#include "backend/cpu/CpuWorker.h" #include "crypto/cn/CryptoNight_test.h" #include "crypto/common/Nonce.h" #include "crypto/rx/Rx.h" #include "crypto/rx/RxVm.h" #include "net/JobResults.h" #include "workers/CpuThreadLegacy.h" -#include "workers/MultiWorker.h" #include "workers/Workers.h" @@ -45,7 +45,7 @@ static constexpr uint32_t kReserveCount = 4096; template -xmrig::MultiWorker::MultiWorker(ThreadHandle *handle) +xmrig::CpuWorker::CpuWorker(ThreadHandle *handle) : Worker(handle) { if (m_thread->algorithm().family() != Algorithm::RANDOM_X) { @@ -55,7 +55,7 @@ xmrig::MultiWorker::MultiWorker(ThreadHandle *handle) template -xmrig::MultiWorker::~MultiWorker() +xmrig::CpuWorker::~CpuWorker() { Mem::release(m_ctx, N, m_memory); @@ -67,7 +67,7 @@ xmrig::MultiWorker::~MultiWorker() #ifdef XMRIG_ALGO_RANDOMX template -void xmrig::MultiWorker::allocateRandomX_VM() +void xmrig::CpuWorker::allocateRandomX_VM() { if (!m_vm) { RxDataset *dataset = Rx::dataset(m_job.currentJob().seedHash(), m_job.currentJob().algorithm()); @@ -78,7 +78,7 @@ void xmrig::MultiWorker::allocateRandomX_VM() template -bool xmrig::MultiWorker::selfTest() +bool xmrig::CpuWorker::selfTest() { if (m_thread->algorithm().family() == Algorithm::CN) { const bool rc = verify(Algorithm::CN_0, test_output_v0) && @@ -137,7 +137,7 @@ bool xmrig::MultiWorker::selfTest() template -void xmrig::MultiWorker::start() +void xmrig::CpuWorker::start() { while (Nonce::sequence() > 0) { if (Workers::isPaused()) { @@ -189,7 +189,7 @@ void xmrig::MultiWorker::start() template -bool 
xmrig::MultiWorker::verify(const Algorithm &algorithm, const uint8_t *referenceValue) +bool xmrig::CpuWorker::verify(const Algorithm &algorithm, const uint8_t *referenceValue) { cn_hash_fun func = m_thread->fn(algorithm); if (!func) { @@ -202,7 +202,7 @@ bool xmrig::MultiWorker::verify(const Algorithm &algorithm, const uint8_t *re template -bool xmrig::MultiWorker::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) +bool xmrig::CpuWorker::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) { cn_hash_fun func = m_thread->fn(algorithm); if (!func) { @@ -231,7 +231,7 @@ bool xmrig::MultiWorker::verify2(const Algorithm &algorithm, const uint8_t *r namespace xmrig { template<> -bool MultiWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) +bool CpuWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) { cn_hash_fun func = m_thread->fn(algorithm); if (!func) { @@ -253,7 +253,7 @@ bool MultiWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenc template -void xmrig::MultiWorker::consumeJob() +void xmrig::CpuWorker::consumeJob() { m_job.add(Workers::job(), Nonce::sequence(), kReserveCount); } @@ -261,11 +261,11 @@ void xmrig::MultiWorker::consumeJob() namespace xmrig { -template class MultiWorker<1>; -template class MultiWorker<2>; -template class MultiWorker<3>; -template class MultiWorker<4>; -template class MultiWorker<5>; +template class CpuWorker<1>; +template class CpuWorker<2>; +template class CpuWorker<3>; +template class CpuWorker<4>; +template class CpuWorker<5>; } // namespace xmrig diff --git a/src/workers/MultiWorker.h b/src/backend/cpu/CpuWorker.h similarity index 78% rename from src/workers/MultiWorker.h rename to src/backend/cpu/CpuWorker.h index 2bcb2333..7e878b54 100644 --- a/src/workers/MultiWorker.h +++ b/src/backend/cpu/CpuWorker.h @@ -23,15 +23,15 @@ * along with this program. If not, see . 
*/ -#ifndef XMRIG_MULTIWORKER_H -#define XMRIG_MULTIWORKER_H +#ifndef XMRIG_CPUWORKER_H +#define XMRIG_CPUWORKER_H +#include "backend/common/WorkerJob.h" #include "base/net/stratum/Job.h" -#include "core/WorkerJob.h" #include "Mem.h" #include "net/JobResult.h" -#include "workers/Worker.h" +#include "backend/common/Worker.h" namespace xmrig { @@ -41,11 +41,11 @@ class RxVm; template -class MultiWorker : public Worker +class CpuWorker : public Worker { public: - MultiWorker(ThreadHandle *handle); - ~MultiWorker(); + CpuWorker(ThreadHandle *handle); + ~CpuWorker() override; protected: bool selfTest() override; @@ -71,7 +71,18 @@ private: }; +template<> +bool CpuWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenceValue); + + +extern template class CpuWorker<1>; +extern template class CpuWorker<2>; +extern template class CpuWorker<3>; +extern template class CpuWorker<4>; +extern template class CpuWorker<5>; + + } // namespace xmrig -#endif /* XMRIG_MULTIWORKER_H */ +#endif /* XMRIG_CPUWORKER_H */ diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index df9b7cea..3e15a9fd 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -1,14 +1,16 @@ -set(HEADERS_CPU - src/backend/cpu/Cpu.h - src/backend/cpu/CpuConfig.h +set(HEADERS_BACKEND_CPU + src/backend/cpu/Cpu.h + src/backend/cpu/CpuConfig.h src/backend/cpu/CpuThread.h - src/backend/cpu/interfaces/ICpuInfo.h + src/backend/cpu/CpuWorker.h + src/backend/cpu/interfaces/ICpuInfo.h ) -set(SOURCES_CPU +set(SOURCES_BACKEND_CPU src/backend/cpu/Cpu.cpp src/backend/cpu/CpuConfig.cpp src/backend/cpu/CpuThread.cpp + src/backend/cpu/CpuWorker.cpp ) diff --git a/src/workers/CpuThreadLegacy.h b/src/workers/CpuThreadLegacy.h index 4553295c..b803a8c4 100644 --- a/src/workers/CpuThreadLegacy.h +++ b/src/workers/CpuThreadLegacy.h @@ -26,8 +26,8 @@ #define XMRIG_CPUTHREADLEGACY_H +#include "backend/common/interfaces/IThread.h" #include "crypto/cn/CnHash.h" -#include "interfaces/IThread.h" 
struct cryptonight_ctx; diff --git a/src/workers/ThreadHandle.h b/src/workers/ThreadHandle.h index c32aabf0..aedb4907 100644 --- a/src/workers/ThreadHandle.h +++ b/src/workers/ThreadHandle.h @@ -31,7 +31,7 @@ #include -#include "interfaces/IThread.h" +#include "backend/common/interfaces/IThread.h" class IWorker; diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index 53a8b712..b43546cd 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -28,6 +28,7 @@ #include "api/Api.h" +#include "backend/cpu/CpuWorker.h" #include "base/io/log/Log.h" #include "base/tools/Chrono.h" #include "base/tools/Handle.h" @@ -37,11 +38,9 @@ #include "crypto/rx/RxAlgo.h" #include "crypto/rx/RxCache.h" #include "crypto/rx/RxDataset.h" -#include "interfaces/IThread.h" #include "Mem.h" #include "rapidjson/document.h" #include "workers/Hashrate.h" -#include "workers/MultiWorker.h" #include "workers/ThreadHandle.h" #include "workers/Workers.h" @@ -250,26 +249,23 @@ void Workers::onReady(void *arg) switch (handle->config()->multiway()) { case 1: - worker = new xmrig::MultiWorker<1>(handle); + worker = new xmrig::CpuWorker<1>(handle); break; case 2: - worker = new xmrig::MultiWorker<2>(handle); + worker = new xmrig::CpuWorker<2>(handle); break; case 3: - worker = new xmrig::MultiWorker<3>(handle); + worker = new xmrig::CpuWorker<3>(handle); break; case 4: - worker = new xmrig::MultiWorker<4>(handle); + worker = new xmrig::CpuWorker<4>(handle); break; case 5: - worker = new xmrig::MultiWorker<5>(handle); - break; - - default: + worker = new xmrig::CpuWorker<5>(handle); break; } From dff59fabc241fc1612b614200a0db030000245cc Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 14 Jul 2019 00:35:38 +0700 Subject: [PATCH 025/172] Removed CPU specific code from Worker class. 
--- src/backend/common/Worker.cpp | 27 ++++++++------------------- src/backend/common/Worker.h | 14 ++++---------- src/backend/cpu/CpuWorker.cpp | 6 ++++-- src/backend/cpu/CpuWorker.h | 9 ++++++++- src/base/tools/Chrono.h | 8 ++++++++ src/workers/Hashrate.cpp | 13 ++++--------- src/workers/Workers.cpp | 6 +++--- 7 files changed, 39 insertions(+), 44 deletions(-) diff --git a/src/backend/common/Worker.cpp b/src/backend/common/Worker.cpp index f0457bbb..98da61d4 100644 --- a/src/backend/common/Worker.cpp +++ b/src/backend/common/Worker.cpp @@ -23,36 +23,25 @@ * along with this program. If not, see . */ -#include - #include "backend/common/Worker.h" -#include "backend/cpu/Cpu.h" #include "base/kernel/Platform.h" -#include "workers/CpuThreadLegacy.h" -#include "workers/ThreadHandle.h" +#include "base/tools/Chrono.h" -Worker::Worker(ThreadHandle *handle) : - m_id(handle->threadId()), +xmrig::Worker::Worker(size_t id, int64_t affinity, int priority) : + m_id(id), m_hashCount(0), m_timestamp(0), - m_count(0), - m_thread(static_cast(handle->config())) + m_count(0) { - if (xmrig::Cpu::info()->threads() > 1 && m_thread->affinity() != -1L) { - Platform::setThreadAffinity(m_thread->affinity()); - } - - Platform::setThreadPriority(m_thread->priority()); + Platform::trySetThreadAffinity(affinity); + Platform::setThreadPriority(priority); } -void Worker::storeStats() +void xmrig::Worker::storeStats() { - using namespace std::chrono; - - const uint64_t timestamp = time_point_cast(high_resolution_clock::now()).time_since_epoch().count(); m_hashCount.store(m_count, std::memory_order_relaxed); - m_timestamp.store(timestamp, std::memory_order_relaxed); + m_timestamp.store(Chrono::highResolutionMSecs(), std::memory_order_relaxed); } diff --git a/src/backend/common/Worker.h b/src/backend/common/Worker.h index 3e1d202f..3223a60c 100644 --- a/src/backend/common/Worker.h +++ b/src/backend/common/Worker.h @@ -32,23 +32,16 @@ #include "backend/common/interfaces/IWorker.h" -#include "Mem.h" - 
- -class ThreadHandle; namespace xmrig { - class CpuThreadLegacy; -} class Worker : public IWorker { public: - Worker(ThreadHandle *handle); + Worker(size_t id, int64_t affinity, int priority); - inline const MemInfo &memory() const { return m_memory; } inline size_t id() const override { return m_id; } inline uint64_t hashCount() const override { return m_hashCount.load(std::memory_order_relaxed); } inline uint64_t timestamp() const override { return m_timestamp.load(std::memory_order_relaxed); } @@ -57,12 +50,13 @@ protected: void storeStats(); const size_t m_id; - MemInfo m_memory; std::atomic m_hashCount; std::atomic m_timestamp; uint64_t m_count; - xmrig::CpuThreadLegacy *m_thread; }; +} // namespace xmrig + + #endif /* XMRIG_WORKER_H */ diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index fc98048d..e8aa2e3e 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -34,6 +34,7 @@ #include "crypto/rx/RxVm.h" #include "net/JobResults.h" #include "workers/CpuThreadLegacy.h" +#include "workers/ThreadHandle.h" #include "workers/Workers.h" @@ -45,8 +46,9 @@ static constexpr uint32_t kReserveCount = 4096; template -xmrig::CpuWorker::CpuWorker(ThreadHandle *handle) - : Worker(handle) +xmrig::CpuWorker::CpuWorker(ThreadHandle *handle) : + Worker(handle->threadId(), handle->config()->affinity(), handle->config()->priority()), + m_thread(static_cast(handle->config())) { if (m_thread->algorithm().family() != Algorithm::RANDOM_X) { m_memory = Mem::create(m_ctx, m_thread->algorithm(), N); diff --git a/src/backend/cpu/CpuWorker.h b/src/backend/cpu/CpuWorker.h index 7e878b54..b9adf0f1 100644 --- a/src/backend/cpu/CpuWorker.h +++ b/src/backend/cpu/CpuWorker.h @@ -34,9 +34,13 @@ #include "backend/common/Worker.h" +class ThreadHandle; + + namespace xmrig { +class CpuThreadLegacy; class RxVm; @@ -47,6 +51,8 @@ public: CpuWorker(ThreadHandle *handle); ~CpuWorker() override; + inline const MemInfo &memory() const { return 
m_memory; } + protected: bool selfTest() override; void start() override; @@ -60,9 +66,10 @@ private: bool verify2(const Algorithm &algorithm, const uint8_t *referenceValue); void consumeJob(); + CpuThreadLegacy *m_thread; cryptonight_ctx *m_ctx[N]; + MemInfo m_memory; uint8_t m_hash[N * 32]; - WorkerJob m_job; # ifdef XMRIG_ALGO_RANDOMX diff --git a/src/base/tools/Chrono.h b/src/base/tools/Chrono.h index d3c14602..e464f361 100644 --- a/src/base/tools/Chrono.h +++ b/src/base/tools/Chrono.h @@ -35,6 +35,14 @@ namespace xmrig { class Chrono { public: + static inline uint64_t highResolutionMSecs() + { + using namespace std::chrono; + + return static_cast(time_point_cast(high_resolution_clock::now()).time_since_epoch().count()); + } + + static inline uint64_t steadyMSecs() { using namespace std::chrono; diff --git a/src/workers/Hashrate.cpp b/src/workers/Hashrate.cpp index 568817cd..0a683caa 100644 --- a/src/workers/Hashrate.cpp +++ b/src/workers/Hashrate.cpp @@ -24,13 +24,13 @@ #include -#include #include #include #include #include "base/io/log/Log.h" +#include "base/tools/Chrono.h" #include "base/tools/Handle.h" #include "core/config/Config.h" #include "core/Controller.h" @@ -98,9 +98,6 @@ double Hashrate::calc(size_t threadId, size_t ms) const return nan(""); } - using namespace std::chrono; - const uint64_t now = time_point_cast(high_resolution_clock::now()).time_since_epoch().count(); - uint64_t earliestHashCount = 0; uint64_t earliestStamp = 0; uint64_t lastestStamp = 0; @@ -119,7 +116,7 @@ double Hashrate::calc(size_t threadId, size_t ms) const lastestHashCnt = m_counts[threadId][idx]; } - if (now - m_timestamps[threadId][idx] > ms) { + if (xmrig::Chrono::highResolutionMSecs() - m_timestamps[threadId][idx] > ms) { haveFullSet = true; break; } @@ -136,10 +133,8 @@ double Hashrate::calc(size_t threadId, size_t ms) const return nan(""); } - double hashes, time; - hashes = (double) lastestHashCnt - earliestHashCount; - time = (double) lastestStamp - earliestStamp; - 
time /= 1000.0; + const double hashes = static_cast(lastestHashCnt - earliestHashCount); + const double time = static_cast(lastestStamp - earliestStamp) / 1000.0; return hashes / time; } diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp index b43546cd..78954d8f 100644 --- a/src/workers/Workers.cpp +++ b/src/workers/Workers.cpp @@ -299,12 +299,12 @@ void Workers::onTick(uv_timer_t *) void Workers::start(IWorker *worker) { - const Worker *w = static_cast(worker); +// const Worker *w = static_cast(worker); uv_mutex_lock(&m_mutex); m_status.started++; - m_status.pages += w->memory().pages; - m_status.hugePages += w->memory().hugePages; +// m_status.pages += w->memory().pages; +// m_status.hugePages += w->memory().hugePages; if (m_status.started == m_status.threads) { const double percent = (double) m_status.hugePages / m_status.pages * 100.0; From 27f3008d791f53397281557bda3c696088a33ffd Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 16 Jul 2019 22:10:50 +0700 Subject: [PATCH 026/172] Added initial support for new style threads launch method. 
--- CMakeLists.txt | 8 +- src/App.cpp | 17 +- src/api/v1/ApiRouter.cpp | 12 +- src/backend/common/Thread.h | 62 ++++ src/backend/common/WorkerJob.h | 1 - src/backend/common/Workers.cpp | 152 ++++++++ .../common/Workers.h} | 48 ++- src/backend/common/common.cmake | 4 + .../common/interfaces/IBackend.h} | 38 +- src/backend/common/interfaces/IWorker.h | 6 + src/backend/cpu/CpuBackend.cpp | 151 ++++++++ src/backend/cpu/CpuBackend.h | 61 ++++ src/backend/cpu/CpuLaunchData.cpp | 51 +++ src/backend/cpu/CpuLaunchData.h | 67 ++++ src/backend/cpu/CpuThread.h | 16 +- src/backend/cpu/CpuWorker.cpp | 56 +-- src/backend/cpu/CpuWorker.h | 17 +- src/backend/cpu/cpu.cmake | 4 + src/base/io/log/Log.h | 82 ++--- src/core/Controller.cpp | 19 +- src/core/Controller.h | 6 +- src/core/Miner.cpp | 215 ++++++++++++ src/core/Miner.h | 65 ++++ src/crypto/cn/CnHash.cpp | 9 +- src/crypto/cn/CnHash.h | 2 +- src/crypto/common/Nonce.cpp | 29 +- src/crypto/common/Nonce.h | 23 +- src/net/Network.cpp | 8 +- src/workers/CpuThreadLegacy.cpp | 6 +- src/workers/Workers.cpp | 330 ----------------- src/workers/WorkersLegacy.cpp | 331 ++++++++++++++++++ src/workers/{Workers.h => WorkersLegacy.h} | 38 +- 32 files changed, 1429 insertions(+), 505 deletions(-) create mode 100644 src/backend/common/Thread.h create mode 100644 src/backend/common/Workers.cpp rename src/{workers/ThreadHandle.h => backend/common/Workers.h} (64%) rename src/{workers/ThreadHandle.cpp => backend/common/interfaces/IBackend.h} (71%) create mode 100644 src/backend/cpu/CpuBackend.cpp create mode 100644 src/backend/cpu/CpuBackend.h create mode 100644 src/backend/cpu/CpuLaunchData.cpp create mode 100644 src/backend/cpu/CpuLaunchData.h create mode 100644 src/core/Miner.cpp create mode 100644 src/core/Miner.h delete mode 100644 src/workers/Workers.cpp create mode 100644 src/workers/WorkersLegacy.cpp rename src/workers/{Workers.h => WorkersLegacy.h} (76%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8291f606..8749a003 100644 --- 
a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,6 +33,7 @@ set(HEADERS src/core/config/ConfigTransform.h src/core/config/usage.h src/core/Controller.h + src/core/Miner.h src/Mem.h src/net/interfaces/IJobResultListener.h src/net/JobResult.h @@ -44,8 +45,7 @@ set(HEADERS src/version.h src/workers/CpuThreadLegacy.h src/workers/Hashrate.h - src/workers/ThreadHandle.h - src/workers/Workers.h + src/workers/WorkersLegacy.h ) set(HEADERS_CRYPTO @@ -84,6 +84,7 @@ set(SOURCES src/core/config/Config.cpp src/core/config/ConfigTransform.cpp src/core/Controller.cpp + src/core/Miner.cpp src/Mem.cpp src/net/JobResults.cpp src/net/Network.cpp @@ -92,8 +93,7 @@ set(SOURCES src/Summary.cpp src/workers/CpuThreadLegacy.cpp src/workers/Hashrate.cpp - src/workers/ThreadHandle.cpp - src/workers/Workers.cpp + src/workers/WorkersLegacy.cpp src/xmrig.cpp ) diff --git a/src/App.cpp b/src/App.cpp index 5b2178ac..d6c39595 100644 --- a/src/App.cpp +++ b/src/App.cpp @@ -36,11 +36,11 @@ #include "base/kernel/Signals.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "core/Miner.h" #include "Mem.h" #include "net/Network.h" #include "Summary.h" #include "version.h" -#include "workers/Workers.h" xmrig::App::App(Process *process) : @@ -86,8 +86,6 @@ int xmrig::App::exec() return 0; } - Workers::start(m_controller); - m_controller->start(); const int r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); @@ -102,23 +100,17 @@ void xmrig::App::onConsoleCommand(char command) switch (command) { case 'h': case 'H': - Workers::printHashrate(true); + m_controller->miner()->printHashrate(true); break; case 'p': case 'P': - if (Workers::isEnabled()) { - LOG_INFO(YELLOW_BOLD("paused") ", press " MAGENTA_BOLD("r") " to resume"); - Workers::setEnabled(false); - } + m_controller->miner()->setEnabled(false); break; case 'r': case 'R': - if (!Workers::isEnabled()) { - LOG_INFO(GREEN_BOLD("resumed")); - Workers::setEnabled(true); - } + m_controller->miner()->setEnabled(true); break; case 3: @@ -162,6 
+154,5 @@ void xmrig::App::close() m_console->stop(); m_controller->stop(); - Workers::stop(); Log::destroy(); } diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index 5ed94c4b..9e609d13 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -37,7 +37,7 @@ #include "rapidjson/document.h" #include "version.h" #include "workers/Hashrate.h" -#include "workers/Workers.h" +#include "workers/WorkersLegacy.h" static inline rapidjson::Value normalize(double d) @@ -107,13 +107,13 @@ void xmrig::ApiRouter::getHashrate(rapidjson::Value &reply, rapidjson::Document Value total(kArrayType); Value threads(kArrayType); - const Hashrate *hr = Workers::hashrate(); + const Hashrate *hr = WorkersLegacy::hashrate(); total.PushBack(normalize(hr->calc(Hashrate::ShortInterval)), allocator); total.PushBack(normalize(hr->calc(Hashrate::MediumInterval)), allocator); total.PushBack(normalize(hr->calc(Hashrate::LargeInterval)), allocator); - for (size_t i = 0; i < Workers::threads(); i++) { + for (size_t i = 0; i < WorkersLegacy::threads(); i++) { Value thread(kArrayType); thread.PushBack(normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); thread.PushBack(normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); @@ -144,7 +144,7 @@ void xmrig::ApiRouter::getMiner(rapidjson::Value &reply, rapidjson::Document &do reply.AddMember("kind", APP_KIND, allocator); reply.AddMember("ua", StringRef(Platform::userAgent()), allocator); reply.AddMember("cpu", cpu, allocator); - reply.AddMember("hugepages", Workers::hugePages() > 0, allocator); + reply.AddMember("hugepages", WorkersLegacy::hugePages() > 0, allocator); reply.AddMember("donate_level", m_base->config()->pools().donateLevel(), allocator); } @@ -153,9 +153,9 @@ void xmrig::ApiRouter::getThreads(rapidjson::Value &reply, rapidjson::Document & { using namespace rapidjson; auto &allocator = doc.GetAllocator(); - const Hashrate *hr = Workers::hashrate(); + const Hashrate *hr = WorkersLegacy::hashrate(); - 
Workers::threadsSummary(doc); + WorkersLegacy::threadsSummary(doc); const std::vector &threads = m_base->config()->threads(); Value list(kArrayType); diff --git a/src/backend/common/Thread.h b/src/backend/common/Thread.h new file mode 100644 index 00000000..f1d174ec --- /dev/null +++ b/src/backend/common/Thread.h @@ -0,0 +1,62 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_THREAD_H +#define XMRIG_THREAD_H + + +#include + + +namespace xmrig { + + +class IWorker; + + +template +class Thread +{ +public: + inline Thread(size_t index, const T &config) : m_index(index), m_config(config) {} + inline ~Thread() { uv_thread_join(&m_thread); } + + inline const T &config() const { return m_config; } + inline IWorker *worker() const { return m_worker; } + inline size_t index() const { return m_index; } + inline void setWorker(IWorker *worker) { m_worker = worker; } + inline void start(void (*callback) (void *)) { uv_thread_create(&m_thread, callback, this); } + +private: + const size_t m_index = 0; + const T m_config; + IWorker *m_worker = nullptr; + uv_thread_t m_thread; +}; + + +} // namespace xmrig + + +#endif /* XMRIG_THREAD_H */ diff --git a/src/backend/common/WorkerJob.h b/src/backend/common/WorkerJob.h index 7b598ee6..c9a3d55c 100644 --- a/src/backend/common/WorkerJob.h +++ b/src/backend/common/WorkerJob.h @@ -26,7 +26,6 @@ #define XMRIG_WORKERJOB_H -#include #include diff --git a/src/backend/common/Workers.cpp b/src/backend/common/Workers.cpp new file mode 100644 index 00000000..987ca526 --- /dev/null +++ b/src/backend/common/Workers.cpp @@ -0,0 +1,152 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#include "backend/common/Workers.h" +#include "backend/cpu/CpuWorker.h" +#include "base/io/log/Log.h" + + +namespace xmrig { + + +class WorkersPrivate +{ +public: + inline WorkersPrivate() + { + + } + + + inline ~WorkersPrivate() + { + } +}; + + +} // namespace xmrig + + +template +xmrig::Workers::Workers() : + d_ptr(new WorkersPrivate()) +{ + +} + + +template +xmrig::Workers::~Workers() +{ + delete d_ptr; +} + + +template +void xmrig::Workers::add(const T &data) +{ + m_workers.push_back(new Thread(m_workers.size(), data)); +} + + +template +void xmrig::Workers::start() +{ + for (Thread *worker : m_workers) { + worker->start(Workers::onReady); + } +} + + +template +void xmrig::Workers::stop() +{ + Nonce::stop(T::backend()); + + for (Thread *worker : m_workers) { + delete worker; + } + + m_workers.clear(); + Nonce::touch(T::backend()); +} + + +template +void xmrig::Workers::onReady(void *arg) +{ + printf("ON READY\n"); +} + + +namespace xmrig { + + +template<> +void xmrig::Workers::onReady(void *arg) +{ + auto handle = static_cast* >(arg); + + IWorker *worker = nullptr; + + switch (handle->config().intensity) { + case 1: + worker = new CpuWorker<1>(handle->index(), handle->config()); + break; + + case 2: + worker = new CpuWorker<2>(handle->index(), handle->config()); + break; + + case 3: + worker = new CpuWorker<3>(handle->index(), handle->config()); + break; + + case 4: + worker = new CpuWorker<4>(handle->index(), handle->config()); + break; + + case 5: + worker = new CpuWorker<5>(handle->index(), handle->config()); + break; + } + + handle->setWorker(worker); + + if (!worker->selfTest()) { + LOG_ERR("thread %zu error: \"hash self-test failed\".", handle->worker()->id()); + + return; + } + + worker->start(); +} + + +template class Workers; + + +} // namespace xmrig diff --git 
a/src/workers/ThreadHandle.h b/src/backend/common/Workers.h similarity index 64% rename from src/workers/ThreadHandle.h rename to src/backend/common/Workers.h index aedb4907..25f81c5b 100644 --- a/src/workers/ThreadHandle.h +++ b/src/backend/common/Workers.h @@ -5,6 +5,7 @@ * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , + * Copyright 2018 Lee Clagett * Copyright 2018-2019 SChernykh * Copyright 2016-2019 XMRig , * @@ -22,38 +23,47 @@ * along with this program. If not, see . */ -#ifndef XMRIG_THREADHANDLE_H -#define XMRIG_THREADHANDLE_H +#ifndef XMRIG_WORKERS_H +#define XMRIG_WORKERS_H -#include -#include -#include +#include "backend/common/Thread.h" +#include "backend/cpu/CpuLaunchData.h" -#include "backend/common/interfaces/IThread.h" +namespace xmrig { -class IWorker; +class WorkersPrivate; -class ThreadHandle +template +class Workers { public: - ThreadHandle(xmrig::IThread *config); - void join(); - void start(void (*callback) (void *)); + Workers(); + ~Workers(); - inline IWorker *worker() const { return m_worker; } - inline size_t threadId() const { return m_config->index(); } - inline void setWorker(IWorker *worker) { assert(worker != nullptr); m_worker = worker; } - inline xmrig::IThread *config() const { return m_config; } + void add(const T &data); + void start(); + void stop(); private: - IWorker *m_worker; - uv_thread_t m_thread; - xmrig::IThread *m_config; + static void onReady(void *arg); + + std::vector *> m_workers; + WorkersPrivate *d_ptr; }; -#endif /* XMRIG_THREADHANDLE_H */ +template<> +void Workers::onReady(void *arg); + + +extern template class Workers; + + +} // namespace xmrig + + +#endif /* XMRIG_WORKERS_H */ diff --git a/src/backend/common/common.cmake b/src/backend/common/common.cmake index e7caa593..bb84af58 100644 --- a/src/backend/common/common.cmake +++ b/src/backend/common/common.cmake @@ -1,12 +1,16 @@ set(HEADERS_BACKEND_COMMON + src/backend/common/interfaces/IBackend.h 
src/backend/common/interfaces/IThread.h src/backend/common/interfaces/IWorker.h + src/backend/common/Thread.h src/backend/common/Threads.h src/backend/common/Worker.h + src/backend/common/Workers.h src/backend/common/WorkerJob.h ) set(SOURCES_BACKEND_COMMON src/backend/common/Threads.cpp src/backend/common/Worker.cpp + src/backend/common/Workers.cpp ) diff --git a/src/workers/ThreadHandle.cpp b/src/backend/common/interfaces/IBackend.h similarity index 71% rename from src/workers/ThreadHandle.cpp rename to src/backend/common/interfaces/IBackend.h index ced5f326..d6fe1695 100644 --- a/src/workers/ThreadHandle.cpp +++ b/src/backend/common/interfaces/IBackend.h @@ -22,24 +22,34 @@ * along with this program. If not, see . */ - -#include "workers/ThreadHandle.h" +#ifndef XMRIG_IBACKEND_H +#define XMRIG_IBACKEND_H -ThreadHandle::ThreadHandle(xmrig::IThread *config) : - m_worker(nullptr), - m_config(config) +#include + + +namespace xmrig { + + +class Job; +class String; + + +class IBackend { -} +public: + virtual ~IBackend() = default; + + virtual const String &profileName() const = 0; + virtual void printHashrate(bool details) = 0; + virtual void setJob(const Job &job) = 0; + virtual void stop() = 0; + virtual void tick(uint64_t ticks) = 0; +}; -void ThreadHandle::join() -{ - uv_thread_join(&m_thread); -} +} // namespace xmrig -void ThreadHandle::start(void (*callback) (void *)) -{ - uv_thread_create(&m_thread, callback, this); -} +#endif // XMRIG_IBACKEND_H diff --git a/src/backend/common/interfaces/IWorker.h b/src/backend/common/interfaces/IWorker.h index 83e9306e..de22de02 100644 --- a/src/backend/common/interfaces/IWorker.h +++ b/src/backend/common/interfaces/IWorker.h @@ -29,6 +29,9 @@ #include +namespace xmrig { + + class IWorker { public: @@ -42,4 +45,7 @@ public: }; +} // namespace xmrig + + #endif // XMRIG_IWORKER_H diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp new file mode 100644 index 00000000..a1ae5747 --- /dev/null +++ 
b/src/backend/cpu/CpuBackend.cpp @@ -0,0 +1,151 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#include "backend/common/Workers.h" +#include "backend/cpu/CpuBackend.h" +#include "base/net/stratum/Job.h" +#include "base/tools/String.h" +#include "core/config/Config.h" +#include "core/Controller.h" + + +#include "base/io/log/Log.h" + + +namespace xmrig { + + +extern template class Threads; + + +class CpuBackendPrivate +{ +public: + inline CpuBackendPrivate(const Miner *miner, Controller *controller) : + miner(miner), + controller(controller) + { + } + + + inline ~CpuBackendPrivate() + { + } + + + inline bool isReady(const Algorithm &nextAlgo) const + { + if (!algo.isValid()) { + return false; + } + + if (nextAlgo == algo) { + return true; + } + + const CpuThreads &nextThreads = controller->config()->cpu().threads().get(nextAlgo); + + return algo.memory() == nextAlgo.memory() + && threads.size() == nextThreads.size() + && std::equal(threads.begin(), threads.end(), nextThreads.begin()); + } + + + Algorithm algo; + const Miner *miner; + Controller *controller; + CpuThreads threads; + String profileName; + Workers workers; 
+}; + + +} // namespace xmrig + + +xmrig::CpuBackend::CpuBackend(const Miner *miner, Controller *controller) : + d_ptr(new CpuBackendPrivate(miner, controller)) +{ + +} + + +xmrig::CpuBackend::~CpuBackend() +{ + delete d_ptr; +} + + +const xmrig::String &xmrig::CpuBackend::profileName() const +{ + return d_ptr->profileName; +} + + +void xmrig::CpuBackend::printHashrate(bool details) +{ + +} + + +void xmrig::CpuBackend::setJob(const Job &job) +{ + LOG_WARN("PROFILE %s %zu", d_ptr->controller->config()->cpu().threads().profileName(job.algorithm()).data(), job.algorithm().memory()); + + if (d_ptr->isReady(job.algorithm())) { + return; + } + + LOG_INFO(GREEN_BOLD_S "INIT"); + + const CpuConfig &cpu = d_ptr->controller->config()->cpu(); + const Threads &threads = cpu.threads(); + + d_ptr->algo = job.algorithm(); + d_ptr->profileName = threads.profileName(job.algorithm()); + d_ptr->threads = threads.get(d_ptr->profileName); + + LOG_INFO(BLUE_BG_S " %zu ", d_ptr->threads.size()); + + d_ptr->workers.stop(); + + for (const CpuThread &thread : d_ptr->threads) { + d_ptr->workers.add(CpuLaunchData(d_ptr->miner, d_ptr->algo, cpu, thread)); + } + + d_ptr->workers.start(); +} + + +void xmrig::CpuBackend::stop() +{ + d_ptr->workers.stop(); +} + + +void xmrig::CpuBackend::tick(uint64_t ticks) +{ + +} diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h new file mode 100644 index 00000000..d39ab38d --- /dev/null +++ b/src/backend/cpu/CpuBackend.h @@ -0,0 +1,61 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) 
any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_CPUBACKEND_H +#define XMRIG_CPUBACKEND_H + + +#include "backend/common/interfaces/IBackend.h" + + +namespace xmrig { + + +class Controller; +class CpuBackendPrivate; +class Miner; + + +class CpuBackend : public IBackend +{ +public: + CpuBackend(const Miner *miner, Controller *controller); + ~CpuBackend() override; + +protected: + const String &profileName() const override; + void printHashrate(bool details) override; + void setJob(const Job &job) override; + void stop() override; + void tick(uint64_t ticks) override; + +private: + CpuBackendPrivate *d_ptr; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_CPUBACKEND_H */ diff --git a/src/backend/cpu/CpuLaunchData.cpp b/src/backend/cpu/CpuLaunchData.cpp new file mode 100644 index 00000000..68b8e7ae --- /dev/null +++ b/src/backend/cpu/CpuLaunchData.cpp @@ -0,0 +1,51 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#include "backend/cpu/CpuLaunchData.h" +#include "backend/cpu/CpuConfig.h" + + +xmrig::CpuLaunchData::CpuLaunchData(const Miner *miner, const Algorithm &algorithm, const CpuConfig &config, const CpuThread &thread) : + algorithm(algorithm), + assembly(config.assembly()), + hugePages(config.isHugePages()), + hwAES(config.isHwAES()), + intensity(thread.intensity()), + priority(config.priority()), + affinity(thread.affinity()), + miner(miner) +{ +} + + +xmrig::CnHash::AlgoVariant xmrig::CpuLaunchData::av() const +{ + if (intensity <= 2) { + return static_cast(!hwAES ? (intensity + 2) : intensity); + } + + return static_cast(!hwAES ? (intensity + 5) : (intensity + 2)); +} diff --git a/src/backend/cpu/CpuLaunchData.h b/src/backend/cpu/CpuLaunchData.h new file mode 100644 index 00000000..208a68b7 --- /dev/null +++ b/src/backend/cpu/CpuLaunchData.h @@ -0,0 +1,67 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_CPULAUNCHDATA_H +#define XMRIG_CPULAUNCHDATA_H + + +#include "crypto/cn/CnHash.h" +#include "crypto/common/Algorithm.h" +#include "crypto/common/Assembly.h" +#include "crypto/common/Nonce.h" + + +namespace xmrig { + + +class CpuConfig; +class CpuThread; +class Miner; + + +class CpuLaunchData +{ +public: + CpuLaunchData(const Miner *miner, const Algorithm &algorithm, const CpuConfig &config, const CpuThread &thread); + + CnHash::AlgoVariant av() const; + + inline constexpr static Nonce::Backend backend() { return Nonce::CPU; } + + const Algorithm algorithm; + const Assembly assembly; + const bool hugePages; + const bool hwAES; + const int intensity; + const int priority; + const int64_t affinity; + const Miner *miner; +}; + + +} // namespace xmrig + + +#endif /* XMRIG_CPULAUNCHDATA_H */ diff --git a/src/backend/cpu/CpuThread.h b/src/backend/cpu/CpuThread.h index 444b2709..adaffa68 100644 --- a/src/backend/cpu/CpuThread.h +++ b/src/backend/cpu/CpuThread.h @@ -38,19 +38,23 @@ namespace xmrig { class CpuThread { public: - inline constexpr CpuThread(int intensity = 1, int affinity = -1) : m_affinity(affinity), m_intensity(intensity) {} + inline constexpr CpuThread(int intensity = 1, int64_t affinity = -1) : m_intensity(intensity), m_affinity(affinity) {} CpuThread(const rapidjson::Value &value); - inline bool isValid() const { return m_intensity >= 1 && m_intensity <= 5; } - inline int affinity() const { return m_affinity; } - inline int intensity() const { return m_intensity; } + inline bool isEqual(const CpuThread &other) const { return other.m_affinity == m_affinity && other.m_intensity == m_intensity; } + inline bool isValid() const { return m_intensity >= 1 && m_intensity <= 5; } + inline int intensity() const { return m_intensity; } + inline int64_t affinity() const { return m_affinity; } + + inline bool operator!=(const CpuThread &other) const { return !isEqual(other); } + inline bool operator==(const CpuThread &other) const { return 
isEqual(other); } rapidjson::Value toJSON(rapidjson::Document &doc) const; private: - int m_affinity = -1; - int m_intensity = -1; + int m_intensity = -1; + int64_t m_affinity = -1; }; diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index e8aa2e3e..96466252 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -28,14 +28,17 @@ #include "backend/cpu/CpuWorker.h" +#include "core/Miner.h" #include "crypto/cn/CryptoNight_test.h" #include "crypto/common/Nonce.h" #include "crypto/rx/Rx.h" #include "crypto/rx/RxVm.h" #include "net/JobResults.h" -#include "workers/CpuThreadLegacy.h" -#include "workers/ThreadHandle.h" -#include "workers/Workers.h" + + +#ifdef XMRIG_ALGO_RANDOMX +# include "crypto/randomx/randomx.h" +#endif namespace xmrig { @@ -45,13 +48,18 @@ static constexpr uint32_t kReserveCount = 4096; } // namespace xmrig + template -xmrig::CpuWorker::CpuWorker(ThreadHandle *handle) : - Worker(handle->threadId(), handle->config()->affinity(), handle->config()->priority()), - m_thread(static_cast(handle->config())) +xmrig::CpuWorker::CpuWorker(size_t index, const CpuLaunchData &data) : + Worker(index, data.affinity, data.priority), + m_algorithm(data.algorithm), + m_assembly(data.assembly), + m_hwAES(data.hwAES), + m_av(data.av()), + m_miner(data.miner) { - if (m_thread->algorithm().family() != Algorithm::RANDOM_X) { - m_memory = Mem::create(m_ctx, m_thread->algorithm(), N); + if (m_algorithm.family() != Algorithm::RANDOM_X) { + m_memory = Mem::create(m_ctx, m_algorithm, N); } } @@ -73,7 +81,7 @@ void xmrig::CpuWorker::allocateRandomX_VM() { if (!m_vm) { RxDataset *dataset = Rx::dataset(m_job.currentJob().seedHash(), m_job.currentJob().algorithm()); - m_vm = new RxVm(dataset, true, m_thread->isSoftAES()); + m_vm = new RxVm(dataset, true, !m_hwAES); } } #endif @@ -82,7 +90,7 @@ void xmrig::CpuWorker::allocateRandomX_VM() template bool xmrig::CpuWorker::selfTest() { - if (m_thread->algorithm().family() == 
Algorithm::CN) { + if (m_algorithm.family() == Algorithm::CN) { const bool rc = verify(Algorithm::CN_0, test_output_v0) && verify(Algorithm::CN_1, test_output_v1) && verify(Algorithm::CN_2, test_output_v2) && @@ -108,14 +116,14 @@ bool xmrig::CpuWorker::selfTest() } # ifdef XMRIG_ALGO_CN_LITE - if (m_thread->algorithm().family() == Algorithm::CN_LITE) { + if (m_algorithm.family() == Algorithm::CN_LITE) { return verify(Algorithm::CN_LITE_0, test_output_v0_lite) && verify(Algorithm::CN_LITE_1, test_output_v1_lite); } # endif # ifdef XMRIG_ALGO_CN_HEAVY - if (m_thread->algorithm().family() == Algorithm::CN_HEAVY) { + if (m_algorithm.family() == Algorithm::CN_HEAVY) { return verify(Algorithm::CN_HEAVY_0, test_output_v0_heavy) && verify(Algorithm::CN_HEAVY_XHV, test_output_xhv_heavy) && verify(Algorithm::CN_HEAVY_TUBE, test_output_tube_heavy); @@ -123,13 +131,13 @@ bool xmrig::CpuWorker::selfTest() # endif # ifdef XMRIG_ALGO_CN_PICO - if (m_thread->algorithm().family() == Algorithm::CN_PICO) { + if (m_algorithm.family() == Algorithm::CN_PICO) { return verify(Algorithm::CN_PICO_0, test_output_pico_trtl); } # endif # ifdef XMRIG_ALGO_RANDOMX - if (m_thread->algorithm().family() == Algorithm::RANDOM_X) { + if (m_algorithm.family() == Algorithm::RANDOM_X) { return true; } # endif @@ -141,21 +149,21 @@ bool xmrig::CpuWorker::selfTest() template void xmrig::CpuWorker::start() { - while (Nonce::sequence() > 0) { - if (Workers::isPaused()) { + while (Nonce::sequence(Nonce::CPU) > 0) { + if (Nonce::isPaused()) { do { std::this_thread::sleep_for(std::chrono::milliseconds(200)); } - while (Workers::isPaused()); + while (Nonce::isPaused()); - if (Nonce::sequence() == 0) { + if (Nonce::sequence(Nonce::CPU) == 0) { break; } consumeJob(); } - while (!Nonce::isOutdated(m_job.sequence())) { + while (!Nonce::isOutdated(Nonce::CPU, m_job.sequence())) { if ((m_count & 0x7) == 0) { storeStats(); } @@ -170,7 +178,7 @@ void xmrig::CpuWorker::start() else # endif { - 
m_thread->fn(job.algorithm())(m_job.blob(), job.size(), m_hash, m_ctx, job.height()); + fn(job.algorithm())(m_job.blob(), job.size(), m_hash, m_ctx, job.height()); } for (size_t i = 0; i < N; ++i) { @@ -193,7 +201,7 @@ void xmrig::CpuWorker::start() template bool xmrig::CpuWorker::verify(const Algorithm &algorithm, const uint8_t *referenceValue) { - cn_hash_fun func = m_thread->fn(algorithm); + cn_hash_fun func = fn(algorithm); if (!func) { return false; } @@ -206,7 +214,7 @@ bool xmrig::CpuWorker::verify(const Algorithm &algorithm, const uint8_t *refe template bool xmrig::CpuWorker::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) { - cn_hash_fun func = m_thread->fn(algorithm); + cn_hash_fun func = fn(algorithm); if (!func) { return false; } @@ -235,7 +243,7 @@ namespace xmrig { template<> bool CpuWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenceValue) { - cn_hash_fun func = m_thread->fn(algorithm); + cn_hash_fun func = fn(algorithm); if (!func) { return false; } @@ -257,7 +265,7 @@ bool CpuWorker<1>::verify2(const Algorithm &algorithm, const uint8_t *referenceV template void xmrig::CpuWorker::consumeJob() { - m_job.add(Workers::job(), Nonce::sequence(), kReserveCount); + m_job.add(m_miner->job(), Nonce::sequence(Nonce::CPU), kReserveCount); } diff --git a/src/backend/cpu/CpuWorker.h b/src/backend/cpu/CpuWorker.h index b9adf0f1..c67d355b 100644 --- a/src/backend/cpu/CpuWorker.h +++ b/src/backend/cpu/CpuWorker.h @@ -27,20 +27,17 @@ #define XMRIG_CPUWORKER_H +#include "backend/common/Worker.h" #include "backend/common/WorkerJob.h" +#include "backend/cpu/CpuLaunchData.h" #include "base/net/stratum/Job.h" #include "Mem.h" #include "net/JobResult.h" -#include "backend/common/Worker.h" - - -class ThreadHandle; namespace xmrig { -class CpuThreadLegacy; class RxVm; @@ -48,7 +45,7 @@ template class CpuWorker : public Worker { public: - CpuWorker(ThreadHandle *handle); + CpuWorker(size_t index, const CpuLaunchData &data); ~CpuWorker() 
override; inline const MemInfo &memory() const { return m_memory; } @@ -58,6 +55,8 @@ protected: void start() override; private: + inline cn_hash_fun fn(const Algorithm &algorithm) const { return CnHash::fn(algorithm, m_av, m_assembly); } + # ifdef XMRIG_ALGO_RANDOMX void allocateRandomX_VM(); # endif @@ -66,7 +65,11 @@ private: bool verify2(const Algorithm &algorithm, const uint8_t *referenceValue); void consumeJob(); - CpuThreadLegacy *m_thread; + const Algorithm m_algorithm; + const Assembly m_assembly; + const bool m_hwAES; + const CnHash::AlgoVariant m_av; + const Miner *m_miner; cryptonight_ctx *m_ctx[N]; MemInfo m_memory; uint8_t m_hash[N * 32]; diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index 3e15a9fd..871debd3 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -1,6 +1,8 @@ set(HEADERS_BACKEND_CPU src/backend/cpu/Cpu.h + src/backend/cpu/CpuBackend.h src/backend/cpu/CpuConfig.h + src/backend/cpu/CpuLaunchData.cpp src/backend/cpu/CpuThread.h src/backend/cpu/CpuWorker.h src/backend/cpu/interfaces/ICpuInfo.h @@ -8,7 +10,9 @@ set(HEADERS_BACKEND_CPU set(SOURCES_BACKEND_CPU src/backend/cpu/Cpu.cpp + src/backend/cpu/CpuBackend.cpp src/backend/cpu/CpuConfig.cpp + src/backend/cpu/CpuLaunchData.h src/backend/cpu/CpuThread.cpp src/backend/cpu/CpuWorker.cpp ) diff --git a/src/base/io/log/Log.h b/src/base/io/log/Log.h index 962d1dba..078a8546 100644 --- a/src/base/io/log/Log.h +++ b/src/base/io/log/Log.h @@ -61,49 +61,53 @@ private: }; -#define CSI "\x1B[" // Control Sequence Introducer (ANSI spec name) -#define CLEAR CSI "0m" // all attributes off -#define BRIGHT_BLACK_S CSI "0;90m" // somewhat MD.GRAY -#define BLACK_S CSI "0;30m" -#define BLACK_BOLD_S CSI "1;30m" // another name for GRAY -#define RED_S CSI "0;31m" -#define RED_BOLD_S CSI "1;31m" -#define GREEN_S CSI "0;32m" -#define GREEN_BOLD_S CSI "1;32m" -#define YELLOW_S CSI "0;33m" -#define YELLOW_BOLD_S CSI "1;33m" -#define BLUE_S CSI "0;34m" -#define BLUE_BOLD_S 
CSI "1;34m" -#define MAGENTA_S CSI "0;35m" -#define MAGENTA_BOLD_S CSI "1;35m" -#define CYAN_S CSI "0;36m" -#define CYAN_BOLD_S CSI "1;36m" -#define WHITE_S CSI "0;37m" // another name for LT.GRAY -#define WHITE_BOLD_S CSI "1;37m" // actually white +#define CSI "\x1B[" // Control Sequence Introducer (ANSI spec name) +#define CLEAR CSI "0m" // all attributes off +#define BRIGHT_BLACK_S CSI "0;90m" // somewhat MD.GRAY +#define BLACK_S CSI "0;30m" +#define BLACK_BOLD_S CSI "1;30m" // another name for GRAY +#define RED_S CSI "0;31m" +#define RED_BOLD_S CSI "1;31m" +#define GREEN_S CSI "0;32m" +#define GREEN_BOLD_S CSI "1;32m" +#define YELLOW_S CSI "0;33m" +#define YELLOW_BOLD_S CSI "1;33m" +#define BLUE_S CSI "0;34m" +#define BLUE_BOLD_S CSI "1;34m" +#define MAGENTA_S CSI "0;35m" +#define MAGENTA_BOLD_S CSI "1;35m" +#define CYAN_S CSI "0;36m" +#define CYAN_BOLD_S CSI "1;36m" +#define WHITE_S CSI "0;37m" // another name for LT.GRAY +#define WHITE_BOLD_S CSI "1;37m" // actually white -#define BLUE_BG_S CSI "44m" -#define BLUE_BG_BOLD_S CSI "44;1m" +#define BLUE_BG_S CSI "44m" +#define BLUE_BG_BOLD_S CSI "44;1m" +#define MAGENTA_BG_S CSI "45m" +#define MAGENTA_BG_BOLD_S CSI "45;1m" //color wrappings -#define BLACK(x) BLACK_S x CLEAR -#define BLACK_BOLD(x) BLACK_BOLD_S x CLEAR -#define RED(x) RED_S x CLEAR -#define RED_BOLD(x) RED_BOLD_S x CLEAR -#define GREEN(x) GREEN_S x CLEAR -#define GREEN_BOLD(x) GREEN_BOLD_S x CLEAR -#define YELLOW(x) YELLOW_S x CLEAR -#define YELLOW_BOLD(x) YELLOW_BOLD_S x CLEAR -#define BLUE(x) BLUE_S x CLEAR -#define BLUE_BOLD(x) BLUE_BOLD_S x CLEAR -#define MAGENTA(x) MAGENTA_S x CLEAR -#define MAGENTA_BOLD(x) MAGENTA_BOLD_S x CLEAR -#define CYAN(x) CYAN_S x CLEAR -#define CYAN_BOLD(x) CYAN_BOLD_S x CLEAR -#define WHITE(x) WHITE_S x CLEAR -#define WHITE_BOLD(x) WHITE_BOLD_S x CLEAR +#define BLACK(x) BLACK_S x CLEAR +#define BLACK_BOLD(x) BLACK_BOLD_S x CLEAR +#define RED(x) RED_S x CLEAR +#define RED_BOLD(x) RED_BOLD_S x CLEAR +#define GREEN(x) 
GREEN_S x CLEAR +#define GREEN_BOLD(x) GREEN_BOLD_S x CLEAR +#define YELLOW(x) YELLOW_S x CLEAR +#define YELLOW_BOLD(x) YELLOW_BOLD_S x CLEAR +#define BLUE(x) BLUE_S x CLEAR +#define BLUE_BOLD(x) BLUE_BOLD_S x CLEAR +#define MAGENTA(x) MAGENTA_S x CLEAR +#define MAGENTA_BOLD(x) MAGENTA_BOLD_S x CLEAR +#define CYAN(x) CYAN_S x CLEAR +#define CYAN_BOLD(x) CYAN_BOLD_S x CLEAR +#define WHITE(x) WHITE_S x CLEAR +#define WHITE_BOLD(x) WHITE_BOLD_S x CLEAR -#define BLUE_BG(x) BLUE_BG_S x CLEAR -#define BLUE_BG_BOLD(x) BLUE_BG_BOLD_S x CLEAR +#define BLUE_BG(x) BLUE_BG_S x CLEAR +#define BLUE_BG_BOLD(x) BLUE_BG_BOLD_S x CLEAR +#define MAGENTA_BG(x) MAGENTA_BG_S x CLEAR +#define MAGENTA_BG_BOLD(x) MAGENTA_BG_BOLD_S x CLEAR #define LOG_EMERG(x, ...) xmrig::Log::print(xmrig::Log::EMERG, x, ##__VA_ARGS__) diff --git a/src/core/Controller.cpp b/src/core/Controller.cpp index 81c67d7c..54c9ee34 100644 --- a/src/core/Controller.cpp +++ b/src/core/Controller.cpp @@ -28,12 +28,12 @@ #include "backend/cpu/Cpu.h" #include "core/Controller.h" +#include "core/Miner.h" #include "net/Network.h" xmrig::Controller::Controller(Process *process) : - Base(process), - m_network(nullptr) + Base(process) { } @@ -68,6 +68,8 @@ void xmrig::Controller::start() { Base::start(); + m_miner = new Miner(this); + network()->connect(); } @@ -78,6 +80,19 @@ void xmrig::Controller::stop() delete m_network; m_network = nullptr; + + m_miner->stop(); + + delete m_miner; + m_miner = nullptr; +} + + +xmrig::Miner *xmrig::Controller::miner() const +{ + assert(m_miner != nullptr); + + return m_miner; } diff --git a/src/core/Controller.h b/src/core/Controller.h index 02f9ca92..da7ba368 100644 --- a/src/core/Controller.h +++ b/src/core/Controller.h @@ -32,6 +32,8 @@ namespace xmrig { +class Job; +class Miner; class Network; @@ -46,10 +48,12 @@ public: void start() override; void stop() override; + Miner *miner() const; Network *network() const; private: - Network *m_network; + Miner *m_miner = nullptr; + Network 
*m_network = nullptr; }; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp new file mode 100644 index 00000000..1f819694 --- /dev/null +++ b/src/core/Miner.cpp @@ -0,0 +1,215 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include + + +#include "backend/cpu/CpuBackend.h" +#include "base/io/log/Log.h" +#include "base/net/stratum/Job.h" +#include "base/tools/Timer.h" +#include "core/config/Config.h" +#include "core/Controller.h" +#include "core/Miner.h" +#include "crypto/common/Nonce.h" + +#include "base/tools/Chrono.h" + + +namespace xmrig { + + +class MinerPrivate +{ +public: + inline MinerPrivate(Controller *controller) : controller(controller) + { + uv_rwlock_init(&rwlock); + } + + + inline ~MinerPrivate() + { + uv_rwlock_destroy(&rwlock); + + delete timer; + + for (IBackend *backend : backends) { + delete backend; + } + } + + + inline void handleJobChange() + { + active = true; + if (enabled) { + Nonce::pause(false);; + } + + for (IBackend *backend : backends) { + backend->setJob(job); + } + + if (ticks == 0) { + ticks++; + timer->start(500, 500); + } + } + + + bool active = false; + bool enabled = true; + Controller *controller; + Job job; + std::vector backends; + Timer *timer = nullptr; + uint64_t ticks = 0; + uv_rwlock_t rwlock; +}; + + +} // namespace xmrig + + + +xmrig::Miner::Miner(Controller *controller) + : d_ptr(new MinerPrivate(controller)) +{ + d_ptr->timer = new Timer(this); + + d_ptr->backends.push_back(new CpuBackend(this, controller)); +} + + +xmrig::Miner::~Miner() +{ + delete d_ptr; +} + + +bool xmrig::Miner::isEnabled() const +{ + return d_ptr->enabled; +} + + +xmrig::Job xmrig::Miner::job() const +{ + uv_rwlock_rdlock(&d_ptr->rwlock); + Job job = d_ptr->job; + uv_rwlock_rdunlock(&d_ptr->rwlock); + + return job; +} + + +void xmrig::Miner::pause() +{ + d_ptr->active = false; + + Nonce::pause(true); + Nonce::touch(); +} + + +void xmrig::Miner::printHashrate(bool details) +{ + for (IBackend *backend : d_ptr->backends) { + backend->printHashrate(details); + } +} + + +void xmrig::Miner::setEnabled(bool enabled) +{ + if (d_ptr->enabled == enabled) { + return; + } + + d_ptr->enabled = enabled; + + if (enabled) { + LOG_INFO(GREEN_BOLD("resumed")); + } + else 
{ + LOG_INFO(YELLOW_BOLD("paused") ", press " MAGENTA_BG_BOLD(" r ") " to resume"); + } + + if (!d_ptr->active) { + return; + } + + Nonce::pause(!enabled); + Nonce::touch(); +} + + +void xmrig::Miner::setJob(const Job &job, bool donate) +{ + uv_rwlock_wrlock(&d_ptr->rwlock); + + const uint8_t index = donate ? 1 : 0; + + d_ptr->job = job; + d_ptr->job.setIndex(index); + + Nonce::reset(index); + + uv_rwlock_wrunlock(&d_ptr->rwlock); + + d_ptr->handleJobChange(); +} + + +void xmrig::Miner::stop() +{ +// xmrig::Handle::close(m_timer); +// m_hashrate->stop(); + + Nonce::stop(); + +// for (size_t i = 0; i < m_workers.size(); ++i) { +// m_workers[i]->join(); +// } + + for (IBackend *backend : d_ptr->backends) { + backend->stop(); + } +} + + +void xmrig::Miner::onTimer(const Timer *) +{ + for (IBackend *backend : d_ptr->backends) { + backend->tick(d_ptr->ticks); + } + + if ((d_ptr->ticks % (d_ptr->controller->config()->printTime() * 2)) == 0) { + printHashrate(false); + } + + d_ptr->ticks++; +} diff --git a/src/core/Miner.h b/src/core/Miner.h new file mode 100644 index 00000000..e7904575 --- /dev/null +++ b/src/core/Miner.h @@ -0,0 +1,65 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_MINER_H +#define XMRIG_MINER_H + + +#include "base/kernel/interfaces/ITimerListener.h" + + +namespace xmrig { + + +class Controller; +class Job; +class MinerPrivate; + + +class Miner : public ITimerListener +{ +public: + Miner(Controller *controller); + ~Miner() override; + + bool isEnabled() const; + Job job() const; + void pause(); + void printHashrate(bool details); + void setEnabled(bool enabled); + void setJob(const Job &job, bool donate); + void stop(); + +protected: + void onTimer(const Timer *timer) override; + +private: + MinerPrivate *d_ptr; +}; + + +} // namespace xmrig + + +#endif /* XMRIG_MINER_H */ diff --git a/src/crypto/cn/CnHash.cpp b/src/crypto/cn/CnHash.cpp index 6582db10..40f4fbba 100644 --- a/src/crypto/cn/CnHash.cpp +++ b/src/crypto/cn/CnHash.cpp @@ -202,6 +202,9 @@ static void patchAsmVariants() #endif +static const xmrig::CnHash cnHash; + + xmrig::CnHash::CnHash() { ADD_FN(Algorithm::CN_0); @@ -252,18 +255,18 @@ xmrig::CnHash::CnHash() } -xmrig::cn_hash_fun xmrig::CnHash::fn(const Algorithm &algorithm, AlgoVariant av, Assembly::Id assembly) const +xmrig::cn_hash_fun xmrig::CnHash::fn(const Algorithm &algorithm, AlgoVariant av, Assembly::Id assembly) { if (!algorithm.isValid()) { return nullptr; } # ifdef XMRIG_FEATURE_ASM - cn_hash_fun fun = m_map[algorithm][av][assembly == Assembly::AUTO ? Cpu::info()->assembly() : assembly]; + cn_hash_fun fun = cnHash.m_map[algorithm][av][assembly == Assembly::AUTO ? 
Cpu::info()->assembly() : assembly]; if (fun) { return fun; } # endif - return m_map[algorithm][av][Assembly::NONE]; + return cnHash.m_map[algorithm][av][Assembly::NONE]; } diff --git a/src/crypto/cn/CnHash.h b/src/crypto/cn/CnHash.h index fdfcc9f3..e4a7ebd2 100644 --- a/src/crypto/cn/CnHash.h +++ b/src/crypto/cn/CnHash.h @@ -65,7 +65,7 @@ public: CnHash(); - cn_hash_fun fn(const Algorithm &algorithm, AlgoVariant av, Assembly::Id assembly) const; + static cn_hash_fun fn(const Algorithm &algorithm, AlgoVariant av, Assembly::Id assembly); private: cn_hash_fun m_map[Algorithm::MAX][AV_MAX][Assembly::MAX] = {}; diff --git a/src/crypto/common/Nonce.cpp b/src/crypto/common/Nonce.cpp index 6670308a..45c7001a 100644 --- a/src/crypto/common/Nonce.cpp +++ b/src/crypto/common/Nonce.cpp @@ -32,7 +32,8 @@ namespace xmrig { -std::atomic Nonce::m_sequence; +std::atomic Nonce::m_paused; +std::atomic Nonce::m_sequence[Nonce::MAX]; uint32_t Nonce::m_nonces[2] = { 0, 0 }; @@ -45,7 +46,11 @@ static Nonce nonce; xmrig::Nonce::Nonce() { - m_sequence = 1; + m_paused = true; + + for (int i = 0; i < MAX; ++i) { + m_sequence[i] = 1; + } uv_mutex_init(&mutex); } @@ -77,7 +82,25 @@ void xmrig::Nonce::reset(uint8_t index) uv_mutex_lock(&mutex); m_nonces[index] = 0; - m_sequence++; + touch(); uv_mutex_unlock(&mutex); } + + +void xmrig::Nonce::stop() +{ + pause(false); + + for (int i = 0; i < MAX; ++i) { + m_sequence[i] = 0; + } +} + + +void xmrig::Nonce::touch() +{ + for (int i = 0; i < MAX; ++i) { + m_sequence[i]++; + } +} diff --git a/src/crypto/common/Nonce.h b/src/crypto/common/Nonce.h index ea843bc9..401139fd 100644 --- a/src/crypto/common/Nonce.h +++ b/src/crypto/common/Nonce.h @@ -35,19 +35,32 @@ namespace xmrig { class Nonce { public: + enum Backend { + CPU, + OPENCL, + CUDA, + MAX + }; + + Nonce(); - static inline bool isOutdated(uint64_t sequence) { return m_sequence.load(std::memory_order_relaxed) != sequence; } - static inline uint64_t sequence() { return 
m_sequence.load(std::memory_order_relaxed); } - static inline void stop() { m_sequence = 0; } - static inline void touch() { m_sequence++; } + static inline bool isOutdated(Backend backend, uint64_t sequence) { return m_sequence[backend].load(std::memory_order_relaxed) != sequence; } + static inline bool isPaused() { return m_paused.load(std::memory_order_relaxed); } + static inline uint64_t sequence(Backend backend) { return m_sequence[backend].load(std::memory_order_relaxed); } + static inline void pause(bool paused) { m_paused = paused; } + static inline void stop(Backend backend) { m_sequence[backend] = 0; } + static inline void touch(Backend backend) { m_sequence[backend]++; } static uint32_t next(uint8_t index, uint32_t nonce, uint32_t reserveCount, bool nicehash); static void reset(uint8_t index); + static void stop(); + static void touch(); private: + static std::atomic m_paused; + static std::atomic m_sequence[MAX]; static uint32_t m_nonces[2]; - static std::atomic m_sequence; }; diff --git a/src/net/Network.cpp b/src/net/Network.cpp index d40bebd1..6622a080 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -40,11 +40,12 @@ #include "base/tools/Timer.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "core/Miner.h" +#include "net/JobResult.h" #include "net/JobResults.h" #include "net/Network.h" #include "net/strategies/DonateStrategy.h" #include "rapidjson/document.h" -#include "workers/Workers.h" #ifdef XMRIG_FEATURE_API @@ -163,7 +164,8 @@ void xmrig::Network::onPause(IStrategy *strategy) if (!m_strategy->isActive()) { LOG_ERR("no active pools, stop mining"); m_state.stop(); - return Workers::pause(); + + return m_controller->miner()->pause(); } } @@ -212,7 +214,7 @@ void xmrig::Network::setJob(IClient *client, const Job &job, bool donate) } m_state.diff = job.diff(); - Workers::setJob(job, donate); + m_controller->miner()->setJob(job, donate); } diff --git a/src/workers/CpuThreadLegacy.cpp 
b/src/workers/CpuThreadLegacy.cpp index b8e33839..a560d33f 100644 --- a/src/workers/CpuThreadLegacy.cpp +++ b/src/workers/CpuThreadLegacy.cpp @@ -34,10 +34,6 @@ #include "workers/CpuThreadLegacy.h" - -static const xmrig::CnHash cnHash; - - xmrig::CpuThreadLegacy::CpuThreadLegacy(size_t index, Algorithm algorithm, CnHash::AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly) : m_algorithm(algorithm), m_av(av), @@ -54,7 +50,7 @@ xmrig::CpuThreadLegacy::CpuThreadLegacy(size_t index, Algorithm algorithm, CnHas xmrig::cn_hash_fun xmrig::CpuThreadLegacy::fn(const Algorithm &algorithm) const { - return cnHash.fn(algorithm, m_av, m_assembly); + return CnHash::fn(algorithm, m_av, m_assembly); } diff --git a/src/workers/Workers.cpp b/src/workers/Workers.cpp deleted file mode 100644 index 78954d8f..00000000 --- a/src/workers/Workers.cpp +++ /dev/null @@ -1,330 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - - -#include "api/Api.h" -#include "backend/cpu/CpuWorker.h" -#include "base/io/log/Log.h" -#include "base/tools/Chrono.h" -#include "base/tools/Handle.h" -#include "core/config/Config.h" -#include "core/Controller.h" -#include "crypto/common/Nonce.h" -#include "crypto/rx/RxAlgo.h" -#include "crypto/rx/RxCache.h" -#include "crypto/rx/RxDataset.h" -#include "Mem.h" -#include "rapidjson/document.h" -#include "workers/Hashrate.h" -#include "workers/ThreadHandle.h" -#include "workers/Workers.h" - - -bool Workers::m_active = false; -bool Workers::m_enabled = true; -Hashrate *Workers::m_hashrate = nullptr; -xmrig::Job Workers::m_job; -Workers::LaunchStatus Workers::m_status; -std::atomic Workers::m_paused; -std::vector Workers::m_workers; -uint64_t Workers::m_ticks = 0; -uv_mutex_t Workers::m_mutex; -uv_rwlock_t Workers::m_rwlock; -uv_timer_t *Workers::m_timer = nullptr; -xmrig::Controller *Workers::m_controller = nullptr; - - -xmrig::Job Workers::job() -{ - uv_rwlock_rdlock(&m_rwlock); - xmrig::Job job = m_job; - uv_rwlock_rdunlock(&m_rwlock); - - return job; -} - - -size_t Workers::hugePages() -{ - uv_mutex_lock(&m_mutex); - const size_t hugePages = m_status.hugePages; - uv_mutex_unlock(&m_mutex); - - return hugePages; -} - - -size_t Workers::threads() -{ - uv_mutex_lock(&m_mutex); - const size_t threads = m_status.threads; - uv_mutex_unlock(&m_mutex); - - return threads; -} - - -void Workers::pause() -{ - m_active = false; - m_paused = 1; - - xmrig::Nonce::touch(); -} - - -void Workers::printHashrate(bool detail) -{ - assert(m_controller != nullptr); - if (!m_controller) { - return; - } - - if (detail) { - char num1[8] = { 0 }; - char num2[8] = { 0 }; - char num3[8] = { 0 }; - - xmrig::Log::print(WHITE_BOLD_S "| THREAD | AFFINITY | 10s H/s | 60s H/s | 15m H/s |"); - - size_t i = 0; - for (const xmrig::IThread *thread : m_controller->config()->threads()) { - xmrig::Log::print("| %6zu | %8" PRId64 " | %7s | %7s | %7s |", - 
thread->index(), - thread->affinity(), - Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::ShortInterval), num1, sizeof num1), - Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::MediumInterval), num2, sizeof num2), - Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::LargeInterval), num3, sizeof num3) - ); - - i++; - } - } - - m_hashrate->print(); -} - - -void Workers::setEnabled(bool enabled) -{ - if (m_enabled == enabled) { - return; - } - - m_enabled = enabled; - if (!m_active) { - return; - } - - m_paused = enabled ? 0 : 1; - xmrig::Nonce::touch(); -} - - -void Workers::setJob(const xmrig::Job &job, bool donate) -{ - uv_rwlock_wrlock(&m_rwlock); - - m_job = job; - m_job.setIndex(donate ? 1 : 0); - - xmrig::Nonce::reset(donate ? 1 : 0); - - uv_rwlock_wrunlock(&m_rwlock); - - m_active = true; - if (!m_enabled) { - return; - } - - m_paused = 0; -} - - -void Workers::start(xmrig::Controller *controller) -{ -# ifdef APP_DEBUG - LOG_NOTICE("THREADS ------------------------------------------------------------------"); - for (const xmrig::IThread *thread : controller->config()->threads()) { - thread->print(); - } - LOG_NOTICE("--------------------------------------------------------------------------"); -# endif - - m_controller = controller; - - const std::vector &threads = controller->config()->threads(); - m_status.algo = xmrig::Algorithm::RX_WOW; // FIXME algo - m_status.threads = threads.size(); - - for (const xmrig::IThread *thread : threads) { - m_status.ways += thread->multiway(); - } - - m_hashrate = new Hashrate(threads.size(), controller); - - uv_mutex_init(&m_mutex); - uv_rwlock_init(&m_rwlock); - - m_paused = 1; - - m_timer = new uv_timer_t; - uv_timer_init(uv_default_loop(), m_timer); - uv_timer_start(m_timer, Workers::onTick, 500, 500); - - for (xmrig::IThread *thread : threads) { - ThreadHandle *handle = new ThreadHandle(thread); - - m_workers.push_back(handle); - handle->start(Workers::onReady); - } -} - - -void 
Workers::stop() -{ - xmrig::Handle::close(m_timer); - m_hashrate->stop(); - - m_paused = 0; - - xmrig::Nonce::stop(); - - for (size_t i = 0; i < m_workers.size(); ++i) { - m_workers[i]->join(); - } -} - - -#ifdef XMRIG_FEATURE_API -void Workers::threadsSummary(rapidjson::Document &doc) -{ - uv_mutex_lock(&m_mutex); - const uint64_t pages[2] = { m_status.hugePages, m_status.pages }; - const uint64_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo); - uv_mutex_unlock(&m_mutex); - - auto &allocator = doc.GetAllocator(); - - rapidjson::Value hugepages(rapidjson::kArrayType); - hugepages.PushBack(pages[0], allocator); - hugepages.PushBack(pages[1], allocator); - - doc.AddMember("hugepages", hugepages, allocator); - doc.AddMember("memory", memory, allocator); -} -#endif - - -void Workers::onReady(void *arg) -{ - auto handle = static_cast(arg); - - IWorker *worker = nullptr; - - switch (handle->config()->multiway()) { - case 1: - worker = new xmrig::CpuWorker<1>(handle); - break; - - case 2: - worker = new xmrig::CpuWorker<2>(handle); - break; - - case 3: - worker = new xmrig::CpuWorker<3>(handle); - break; - - case 4: - worker = new xmrig::CpuWorker<4>(handle); - break; - - case 5: - worker = new xmrig::CpuWorker<5>(handle); - break; - } - - handle->setWorker(worker); - - if (!worker->selfTest()) { - LOG_ERR("thread %zu error: \"hash self-test failed\".", handle->worker()->id()); - - return; - } - - start(worker); -} - - -void Workers::onTick(uv_timer_t *) -{ - for (ThreadHandle *handle : m_workers) { - if (!handle->worker()) { - return; - } - - m_hashrate->add(handle->threadId(), handle->worker()->hashCount(), handle->worker()->timestamp()); - } - - if ((m_ticks++ & 0xF) == 0) { - m_hashrate->updateHighest(); - } -} - - -void Workers::start(IWorker *worker) -{ -// const Worker *w = static_cast(worker); - - uv_mutex_lock(&m_mutex); - m_status.started++; -// m_status.pages += w->memory().pages; -// m_status.hugePages += w->memory().hugePages; - - if 
(m_status.started == m_status.threads) { - const double percent = (double) m_status.hugePages / m_status.pages * 100.0; - const size_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo) / 1024; - -# ifdef XMRIG_ALGO_RANDOMX - if (m_status.algo.family() == xmrig::Algorithm::RANDOM_X) { - LOG_INFO(GREEN_BOLD("READY (CPU)") " threads " CYAN_BOLD("%zu(%zu)") " memory " CYAN_BOLD("%zu KB") "", - m_status.threads, m_status.ways, memory); - } else -# endif - { - LOG_INFO(GREEN_BOLD("READY (CPU)") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu %1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") "", - m_status.threads, m_status.ways, - (m_status.hugePages == m_status.pages ? GREEN_BOLD_S : (m_status.hugePages == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), - m_status.hugePages, m_status.pages, percent, memory); - } - } - - uv_mutex_unlock(&m_mutex); - - worker->start(); -} diff --git a/src/workers/WorkersLegacy.cpp b/src/workers/WorkersLegacy.cpp new file mode 100644 index 00000000..29571608 --- /dev/null +++ b/src/workers/WorkersLegacy.cpp @@ -0,0 +1,331 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + + +#include "api/Api.h" +#include "backend/cpu/CpuWorker.h" +#include "base/io/log/Log.h" +#include "base/tools/Chrono.h" +#include "base/tools/Handle.h" +#include "core/config/Config.h" +#include "core/Controller.h" +#include "crypto/common/Nonce.h" +#include "crypto/rx/RxAlgo.h" +#include "crypto/rx/RxCache.h" +#include "crypto/rx/RxDataset.h" +#include "Mem.h" +#include "rapidjson/document.h" +#include "workers/Hashrate.h" +#include "workers/WorkersLegacy.h" + + +bool WorkersLegacy::m_active = false; +bool WorkersLegacy::m_enabled = true; +Hashrate *WorkersLegacy::m_hashrate = nullptr; +xmrig::Job WorkersLegacy::m_job; +WorkersLegacy::LaunchStatus WorkersLegacy::m_status; +std::vector* > WorkersLegacy::m_workers; +uint64_t WorkersLegacy::m_ticks = 0; +uv_mutex_t WorkersLegacy::m_mutex; +uv_rwlock_t WorkersLegacy::m_rwlock; +//uv_timer_t *Workers::m_timer = nullptr; +xmrig::Controller *WorkersLegacy::m_controller = nullptr; + + +//xmrig::Job WorkersLegacy::job() +//{ +// uv_rwlock_rdlock(&m_rwlock); +// xmrig::Job job = m_job; +// uv_rwlock_rdunlock(&m_rwlock); + +// return job; +//} + + +size_t WorkersLegacy::hugePages() +{ + uv_mutex_lock(&m_mutex); + const size_t hugePages = m_status.hugePages; + uv_mutex_unlock(&m_mutex); + + return hugePages; +} + + +size_t WorkersLegacy::threads() +{ + uv_mutex_lock(&m_mutex); + const size_t threads = m_status.threads; + uv_mutex_unlock(&m_mutex); + + return threads; +} + + +//void Workers::pause() +//{ +// m_active = false; + +// xmrig::Nonce::pause(true); +// xmrig::Nonce::touch(); +//} + + +//void Workers::printHashrate(bool detail) +//{ +// assert(m_controller != nullptr); +// if (!m_controller) { +// return; +// } + +// if (detail) { +// char num1[8] = { 0 }; +// char num2[8] = { 0 }; +// char num3[8] = { 0 }; + +// xmrig::Log::print(WHITE_BOLD_S "| THREAD | AFFINITY | 10s H/s | 60s H/s | 15m H/s |"); + +// size_t i = 0; +// for (const xmrig::IThread *thread : 
m_controller->config()->threads()) { +// xmrig::Log::print("| %6zu | %8" PRId64 " | %7s | %7s | %7s |", +// thread->index(), +// thread->affinity(), +// Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::ShortInterval), num1, sizeof num1), +// Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::MediumInterval), num2, sizeof num2), +// Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::LargeInterval), num3, sizeof num3) +// ); + +// i++; +// } +// } + +// m_hashrate->print(); +//} + + +//void Workers::setEnabled(bool enabled) +//{ +// if (m_enabled == enabled) { +// return; +// } + +// m_enabled = enabled; +// if (!m_active) { +// return; +// } + +// xmrig::Nonce::pause(!enabled); +// xmrig::Nonce::touch(); +//} + + +//void Workers::setJob(const xmrig::Job &job, bool donate) +//{ +// uv_rwlock_wrlock(&m_rwlock); + +// m_job = job; +// m_job.setIndex(donate ? 1 : 0); + +// xmrig::Nonce::reset(donate ? 1 : 0); + +// uv_rwlock_wrunlock(&m_rwlock); + +// m_active = true; +// if (!m_enabled) { +// return; +// } + +// xmrig::Nonce::pause(false); +//} + + +void WorkersLegacy::start(xmrig::Controller *controller) +{ + using namespace xmrig; + +# ifdef APP_DEBUG + LOG_NOTICE("THREADS ------------------------------------------------------------------"); + for (const xmrig::IThread *thread : controller->config()->threads()) { + thread->print(); + } + LOG_NOTICE("--------------------------------------------------------------------------"); +# endif + + m_controller = controller; + + m_status.algo = xmrig::Algorithm::RX_WOW; // FIXME algo + const CpuThreads &threads = controller->config()->cpu().threads().get(m_status.algo); + m_status.threads = threads.size(); + + for (const CpuThread &thread : threads) { + m_status.ways += thread.intensity(); + } + + m_hashrate = new Hashrate(threads.size(), controller); + + uv_mutex_init(&m_mutex); + uv_rwlock_init(&m_rwlock); + +// m_timer = new uv_timer_t; +// uv_timer_init(uv_default_loop(), m_timer); +// 
uv_timer_start(m_timer, Workers::onTick, 500, 500); + +// size_t index = 0; +// for (const CpuThread &thread : threads) { +// Thread *handle = new Thread(index++, CpuLaunchData(m_status.algo, controller->config()->cpu(), thread)); + +// m_workers.push_back(handle); +// handle->start(WorkersLegacy::onReady); +// } +} + + +//void Workers::stop() +//{ +// xmrig::Handle::close(m_timer); +// m_hashrate->stop(); + +// xmrig::Nonce::stop(); + +// for (size_t i = 0; i < m_workers.size(); ++i) { +// m_workers[i]->join(); +// } +//} + + +#ifdef XMRIG_FEATURE_API +void WorkersLegacy::threadsSummary(rapidjson::Document &doc) +{ + uv_mutex_lock(&m_mutex); + const uint64_t pages[2] = { m_status.hugePages, m_status.pages }; + const uint64_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo); + uv_mutex_unlock(&m_mutex); + + auto &allocator = doc.GetAllocator(); + + rapidjson::Value hugepages(rapidjson::kArrayType); + hugepages.PushBack(pages[0], allocator); + hugepages.PushBack(pages[1], allocator); + + doc.AddMember("hugepages", hugepages, allocator); + doc.AddMember("memory", memory, allocator); +} +#endif + + +//void WorkersLegacy::onReady(void *arg) +//{ +// using namespace xmrig; + +// auto handle = static_cast* >(arg); + +// xmrig::IWorker *worker = nullptr; + +// switch (handle->config().intensity) { +// case 1: +// worker = new CpuWorker<1>(handle->index(), handle->config()); +// break; + +// case 2: +// worker = new CpuWorker<2>(handle->index(), handle->config()); +// break; + +// case 3: +// worker = new CpuWorker<3>(handle->index(), handle->config()); +// break; + +// case 4: +// worker = new CpuWorker<4>(handle->index(), handle->config()); +// break; + +// case 5: +// worker = new CpuWorker<5>(handle->index(), handle->config()); +// break; +// } + +// handle->setWorker(worker); + +// if (!worker->selfTest()) { +// LOG_ERR("thread %zu error: \"hash self-test failed\".", handle->worker()->id()); + +// return; +// } + +// start(worker); +//} + + +void 
WorkersLegacy::onTick(uv_timer_t *) +{ + using namespace xmrig; + + for (Thread *handle : m_workers) { + if (!handle->worker()) { + return; + } + + m_hashrate->add(handle->index(), handle->worker()->hashCount(), handle->worker()->timestamp()); + } + + if ((m_ticks++ & 0xF) == 0) { + m_hashrate->updateHighest(); + } +} + + +void WorkersLegacy::start(xmrig::IWorker *worker) +{ +// const Worker *w = static_cast(worker); + + uv_mutex_lock(&m_mutex); + m_status.started++; +// m_status.pages += w->memory().pages; +// m_status.hugePages += w->memory().hugePages; + + if (m_status.started == m_status.threads) { + const double percent = (double) m_status.hugePages / m_status.pages * 100.0; + const size_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo) / 1024; + +# ifdef XMRIG_ALGO_RANDOMX + if (m_status.algo.family() == xmrig::Algorithm::RANDOM_X) { + LOG_INFO(GREEN_BOLD("READY (CPU)") " threads " CYAN_BOLD("%zu(%zu)") " memory " CYAN_BOLD("%zu KB") "", + m_status.threads, m_status.ways, memory); + } else +# endif + { + LOG_INFO(GREEN_BOLD("READY (CPU)") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu %1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") "", + m_status.threads, m_status.ways, + (m_status.hugePages == m_status.pages ? GREEN_BOLD_S : (m_status.hugePages == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), + m_status.hugePages, m_status.pages, percent, memory); + } + } + + uv_mutex_unlock(&m_mutex); + + worker->start(); +} diff --git a/src/workers/Workers.h b/src/workers/WorkersLegacy.h similarity index 76% rename from src/workers/Workers.h rename to src/workers/WorkersLegacy.h index 83777d0d..5ee53dbf 100644 --- a/src/workers/Workers.h +++ b/src/workers/WorkersLegacy.h @@ -22,8 +22,8 @@ * along with this program. If not, see . 
*/ -#ifndef XMRIG_WORKERS_H -#define XMRIG_WORKERS_H +#ifndef XMRIG_WORKERSLEGACY_H +#define XMRIG_WORKERSLEGACY_H #include @@ -35,36 +35,37 @@ # include #endif +#include "backend/common/Thread.h" +#include "backend/cpu/CpuLaunchData.h" #include "base/net/stratum/Job.h" #include "net/JobResult.h" #include "rapidjson/fwd.h" class Hashrate; -class IWorker; -class ThreadHandle; namespace xmrig { + class IWorker; class Controller; + class ThreadHandle; } -class Workers +class WorkersLegacy { public: static size_t hugePages(); static size_t threads(); - static void pause(); - static void printHashrate(bool detail); - static void setEnabled(bool enabled); - static void setJob(const xmrig::Job &job, bool donate); +// static void pause(); +// static void printHashrate(bool detail); +// static void setEnabled(bool enabled); +// static void setJob(const xmrig::Job &job, bool donate); static void start(xmrig::Controller *controller); - static void stop(); - static xmrig::Job job(); +// static void stop(); +// static xmrig::Job job(); - static inline bool isEnabled() { return m_enabled; } - static inline bool isPaused() { return m_paused.load(std::memory_order_relaxed) == 1; } +// static inline bool isEnabled() { return m_enabled; } static inline Hashrate *hashrate() { return m_hashrate; } # ifdef XMRIG_FEATURE_API @@ -72,9 +73,9 @@ public: # endif private: - static void onReady(void *arg); +// static void onReady(void *arg); static void onTick(uv_timer_t *handle); - static void start(IWorker *worker); + static void start(xmrig::IWorker *worker); class LaunchStatus { @@ -100,14 +101,13 @@ private: static Hashrate *m_hashrate; static xmrig::Job m_job; static LaunchStatus m_status; - static std::atomic m_paused; - static std::vector m_workers; + static std::vector* > m_workers; static uint64_t m_ticks; static uv_mutex_t m_mutex; static uv_rwlock_t m_rwlock; - static uv_timer_t *m_timer; +// static uv_timer_t *m_timer; static xmrig::Controller *m_controller; }; -#endif /* 
XMRIG_WORKERS_H */ +#endif /* XMRIG_WORKERSLEGACY_H */ From 5699147aabe84a413c73e1e74dc6d416b100b885 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 17 Jul 2019 01:28:42 +0700 Subject: [PATCH 027/172] Restored printable hashrate. --- CMakeLists.txt | 2 - src/api/v1/ApiRouter.cpp | 86 +++++++++--------- src/api/v1/ApiRouter.h | 2 +- src/{workers => backend/common}/Hashrate.cpp | 72 +++++---------- src/{workers => backend/common}/Hashrate.h | 14 +-- src/backend/common/Workers.cpp | 40 +++++++- src/backend/common/Workers.h | 3 + src/backend/common/common.cmake | 2 + src/backend/common/interfaces/IBackend.h | 2 + src/backend/cpu/CpuBackend.cpp | 32 ++++++- src/backend/cpu/CpuBackend.h | 1 + src/core/Miner.cpp | 35 ++++++- src/core/Miner.h | 5 + src/workers/WorkersLegacy.cpp | 96 +++----------------- src/workers/WorkersLegacy.h | 8 +- 15 files changed, 196 insertions(+), 204 deletions(-) rename src/{workers => backend/common}/Hashrate.cpp (71%) rename src/{workers => backend/common}/Hashrate.h (90%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8749a003..543836c8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -44,7 +44,6 @@ set(HEADERS src/Summary.h src/version.h src/workers/CpuThreadLegacy.h - src/workers/Hashrate.h src/workers/WorkersLegacy.h ) @@ -92,7 +91,6 @@ set(SOURCES src/net/strategies/DonateStrategy.cpp src/Summary.cpp src/workers/CpuThreadLegacy.cpp - src/workers/Hashrate.cpp src/workers/WorkersLegacy.cpp src/xmrig.cpp ) diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index 9e609d13..48a92c93 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -36,7 +36,7 @@ #include "core/config/Config.h" #include "rapidjson/document.h" #include "version.h" -#include "workers/Hashrate.h" +//#include "workers/Hashrate.h" #include "workers/WorkersLegacy.h" @@ -69,7 +69,7 @@ void xmrig::ApiRouter::onRequest(IApiRequest &request) if (request.url() == "/1/summary" || request.url() == "/api.json") { request.accept(); 
getMiner(request.reply(), request.doc()); - getHashrate(request.reply(), request.doc()); +// getHashrate(request.reply(), request.doc()); } else if (request.url() == "/1/threads") { request.accept(); @@ -98,35 +98,35 @@ void xmrig::ApiRouter::onRequest(IApiRequest &request) } -void xmrig::ApiRouter::getHashrate(rapidjson::Value &reply, rapidjson::Document &doc) const -{ - using namespace rapidjson; - auto &allocator = doc.GetAllocator(); +//void xmrig::ApiRouter::getHashrate(rapidjson::Value &reply, rapidjson::Document &doc) const +//{ +// using namespace rapidjson; +// auto &allocator = doc.GetAllocator(); - Value hashrate(kObjectType); - Value total(kArrayType); - Value threads(kArrayType); +// Value hashrate(kObjectType); +// Value total(kArrayType); +// Value threads(kArrayType); - const Hashrate *hr = WorkersLegacy::hashrate(); +// const Hashrate *hr = WorkersLegacy::hashrate(); - total.PushBack(normalize(hr->calc(Hashrate::ShortInterval)), allocator); - total.PushBack(normalize(hr->calc(Hashrate::MediumInterval)), allocator); - total.PushBack(normalize(hr->calc(Hashrate::LargeInterval)), allocator); +// total.PushBack(normalize(hr->calc(Hashrate::ShortInterval)), allocator); +// total.PushBack(normalize(hr->calc(Hashrate::MediumInterval)), allocator); +// total.PushBack(normalize(hr->calc(Hashrate::LargeInterval)), allocator); - for (size_t i = 0; i < WorkersLegacy::threads(); i++) { - Value thread(kArrayType); - thread.PushBack(normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); - thread.PushBack(normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); - thread.PushBack(normalize(hr->calc(i, Hashrate::LargeInterval)), allocator); +// for (size_t i = 0; i < WorkersLegacy::threads(); i++) { +// Value thread(kArrayType); +// thread.PushBack(normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); +// thread.PushBack(normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); +// thread.PushBack(normalize(hr->calc(i, 
Hashrate::LargeInterval)), allocator); - threads.PushBack(thread, allocator); - } +// threads.PushBack(thread, allocator); +// } - hashrate.AddMember("total", total, allocator); - hashrate.AddMember("highest", normalize(hr->highest()), allocator); - hashrate.AddMember("threads", threads, allocator); - reply.AddMember("hashrate", hashrate, allocator); -} +// hashrate.AddMember("total", total, allocator); +// hashrate.AddMember("highest", normalize(hr->highest()), allocator); +// hashrate.AddMember("threads", threads, allocator); +// reply.AddMember("hashrate", hashrate, allocator); +//} void xmrig::ApiRouter::getMiner(rapidjson::Value &reply, rapidjson::Document &doc) const @@ -151,29 +151,29 @@ void xmrig::ApiRouter::getMiner(rapidjson::Value &reply, rapidjson::Document &do void xmrig::ApiRouter::getThreads(rapidjson::Value &reply, rapidjson::Document &doc) const { - using namespace rapidjson; - auto &allocator = doc.GetAllocator(); - const Hashrate *hr = WorkersLegacy::hashrate(); +// using namespace rapidjson; +// auto &allocator = doc.GetAllocator(); +// const Hashrate *hr = WorkersLegacy::hashrate(); - WorkersLegacy::threadsSummary(doc); +// WorkersLegacy::threadsSummary(doc); - const std::vector &threads = m_base->config()->threads(); - Value list(kArrayType); +// const std::vector &threads = m_base->config()->threads(); +// Value list(kArrayType); - size_t i = 0; - for (const xmrig::IThread *thread : threads) { - Value value = thread->toAPI(doc); +// size_t i = 0; +// for (const xmrig::IThread *thread : threads) { +// Value value = thread->toAPI(doc); - Value hashrate(kArrayType); - hashrate.PushBack(normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); - hashrate.PushBack(normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); - hashrate.PushBack(normalize(hr->calc(i, Hashrate::LargeInterval)), allocator); +// Value hashrate(kArrayType); +// hashrate.PushBack(normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); +// 
hashrate.PushBack(normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); +// hashrate.PushBack(normalize(hr->calc(i, Hashrate::LargeInterval)), allocator); - i++; +// i++; - value.AddMember("hashrate", hashrate, allocator); - list.PushBack(value, allocator); - } +// value.AddMember("hashrate", hashrate, allocator); +// list.PushBack(value, allocator); +// } - reply.AddMember("threads", list, allocator); +// reply.AddMember("threads", list, allocator); } diff --git a/src/api/v1/ApiRouter.h b/src/api/v1/ApiRouter.h index bdbbaea4..e2b9bd25 100644 --- a/src/api/v1/ApiRouter.h +++ b/src/api/v1/ApiRouter.h @@ -49,7 +49,7 @@ protected: void onRequest(IApiRequest &request) override; private: - void getHashrate(rapidjson::Value &reply, rapidjson::Document &doc) const; +// void getHashrate(rapidjson::Value &reply, rapidjson::Document &doc) const; void getMiner(rapidjson::Value &reply, rapidjson::Document &doc) const; void getThreads(rapidjson::Value &reply, rapidjson::Document &doc) const; diff --git a/src/workers/Hashrate.cpp b/src/backend/common/Hashrate.cpp similarity index 71% rename from src/workers/Hashrate.cpp rename to src/backend/common/Hashrate.cpp index 0a683caa..6ffd45b7 100644 --- a/src/workers/Hashrate.cpp +++ b/src/backend/common/Hashrate.cpp @@ -29,12 +29,9 @@ #include -#include "base/io/log/Log.h" #include "base/tools/Chrono.h" #include "base/tools/Handle.h" -#include "core/config/Config.h" -#include "core/Controller.h" -#include "workers/Hashrate.h" +#include "backend/common/Hashrate.h" inline static const char *format(double h, char *buf, size_t size) @@ -48,10 +45,9 @@ inline static const char *format(double h, char *buf, size_t size) } -Hashrate::Hashrate(size_t threads, xmrig::Controller *controller) : +xmrig::Hashrate::Hashrate(size_t threads) : m_highest(0.0), - m_threads(threads), - m_timer(nullptr) + m_threads(threads) { m_counts = new uint64_t*[threads]; m_timestamps = new uint64_t*[threads]; @@ -62,20 +58,23 @@ Hashrate::Hashrate(size_t 
threads, xmrig::Controller *controller) : m_timestamps[i] = new uint64_t[kBucketSize](); m_top[i] = 0; } - - const int printTime = controller->config()->printTime(); - - if (printTime > 0) { - m_timer = new uv_timer_t; - uv_timer_init(uv_default_loop(), m_timer); - m_timer->data = this; - - uv_timer_start(m_timer, Hashrate::onReport, (printTime + 4) * 1000, printTime * 1000); - } } -double Hashrate::calc(size_t ms) const +xmrig::Hashrate::~Hashrate() +{ + for (size_t i = 0; i < m_threads; i++) { + delete [] m_counts[i]; + delete [] m_timestamps[i]; + } + + delete [] m_counts; + delete [] m_timestamps; + delete [] m_top; +} + + +double xmrig::Hashrate::calc(size_t ms) const { double result = 0.0; double data; @@ -91,7 +90,7 @@ double Hashrate::calc(size_t ms) const } -double Hashrate::calc(size_t threadId, size_t ms) const +double xmrig::Hashrate::calc(size_t threadId, size_t ms) const { assert(threadId < m_threads); if (threadId >= m_threads) { @@ -140,7 +139,7 @@ double Hashrate::calc(size_t threadId, size_t ms) const } -void Hashrate::add(size_t threadId, uint64_t count, uint64_t timestamp) +void xmrig::Hashrate::add(size_t threadId, uint64_t count, uint64_t timestamp) { const size_t top = m_top[threadId]; m_counts[threadId][top] = count; @@ -150,30 +149,7 @@ void Hashrate::add(size_t threadId, uint64_t count, uint64_t timestamp) } -void Hashrate::print() const -{ - char num1[8] = { 0 }; - char num2[8] = { 0 }; - char num3[8] = { 0 }; - char num4[8] = { 0 }; - - LOG_INFO(WHITE_BOLD("speed") " 10s/60s/15m " CYAN_BOLD("%s") CYAN(" %s %s ") CYAN_BOLD("H/s") " max " CYAN_BOLD("%s H/s"), - format(calc(ShortInterval), num1, sizeof(num1)), - format(calc(MediumInterval), num2, sizeof(num2)), - format(calc(LargeInterval), num3, sizeof(num3)), - format(m_highest, num4, sizeof(num4)) - ); -} - - -void Hashrate::stop() -{ - xmrig::Handle::close(m_timer); - m_timer = nullptr; -} - - -void Hashrate::updateHighest() +void xmrig::Hashrate::updateHighest() { double highest = 
calc(ShortInterval); if (isnormal(highest) && highest > m_highest) { @@ -182,13 +158,7 @@ void Hashrate::updateHighest() } -const char *Hashrate::format(double h, char *buf, size_t size) +const char *xmrig::Hashrate::format(double h, char *buf, size_t size) { return ::format(h, buf, size); } - - -void Hashrate::onReport(uv_timer_t *handle) -{ - static_cast(handle->data)->print(); -} diff --git a/src/workers/Hashrate.h b/src/backend/common/Hashrate.h similarity index 90% rename from src/workers/Hashrate.h rename to src/backend/common/Hashrate.h index d27b289e..1787bf6a 100644 --- a/src/workers/Hashrate.h +++ b/src/backend/common/Hashrate.h @@ -27,12 +27,9 @@ #include -#include namespace xmrig { - class Controller; -} class Hashrate @@ -44,12 +41,11 @@ public: LargeInterval = 900000 }; - Hashrate(size_t threads, xmrig::Controller *controller); + Hashrate(size_t threads); + ~Hashrate(); double calc(size_t ms) const; double calc(size_t threadId, size_t ms) const; void add(size_t threadId, uint64_t count, uint64_t timestamp); - void print() const; - void stop(); void updateHighest(); inline double highest() const { return m_highest; } @@ -58,8 +54,6 @@ public: static const char *format(double h, char *buf, size_t size); private: - static void onReport(uv_timer_t *handle); - constexpr static size_t kBucketSize = 2 << 11; constexpr static size_t kBucketMask = kBucketSize - 1; @@ -68,8 +62,10 @@ private: uint32_t* m_top; uint64_t** m_counts; uint64_t** m_timestamps; - uv_timer_t *m_timer; }; +} // namespace xmrig + + #endif /* XMRIG_HASHRATE_H */ diff --git a/src/backend/common/Workers.cpp b/src/backend/common/Workers.cpp index 987ca526..c4ac38a5 100644 --- a/src/backend/common/Workers.cpp +++ b/src/backend/common/Workers.cpp @@ -24,6 +24,7 @@ */ +#include "backend/common/Hashrate.h" #include "backend/common/Workers.h" #include "backend/cpu/CpuWorker.h" #include "base/io/log/Log.h" @@ -37,13 +38,16 @@ class WorkersPrivate public: inline WorkersPrivate() { - } inline 
~WorkersPrivate() { + delete hashrate; } + + + Hashrate *hashrate = nullptr; }; @@ -65,6 +69,13 @@ xmrig::Workers::~Workers() } +template +const xmrig::Hashrate *xmrig::Workers::hashrate() const +{ + return d_ptr->hashrate; +} + + template void xmrig::Workers::add(const T &data) { @@ -75,6 +86,8 @@ void xmrig::Workers::add(const T &data) template void xmrig::Workers::start() { + d_ptr->hashrate = new Hashrate(m_workers.size()); + for (Thread *worker : m_workers) { worker->start(Workers::onReady); } @@ -92,13 +105,34 @@ void xmrig::Workers::stop() m_workers.clear(); Nonce::touch(T::backend()); + + delete d_ptr->hashrate; + d_ptr->hashrate = nullptr; } template -void xmrig::Workers::onReady(void *arg) +void xmrig::Workers::tick(uint64_t) +{ + if (!d_ptr->hashrate) { + return; + } + + for (Thread *handle : m_workers) { + if (!handle->worker()) { + return; + } + + d_ptr->hashrate->add(handle->index(), handle->worker()->hashCount(), handle->worker()->timestamp()); + } + + d_ptr->hashrate->updateHighest(); +} + + +template +void xmrig::Workers::onReady(void *) { - printf("ON READY\n"); } diff --git a/src/backend/common/Workers.h b/src/backend/common/Workers.h index 25f81c5b..3ef4b015 100644 --- a/src/backend/common/Workers.h +++ b/src/backend/common/Workers.h @@ -34,6 +34,7 @@ namespace xmrig { +class Hashrate; class WorkersPrivate; @@ -44,9 +45,11 @@ public: Workers(); ~Workers(); + const Hashrate *hashrate() const; void add(const T &data); void start(); void stop(); + void tick(uint64_t ticks); private: static void onReady(void *arg); diff --git a/src/backend/common/common.cmake b/src/backend/common/common.cmake index bb84af58..c470ea50 100644 --- a/src/backend/common/common.cmake +++ b/src/backend/common/common.cmake @@ -2,6 +2,7 @@ set(HEADERS_BACKEND_COMMON src/backend/common/interfaces/IBackend.h src/backend/common/interfaces/IThread.h src/backend/common/interfaces/IWorker.h + src/backend/common/Hashrate.h src/backend/common/Thread.h src/backend/common/Threads.h 
src/backend/common/Worker.h @@ -10,6 +11,7 @@ set(HEADERS_BACKEND_COMMON ) set(SOURCES_BACKEND_COMMON + src/backend/common/Hashrate.cpp src/backend/common/Threads.cpp src/backend/common/Worker.cpp src/backend/common/Workers.cpp diff --git a/src/backend/common/interfaces/IBackend.h b/src/backend/common/interfaces/IBackend.h index d6fe1695..69ed4c8c 100644 --- a/src/backend/common/interfaces/IBackend.h +++ b/src/backend/common/interfaces/IBackend.h @@ -32,6 +32,7 @@ namespace xmrig { +class Hashrate; class Job; class String; @@ -41,6 +42,7 @@ class IBackend public: virtual ~IBackend() = default; + virtual const Hashrate *hashrate() const = 0; virtual const String &profileName() const = 0; virtual void printHashrate(bool details) = 0; virtual void setJob(const Job &job) = 0; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index a1ae5747..c8d38ca6 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -23,17 +23,16 @@ */ +#include "backend/common/Hashrate.h" #include "backend/common/Workers.h" #include "backend/cpu/CpuBackend.h" +#include "base/io/log/Log.h" #include "base/net/stratum/Job.h" #include "base/tools/String.h" #include "core/config/Config.h" #include "core/Controller.h" -#include "base/io/log/Log.h" - - namespace xmrig { @@ -98,6 +97,12 @@ xmrig::CpuBackend::~CpuBackend() } +const xmrig::Hashrate *xmrig::CpuBackend::hashrate() const +{ + return d_ptr->workers.hashrate(); +} + + const xmrig::String &xmrig::CpuBackend::profileName() const { return d_ptr->profileName; @@ -106,7 +111,26 @@ const xmrig::String &xmrig::CpuBackend::profileName() const void xmrig::CpuBackend::printHashrate(bool details) { + if (!details || !hashrate()) { + return; + } + char num[8 * 3] = { 0 }; + + Log::print(WHITE_BOLD_S "| CPU THREAD | AFFINITY | 10s H/s | 60s H/s | 15m H/s |"); + + size_t i = 0; + for (const CpuThread &thread : d_ptr->threads) { + Log::print("| %13zu | %8" PRId64 " | %7s | %7s | %7s |", + i, + 
thread.affinity(), + Hashrate::format(hashrate()->calc(i, Hashrate::ShortInterval), num, sizeof num / 3), + Hashrate::format(hashrate()->calc(i, Hashrate::MediumInterval), num + 8, sizeof num / 3), + Hashrate::format(hashrate()->calc(i, Hashrate::LargeInterval), num + 8 * 2, sizeof num / 3) + ); + + i++; + } } @@ -147,5 +171,5 @@ void xmrig::CpuBackend::stop() void xmrig::CpuBackend::tick(uint64_t ticks) { - + d_ptr->workers.tick(ticks); } diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h index d39ab38d..a7b742eb 100644 --- a/src/backend/cpu/CpuBackend.h +++ b/src/backend/cpu/CpuBackend.h @@ -44,6 +44,7 @@ public: ~CpuBackend() override; protected: + const Hashrate *hashrate() const override; const String &profileName() const override; void printHashrate(bool details) override; void setJob(const Job &job) override; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 1f819694..83ce2206 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -26,6 +26,7 @@ #include +#include "backend/common/Hashrate.h" #include "backend/cpu/CpuBackend.h" #include "base/io/log/Log.h" #include "base/net/stratum/Job.h" @@ -35,8 +36,6 @@ #include "core/Miner.h" #include "crypto/common/Nonce.h" -#include "base/tools/Chrono.h" - namespace xmrig { @@ -83,6 +82,7 @@ public: bool active = false; bool enabled = true; Controller *controller; + double maxHashrate = 0.0; Job job; std::vector backends; Timer *timer = nullptr; @@ -116,6 +116,12 @@ bool xmrig::Miner::isEnabled() const } +const std::vector &xmrig::Miner::backends() const +{ + return d_ptr->backends; +} + + xmrig::Job xmrig::Miner::job() const { uv_rwlock_rdlock(&d_ptr->rwlock); @@ -137,9 +143,26 @@ void xmrig::Miner::pause() void xmrig::Miner::printHashrate(bool details) { + char num[8 * 4] = { 0 }; + double speed[3] = { 0.0 }; + for (IBackend *backend : d_ptr->backends) { + const Hashrate *hashrate = backend->hashrate(); + if (hashrate) { + speed[0] += hashrate->calc(Hashrate::ShortInterval); + 
speed[1] += hashrate->calc(Hashrate::MediumInterval); + speed[2] += hashrate->calc(Hashrate::LargeInterval); + } + backend->printHashrate(details); } + + LOG_INFO(WHITE_BOLD("speed") " 10s/60s/15m " CYAN_BOLD("%s") CYAN(" %s %s ") CYAN_BOLD("H/s") " max " CYAN_BOLD("%s H/s"), + Hashrate::format(speed[0], num, sizeof(num) / 4), + Hashrate::format(speed[1], num + 8, sizeof(num) / 4), + Hashrate::format(speed[2], num + 8 * 2, sizeof(num) / 4 ), + Hashrate::format(d_ptr->maxHashrate, num + 8 * 3, sizeof(num) / 4) + ); } @@ -203,10 +226,18 @@ void xmrig::Miner::stop() void xmrig::Miner::onTimer(const Timer *) { + double maxHashrate = 0.0; + for (IBackend *backend : d_ptr->backends) { backend->tick(d_ptr->ticks); + + if (backend->hashrate()) { + maxHashrate += backend->hashrate()->calc(Hashrate::ShortInterval); + } } + d_ptr->maxHashrate = std::max(d_ptr->maxHashrate, maxHashrate); + if ((d_ptr->ticks % (d_ptr->controller->config()->printTime() * 2)) == 0) { printHashrate(false); } diff --git a/src/core/Miner.h b/src/core/Miner.h index e7904575..f32524a7 100644 --- a/src/core/Miner.h +++ b/src/core/Miner.h @@ -26,6 +26,9 @@ #define XMRIG_MINER_H +#include + + #include "base/kernel/interfaces/ITimerListener.h" @@ -35,6 +38,7 @@ namespace xmrig { class Controller; class Job; class MinerPrivate; +class IBackend; class Miner : public ITimerListener @@ -44,6 +48,7 @@ public: ~Miner() override; bool isEnabled() const; + const std::vector &backends() const; Job job() const; void pause(); void printHashrate(bool details); diff --git a/src/workers/WorkersLegacy.cpp b/src/workers/WorkersLegacy.cpp index 29571608..0db0a3cf 100644 --- a/src/workers/WorkersLegacy.cpp +++ b/src/workers/WorkersLegacy.cpp @@ -40,13 +40,13 @@ #include "crypto/rx/RxDataset.h" #include "Mem.h" #include "rapidjson/document.h" -#include "workers/Hashrate.h" +//#include "workers/Hashrate.h" #include "workers/WorkersLegacy.h" bool WorkersLegacy::m_active = false; bool WorkersLegacy::m_enabled = true; -Hashrate 
*WorkersLegacy::m_hashrate = nullptr; +//Hashrate *WorkersLegacy::m_hashrate = nullptr; xmrig::Job WorkersLegacy::m_job; WorkersLegacy::LaunchStatus WorkersLegacy::m_status; std::vector* > WorkersLegacy::m_workers; @@ -96,38 +96,6 @@ size_t WorkersLegacy::threads() //} -//void Workers::printHashrate(bool detail) -//{ -// assert(m_controller != nullptr); -// if (!m_controller) { -// return; -// } - -// if (detail) { -// char num1[8] = { 0 }; -// char num2[8] = { 0 }; -// char num3[8] = { 0 }; - -// xmrig::Log::print(WHITE_BOLD_S "| THREAD | AFFINITY | 10s H/s | 60s H/s | 15m H/s |"); - -// size_t i = 0; -// for (const xmrig::IThread *thread : m_controller->config()->threads()) { -// xmrig::Log::print("| %6zu | %8" PRId64 " | %7s | %7s | %7s |", -// thread->index(), -// thread->affinity(), -// Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::ShortInterval), num1, sizeof num1), -// Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::MediumInterval), num2, sizeof num2), -// Hashrate::format(m_hashrate->calc(thread->index(), Hashrate::LargeInterval), num3, sizeof num3) -// ); - -// i++; -// } -// } - -// m_hashrate->print(); -//} - - //void Workers::setEnabled(bool enabled) //{ // if (m_enabled == enabled) { @@ -186,7 +154,7 @@ void WorkersLegacy::start(xmrig::Controller *controller) m_status.ways += thread.intensity(); } - m_hashrate = new Hashrate(threads.size(), controller); +// m_hashrate = new Hashrate(threads.size(), controller); uv_mutex_init(&m_mutex); uv_rwlock_init(&m_rwlock); @@ -238,66 +206,24 @@ void WorkersLegacy::threadsSummary(rapidjson::Document &doc) #endif -//void WorkersLegacy::onReady(void *arg) +//void WorkersLegacy::onTick(uv_timer_t *) //{ // using namespace xmrig; -// auto handle = static_cast* >(arg); +// for (Thread *handle : m_workers) { +// if (!handle->worker()) { +// return; +// } -// xmrig::IWorker *worker = nullptr; - -// switch (handle->config().intensity) { -// case 1: -// worker = new CpuWorker<1>(handle->index(), 
handle->config()); -// break; - -// case 2: -// worker = new CpuWorker<2>(handle->index(), handle->config()); -// break; - -// case 3: -// worker = new CpuWorker<3>(handle->index(), handle->config()); -// break; - -// case 4: -// worker = new CpuWorker<4>(handle->index(), handle->config()); -// break; - -// case 5: -// worker = new CpuWorker<5>(handle->index(), handle->config()); -// break; +// m_hashrate->add(handle->index(), handle->worker()->hashCount(), handle->worker()->timestamp()); // } -// handle->setWorker(worker); - -// if (!worker->selfTest()) { -// LOG_ERR("thread %zu error: \"hash self-test failed\".", handle->worker()->id()); - -// return; +// if ((m_ticks++ & 0xF) == 0) { +// m_hashrate->updateHighest(); // } - -// start(worker); //} -void WorkersLegacy::onTick(uv_timer_t *) -{ - using namespace xmrig; - - for (Thread *handle : m_workers) { - if (!handle->worker()) { - return; - } - - m_hashrate->add(handle->index(), handle->worker()->hashCount(), handle->worker()->timestamp()); - } - - if ((m_ticks++ & 0xF) == 0) { - m_hashrate->updateHighest(); - } -} - - void WorkersLegacy::start(xmrig::IWorker *worker) { // const Worker *w = static_cast(worker); diff --git a/src/workers/WorkersLegacy.h b/src/workers/WorkersLegacy.h index 5ee53dbf..be9e417a 100644 --- a/src/workers/WorkersLegacy.h +++ b/src/workers/WorkersLegacy.h @@ -42,7 +42,7 @@ #include "rapidjson/fwd.h" -class Hashrate; +//class Hashrate; namespace xmrig { @@ -66,7 +66,7 @@ public: // static xmrig::Job job(); // static inline bool isEnabled() { return m_enabled; } - static inline Hashrate *hashrate() { return m_hashrate; } +// static inline Hashrate *hashrate() { return m_hashrate; } # ifdef XMRIG_FEATURE_API static void threadsSummary(rapidjson::Document &doc); @@ -74,7 +74,7 @@ public: private: // static void onReady(void *arg); - static void onTick(uv_timer_t *handle); +// static void onTick(uv_timer_t *handle); static void start(xmrig::IWorker *worker); class LaunchStatus @@ -98,7 +98,7 
@@ private: static bool m_active; static bool m_enabled; - static Hashrate *m_hashrate; +// static Hashrate *m_hashrate; static xmrig::Job m_job; static LaunchStatus m_status; static std::vector* > m_workers; From 20313cbc569442f1a7440607c7885ade2f207241 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 17 Jul 2019 04:33:11 +0700 Subject: [PATCH 028/172] Implemented unified cryptonight and RandomX scratchpad memory. --- CMakeLists.txt | 2 + src/Mem.cpp | 51 ------------------ src/Mem.h | 2 - src/api/v1/ApiRouter.cpp | 2 +- src/backend/common/Worker.h | 7 +-- src/backend/common/interfaces/IWorker.h | 14 +++-- src/backend/cpu/CpuBackend.cpp | 10 ++-- src/backend/cpu/CpuWorker.cpp | 48 ++++++++++++----- src/backend/cpu/CpuWorker.h | 7 +-- src/crypto/cn/CnCtx.cpp | 60 +++++++++++++++++++++ src/crypto/cn/CnCtx.h | 52 ++++++++++++++++++ src/crypto/common/VirtualMemory.h | 20 +++++++ src/crypto/common/VirtualMemory_unix.cpp | 42 +++++++++++++++ src/crypto/common/VirtualMemory_win.cpp | 31 +++++++++++ src/crypto/randomx/randomx.cpp | 44 +++------------ src/crypto/randomx/randomx.h | 2 +- src/crypto/randomx/virtual_machine.cpp | 40 ++++++-------- src/crypto/randomx/virtual_machine.hpp | 19 ++++--- src/crypto/randomx/vm_compiled.cpp | 22 ++++---- src/crypto/randomx/vm_compiled.hpp | 28 +++++----- src/crypto/randomx/vm_compiled_light.cpp | 20 ++++--- src/crypto/randomx/vm_compiled_light.hpp | 27 +++++----- src/crypto/randomx/vm_interpreted.cpp | 30 +++++------ src/crypto/randomx/vm_interpreted.hpp | 31 ++++++----- src/crypto/randomx/vm_interpreted_light.cpp | 14 +++-- src/crypto/randomx/vm_interpreted_light.hpp | 18 ++++--- src/crypto/rx/RxVm.cpp | 18 +------ src/crypto/rx/RxVm.h | 2 +- src/workers/WorkersLegacy.cpp | 58 ++++++++++---------- src/workers/WorkersLegacy.h | 10 ++-- 30 files changed, 434 insertions(+), 297 deletions(-) create mode 100644 src/crypto/cn/CnCtx.cpp create mode 100644 src/crypto/cn/CnCtx.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 
543836c8..f8a56c48 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -54,6 +54,7 @@ set(HEADERS_CRYPTO src/crypto/cn/c_jh.h src/crypto/cn/c_skein.h src/crypto/cn/CnAlgo.h + src/crypto/cn/CnCtx.h src/crypto/cn/CnHash.h src/crypto/cn/CryptoNight_monero.h src/crypto/cn/CryptoNight_test.h @@ -100,6 +101,7 @@ set(SOURCES_CRYPTO src/crypto/cn/c_groestl.c src/crypto/cn/c_jh.c src/crypto/cn/c_skein.c + src/crypto/cn/CnCtx.cpp src/crypto/cn/CnHash.cpp src/crypto/common/Algorithm.cpp src/crypto/common/keccak.cpp diff --git a/src/Mem.cpp b/src/Mem.cpp index e8eabe3b..5fcea306 100644 --- a/src/Mem.cpp +++ b/src/Mem.cpp @@ -24,59 +24,8 @@ */ -#include - - -#include "crypto/cn/CryptoNight.h" -#include "crypto/common/portable/mm_malloc.h" -#include "crypto/common/VirtualMemory.h" #include "Mem.h" bool Mem::m_enabled = true; int Mem::m_flags = 0; - - -MemInfo Mem::create(cryptonight_ctx **ctx, const xmrig::Algorithm &algorithm, size_t count) -{ - using namespace xmrig; - - constexpr CnAlgo props; - - MemInfo info; - info.size = props.memory(algorithm.id()) * count; - - constexpr const size_t align_size = 2 * 1024 * 1024; - info.size = ((info.size + align_size - 1) / align_size) * align_size; - info.pages = info.size / align_size; - - allocate(info, m_enabled); - - for (size_t i = 0; i < count; ++i) { - cryptonight_ctx *c = static_cast(_mm_malloc(sizeof(cryptonight_ctx), 4096)); - c->memory = info.memory + (i * props.memory(algorithm.id())); - - c->generated_code = reinterpret_cast(VirtualMemory::allocateExecutableMemory(0x4000)); - c->generated_code_data.algo = Algorithm::INVALID; - c->generated_code_data.height = std::numeric_limits::max(); - - ctx[i] = c; - } - - return info; -} - - -void Mem::release(cryptonight_ctx **ctx, size_t count, MemInfo &info) -{ - if (info.memory == nullptr) { - return; - } - - release(info); - - for (size_t i = 0; i < count; ++i) { - _mm_free(ctx[i]); - } -} - diff --git a/src/Mem.h b/src/Mem.h index 5c60d281..8e5c418b 100644 --- a/src/Mem.h +++ 
b/src/Mem.h @@ -56,9 +56,7 @@ public: Lock = 4 }; - static MemInfo create(cryptonight_ctx **ctx, const xmrig::Algorithm &algorithm, size_t count); static void init(bool enabled); - static void release(cryptonight_ctx **ctx, size_t count, MemInfo &info); static inline bool isHugepagesAvailable() { return (m_flags & HugepagesAvailable) != 0; } diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index 48a92c93..2e6a815c 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -144,7 +144,7 @@ void xmrig::ApiRouter::getMiner(rapidjson::Value &reply, rapidjson::Document &do reply.AddMember("kind", APP_KIND, allocator); reply.AddMember("ua", StringRef(Platform::userAgent()), allocator); reply.AddMember("cpu", cpu, allocator); - reply.AddMember("hugepages", WorkersLegacy::hugePages() > 0, allocator); + reply.AddMember("hugepages", false, allocator); // FIXME hugepages reply.AddMember("donate_level", m_base->config()->pools().donateLevel(), allocator); } diff --git a/src/backend/common/Worker.h b/src/backend/common/Worker.h index 3223a60c..faebf128 100644 --- a/src/backend/common/Worker.h +++ b/src/backend/common/Worker.h @@ -42,9 +42,10 @@ class Worker : public IWorker public: Worker(size_t id, int64_t affinity, int priority); - inline size_t id() const override { return m_id; } - inline uint64_t hashCount() const override { return m_hashCount.load(std::memory_order_relaxed); } - inline uint64_t timestamp() const override { return m_timestamp.load(std::memory_order_relaxed); } + inline const VirtualMemory *memory() const override { return nullptr; } + inline size_t id() const override { return m_id; } + inline uint64_t hashCount() const override { return m_hashCount.load(std::memory_order_relaxed); } + inline uint64_t timestamp() const override { return m_timestamp.load(std::memory_order_relaxed); } protected: void storeStats(); diff --git a/src/backend/common/interfaces/IWorker.h b/src/backend/common/interfaces/IWorker.h index 
de22de02..5c99680b 100644 --- a/src/backend/common/interfaces/IWorker.h +++ b/src/backend/common/interfaces/IWorker.h @@ -32,16 +32,20 @@ namespace xmrig { +class VirtualMemory; + + class IWorker { public: virtual ~IWorker() = default; - virtual bool selfTest() = 0; - virtual size_t id() const = 0; - virtual uint64_t hashCount() const = 0; - virtual uint64_t timestamp() const = 0; - virtual void start() = 0; + virtual bool selfTest() = 0; + virtual const VirtualMemory *memory() const = 0; + virtual size_t id() const = 0; + virtual uint64_t hashCount() const = 0; + virtual uint64_t timestamp() const = 0; + virtual void start() = 0; }; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index c8d38ca6..a0463832 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -136,14 +136,10 @@ void xmrig::CpuBackend::printHashrate(bool details) void xmrig::CpuBackend::setJob(const Job &job) { - LOG_WARN("PROFILE %s %zu", d_ptr->controller->config()->cpu().threads().profileName(job.algorithm()).data(), job.algorithm().memory()); - if (d_ptr->isReady(job.algorithm())) { return; } - LOG_INFO(GREEN_BOLD_S "INIT"); - const CpuConfig &cpu = d_ptr->controller->config()->cpu(); const Threads &threads = cpu.threads(); @@ -151,7 +147,11 @@ void xmrig::CpuBackend::setJob(const Job &job) d_ptr->profileName = threads.profileName(job.algorithm()); d_ptr->threads = threads.get(d_ptr->profileName); - LOG_INFO(BLUE_BG_S " %zu ", d_ptr->threads.size()); + LOG_INFO(GREEN_BOLD("CPU") " use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"), + d_ptr->profileName.data(), + d_ptr->threads.size(), + d_ptr->algo.memory() / 1024 + ); d_ptr->workers.stop(); diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index 96466252..356dfb1b 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -29,8 +29,10 @@ #include 
"backend/cpu/CpuWorker.h" #include "core/Miner.h" +#include "crypto/cn/CnCtx.h" #include "crypto/cn/CryptoNight_test.h" #include "crypto/common/Nonce.h" +#include "crypto/common/VirtualMemory.h" #include "crypto/rx/Rx.h" #include "crypto/rx/RxVm.h" #include "net/JobResults.h" @@ -56,18 +58,18 @@ xmrig::CpuWorker::CpuWorker(size_t index, const CpuLaunchData &data) : m_assembly(data.assembly), m_hwAES(data.hwAES), m_av(data.av()), - m_miner(data.miner) + m_miner(data.miner), + m_ctx() { - if (m_algorithm.family() != Algorithm::RANDOM_X) { - m_memory = Mem::create(m_ctx, m_algorithm, N); - } + m_memory = new VirtualMemory(m_algorithm.memory() * N, data.hugePages); } template xmrig::CpuWorker::~CpuWorker() { - Mem::release(m_ctx, N, m_memory); + CnCtx::release(m_ctx, N); + delete m_memory; # ifdef XMRIG_ALGO_RANDOMX delete m_vm; @@ -81,7 +83,7 @@ void xmrig::CpuWorker::allocateRandomX_VM() { if (!m_vm) { RxDataset *dataset = Rx::dataset(m_job.currentJob().seedHash(), m_job.currentJob().algorithm()); - m_vm = new RxVm(dataset, true, !m_hwAES); + m_vm = new RxVm(dataset, m_memory->scratchpad(), !m_hwAES); } } #endif @@ -90,6 +92,14 @@ void xmrig::CpuWorker::allocateRandomX_VM() template bool xmrig::CpuWorker::selfTest() { +# ifdef XMRIG_ALGO_RANDOMX + if (m_algorithm.family() == Algorithm::RANDOM_X) { + return true; + } +# endif + + allocateCnCtx(); + if (m_algorithm.family() == Algorithm::CN) { const bool rc = verify(Algorithm::CN_0, test_output_v0) && verify(Algorithm::CN_1, test_output_v1) && @@ -136,12 +146,6 @@ bool xmrig::CpuWorker::selfTest() } # endif -# ifdef XMRIG_ALGO_RANDOMX - if (m_algorithm.family() == Algorithm::RANDOM_X) { - return true; - } -# endif - return false; } @@ -172,7 +176,6 @@ void xmrig::CpuWorker::start() # ifdef XMRIG_ALGO_RANDOMX if (job.algorithm().family() == Algorithm::RANDOM_X) { - allocateRandomX_VM(); randomx_calculate_hash(m_vm->get(), m_job.blob(), job.size(), m_hash); } else @@ -262,10 +265,29 @@ bool CpuWorker<1>::verify2(const 
Algorithm &algorithm, const uint8_t *referenceV } // namespace xmrig +template +void xmrig::CpuWorker::allocateCnCtx() +{ + if (m_ctx[0] == nullptr) { + CnCtx::create(m_ctx, m_memory->scratchpad(), m_memory->size(), N); + } +} + + template void xmrig::CpuWorker::consumeJob() { m_job.add(m_miner->job(), Nonce::sequence(Nonce::CPU), kReserveCount); + +# ifdef XMRIG_ALGO_RANDOMX + if (m_job.currentJob().algorithm().family() == Algorithm::RANDOM_X) { + allocateRandomX_VM(); + } + else +# endif + { + allocateCnCtx(); + } } diff --git a/src/backend/cpu/CpuWorker.h b/src/backend/cpu/CpuWorker.h index c67d355b..c0f9dfaf 100644 --- a/src/backend/cpu/CpuWorker.h +++ b/src/backend/cpu/CpuWorker.h @@ -48,12 +48,12 @@ public: CpuWorker(size_t index, const CpuLaunchData &data); ~CpuWorker() override; - inline const MemInfo &memory() const { return m_memory; } - protected: bool selfTest() override; void start() override; + inline const VirtualMemory *memory() const override { return m_memory; } + private: inline cn_hash_fun fn(const Algorithm &algorithm) const { return CnHash::fn(algorithm, m_av, m_assembly); } @@ -63,6 +63,7 @@ private: bool verify(const Algorithm &algorithm, const uint8_t *referenceValue); bool verify2(const Algorithm &algorithm, const uint8_t *referenceValue); + void allocateCnCtx(); void consumeJob(); const Algorithm m_algorithm; @@ -71,8 +72,8 @@ private: const CnHash::AlgoVariant m_av; const Miner *m_miner; cryptonight_ctx *m_ctx[N]; - MemInfo m_memory; uint8_t m_hash[N * 32]; + VirtualMemory *m_memory = nullptr; WorkerJob m_job; # ifdef XMRIG_ALGO_RANDOMX diff --git a/src/crypto/cn/CnCtx.cpp b/src/crypto/cn/CnCtx.cpp new file mode 100644 index 00000000..5d41bca0 --- /dev/null +++ b/src/crypto/cn/CnCtx.cpp @@ -0,0 +1,60 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * 
Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + + +#include "crypto/cn/CnCtx.h" +#include "crypto/cn/CryptoNight.h" +#include "crypto/common/Algorithm.h" +#include "crypto/common/portable/mm_malloc.h" +#include "crypto/common/VirtualMemory.h" + + +void xmrig::CnCtx::create(cryptonight_ctx **ctx, uint8_t *memory, size_t size, size_t count) +{ + for (size_t i = 0; i < count; ++i) { + cryptonight_ctx *c = static_cast(_mm_malloc(sizeof(cryptonight_ctx), 4096)); + c->memory = memory + (i * size); + + c->generated_code = reinterpret_cast(VirtualMemory::allocateExecutableMemory(0x4000)); + c->generated_code_data.algo = Algorithm::INVALID; + c->generated_code_data.height = std::numeric_limits::max(); + + ctx[i] = c; + } +} + + +void xmrig::CnCtx::release(cryptonight_ctx **ctx, size_t count) +{ + if (ctx[0] == nullptr) { + return; + } + + for (size_t i = 0; i < count; ++i) { + _mm_free(ctx[i]); + } +} diff --git a/src/crypto/cn/CnCtx.h b/src/crypto/cn/CnCtx.h new file mode 100644 index 00000000..7b0adbec --- /dev/null +++ b/src/crypto/cn/CnCtx.h @@ -0,0 +1,52 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 SChernykh + 
* Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_CN_CTX_H +#define XMRIG_CN_CTX_H + + +#include +#include + + +struct cryptonight_ctx; + + +namespace xmrig +{ + + +class CnCtx +{ +public: + static void create(cryptonight_ctx **ctx, uint8_t *memory, size_t size, size_t count); + static void release(cryptonight_ctx **ctx, size_t count); +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_CN_CTX_H */ diff --git a/src/crypto/common/VirtualMemory.h b/src/crypto/common/VirtualMemory.h index a83c35ed..98212e40 100644 --- a/src/crypto/common/VirtualMemory.h +++ b/src/crypto/common/VirtualMemory.h @@ -38,6 +38,15 @@ namespace xmrig { class VirtualMemory { public: + inline VirtualMemory() {} + VirtualMemory(size_t size, bool hugePages = true, size_t align = 64); + ~VirtualMemory(); + + inline bool isHugePages() const { return m_flags & HUGEPAGES; } + inline size_t hugePages() const { return isHugePages() ? 
(align(size()) / 2097152) : 0; } + inline size_t size() const { return m_size; } + inline uint8_t *scratchpad() const { return m_scratchpad; } + static void *allocateExecutableMemory(size_t size); static void *allocateLargePagesMemory(size_t size); static void flushInstructionCache(void *p, size_t size); @@ -46,6 +55,17 @@ public: static void unprotectExecutableMemory(void *p, size_t size); static inline constexpr size_t align(size_t pos, size_t align = 2097152) { return ((pos - 1) / align + 1) * align; } + +private: + enum Flags { + HUGEPAGES_AVAILABLE = 1, + HUGEPAGES = 2, + LOCK = 4 + }; + + int m_flags = 0; + size_t m_size = 0; + uint8_t *m_scratchpad = nullptr; }; diff --git a/src/crypto/common/VirtualMemory_unix.cpp b/src/crypto/common/VirtualMemory_unix.cpp index beac976d..665fc02b 100644 --- a/src/crypto/common/VirtualMemory_unix.cpp +++ b/src/crypto/common/VirtualMemory_unix.cpp @@ -29,6 +29,7 @@ #include +#include "crypto/common/portable/mm_malloc.h" #include "crypto/common/VirtualMemory.h" @@ -37,6 +38,47 @@ #endif +xmrig::VirtualMemory::VirtualMemory(size_t size, bool hugePages, size_t align) : + m_size(VirtualMemory::align(size)) +{ + if (hugePages) { + m_scratchpad = static_cast(allocateLargePagesMemory(m_size)); + if (m_scratchpad) { + m_flags |= HUGEPAGES; + + madvise(m_scratchpad, size, MADV_RANDOM | MADV_WILLNEED); + + if (mlock(m_scratchpad, m_size) == 0) { + m_flags |= LOCK; + } + + return; + } + } + + m_scratchpad = static_cast(_mm_malloc(m_size, align)); +} + + +xmrig::VirtualMemory::~VirtualMemory() +{ + if (!m_scratchpad) { + return; + } + + if (isHugePages()) { + if (m_flags & LOCK) { + munlock(m_scratchpad, m_size); + } + + freeLargePagesMemory(m_scratchpad, m_size); + } + else { + _mm_free(m_scratchpad); + } +} + + void *xmrig::VirtualMemory::allocateExecutableMemory(size_t size) { diff --git a/src/crypto/common/VirtualMemory_win.cpp b/src/crypto/common/VirtualMemory_win.cpp index 7f1d6f43..7aa98f89 100644 --- 
a/src/crypto/common/VirtualMemory_win.cpp +++ b/src/crypto/common/VirtualMemory_win.cpp @@ -32,6 +32,37 @@ #include "crypto/common/VirtualMemory.h" +xmrig::VirtualMemory::VirtualMemory(size_t size, bool hugePages, size_t align) : + m_size(VirtualMemory::align(size)) +{ + if (hugePages) { + m_scratchpad = static_cast(allocateLargePagesMemory(m_size)); + if (m_scratchpad) { + m_flags |= HUGEPAGES; + + return; + } + } + + m_scratchpad = static_cast(_mm_malloc(m_size, align)); +} + + +xmrig::VirtualMemory::~VirtualMemory() +{ + if (!m_scratchpad) { + return; + } + + if (isHugePages()) { + freeLargePagesMemory(m_scratchpad, m_size); + } + else { + _mm_free(m_scratchpad); + } +} + + void *xmrig::VirtualMemory::allocateExecutableMemory(size_t size) { return VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE); diff --git a/src/crypto/randomx/randomx.cpp b/src/crypto/randomx/randomx.cpp index dde838b9..df5fc9cb 100644 --- a/src/crypto/randomx/randomx.cpp +++ b/src/crypto/randomx/randomx.cpp @@ -345,7 +345,7 @@ extern "C" { delete dataset; } - randomx_vm *randomx_create_vm(randomx_flags flags, randomx_cache *cache, randomx_dataset *dataset) { + randomx_vm *randomx_create_vm(randomx_flags flags, randomx_cache *cache, randomx_dataset *dataset, uint8_t *scratchpad) { assert(cache != nullptr || (flags & RANDOMX_FLAG_FULL_MEM)); assert(cache == nullptr || cache->isInitialized()); assert(dataset != nullptr || !(flags & RANDOMX_FLAG_FULL_MEM)); @@ -353,7 +353,7 @@ extern "C" { randomx_vm *vm = nullptr; try { - switch (flags & (RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_JIT | RANDOMX_FLAG_HARD_AES | RANDOMX_FLAG_LARGE_PAGES)) { + switch (flags & (RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_JIT | RANDOMX_FLAG_HARD_AES)) { case RANDOMX_FLAG_DEFAULT: vm = new randomx::InterpretedLightVmDefault(); break; @@ -386,49 +386,19 @@ extern "C" { vm = new randomx::CompiledVmHardAes(); break; - case RANDOMX_FLAG_LARGE_PAGES: - vm = new randomx::InterpretedLightVmLargePage(); - 
break; - - case RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_LARGE_PAGES: - vm = new randomx::InterpretedVmLargePage(); - break; - - case RANDOMX_FLAG_JIT | RANDOMX_FLAG_LARGE_PAGES: - vm = new randomx::CompiledLightVmLargePage(); - break; - - case RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_JIT | RANDOMX_FLAG_LARGE_PAGES: - vm = new randomx::CompiledVmLargePage(); - break; - - case RANDOMX_FLAG_HARD_AES | RANDOMX_FLAG_LARGE_PAGES: - vm = new randomx::InterpretedLightVmLargePageHardAes(); - break; - - case RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_HARD_AES | RANDOMX_FLAG_LARGE_PAGES: - vm = new randomx::InterpretedVmLargePageHardAes(); - break; - - case RANDOMX_FLAG_JIT | RANDOMX_FLAG_HARD_AES | RANDOMX_FLAG_LARGE_PAGES: - vm = new randomx::CompiledLightVmLargePageHardAes(); - break; - - case RANDOMX_FLAG_FULL_MEM | RANDOMX_FLAG_JIT | RANDOMX_FLAG_HARD_AES | RANDOMX_FLAG_LARGE_PAGES: - vm = new randomx::CompiledVmLargePageHardAes(); - break; - default: UNREACHABLE; } - if(cache != nullptr) + if (cache != nullptr) { vm->setCache(cache); + } - if(dataset != nullptr) + if (dataset != nullptr) { vm->setDataset(dataset); + } - vm->allocate(); + vm->setScratchpad(scratchpad); } catch (std::exception &ex) { delete vm; diff --git a/src/crypto/randomx/randomx.h b/src/crypto/randomx/randomx.h index cd07ac64..d688189f 100644 --- a/src/crypto/randomx/randomx.h +++ b/src/crypto/randomx/randomx.h @@ -286,7 +286,7 @@ RANDOMX_EXPORT void randomx_release_dataset(randomx_dataset *dataset); * (3) cache parameter is NULL and RANDOMX_FLAG_FULL_MEM is not set * (4) dataset parameter is NULL and RANDOMX_FLAG_FULL_MEM is set */ -RANDOMX_EXPORT randomx_vm *randomx_create_vm(randomx_flags flags, randomx_cache *cache, randomx_dataset *dataset); +RANDOMX_EXPORT randomx_vm *randomx_create_vm(randomx_flags flags, randomx_cache *cache, randomx_dataset *dataset, uint8_t *scratchpad); /** * Reinitializes a virtual machine with a new Cache. 
This function should be called anytime diff --git a/src/crypto/randomx/virtual_machine.cpp b/src/crypto/randomx/virtual_machine.cpp index caa1efbf..6560dc95 100644 --- a/src/crypto/randomx/virtual_machine.cpp +++ b/src/crypto/randomx/virtual_machine.cpp @@ -95,43 +95,35 @@ void randomx_vm::initialize() { namespace randomx { - alignas(16) volatile static rx_vec_i128 aesDummy; - - template - VmBase::~VmBase() { - Allocator::freeMemory(scratchpad, RANDOMX_SCRATCHPAD_L3_MAX_SIZE); + template + VmBase::~VmBase() { } - template - void VmBase::allocate() { - if (datasetPtr == nullptr) + template + void VmBase::setScratchpad(uint8_t *scratchpad) { + if (datasetPtr == nullptr) { throw std::invalid_argument("Cache/Dataset not set"); - if (!softAes) { //if hardware AES is not supported, it's better to fail now than to return a ticking bomb - rx_vec_i128 tmp = rx_load_vec_i128((const rx_vec_i128*)&aesDummy); - tmp = rx_aesenc_vec_i128(tmp, tmp); - rx_store_vec_i128((rx_vec_i128*)&aesDummy, tmp); } - scratchpad = (uint8_t*)Allocator::allocMemory(RANDOMX_SCRATCHPAD_L3_MAX_SIZE); + + this->scratchpad = scratchpad; } - template - void VmBase::getFinalResult(void* out, size_t outSize) { + template + void VmBase::getFinalResult(void* out, size_t outSize) { hashAes1Rx4(scratchpad, ScratchpadSize, ®.a); blake2b(out, outSize, ®, sizeof(RegisterFile), nullptr, 0); } - template - void VmBase::initScratchpad(void* seed) { + template + void VmBase::initScratchpad(void* seed) { fillAes1Rx4(seed, ScratchpadSize, scratchpad); } - template - void VmBase::generateProgram(void* seed) { + template + void VmBase::generateProgram(void* seed) { fillAes4Rx4(seed, sizeof(program), &program); } - template class VmBase, false>; - template class VmBase, true>; - template class VmBase; - template class VmBase; -} \ No newline at end of file + template class VmBase; + template class VmBase; +} diff --git a/src/crypto/randomx/virtual_machine.hpp b/src/crypto/randomx/virtual_machine.hpp index 
488994df..cba79d72 100644 --- a/src/crypto/randomx/virtual_machine.hpp +++ b/src/crypto/randomx/virtual_machine.hpp @@ -33,26 +33,31 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "program.hpp" /* Global namespace for C binding */ -class randomx_vm { +class randomx_vm +{ public: virtual ~randomx_vm() = 0; - virtual void allocate() = 0; + virtual void setScratchpad(uint8_t *scratchpad) = 0; virtual void getFinalResult(void* out, size_t outSize) = 0; virtual void setDataset(randomx_dataset* dataset) { } virtual void setCache(randomx_cache* cache) { } virtual void initScratchpad(void* seed) = 0; virtual void run(void* seed) = 0; void resetRoundingMode(); + randomx::RegisterFile *getRegisterFile() { return ® } + const void* getScratchpad() { return scratchpad; } + const randomx::Program& getProgram() { return program; } + protected: void initialize(); alignas(64) randomx::Program program; @@ -69,15 +74,17 @@ protected: namespace randomx { - template - class VmBase : public randomx_vm { + template + class VmBase : public randomx_vm + { public: ~VmBase() override; - void allocate() override; + void setScratchpad(uint8_t *scratchpad) override; void initScratchpad(void* seed) override; void getFinalResult(void* out, size_t outSize) override; + protected: void generateProgram(void* seed); }; -} \ No newline at end of file +} diff --git a/src/crypto/randomx/vm_compiled.cpp b/src/crypto/randomx/vm_compiled.cpp index 7f621a33..4d14c793 100644 --- a/src/crypto/randomx/vm_compiled.cpp +++ b/src/crypto/randomx/vm_compiled.cpp @@ -34,27 +34,25 @@ namespace randomx { static_assert(sizeof(MemoryRegisters) == 2 * sizeof(addr_t) + sizeof(uintptr_t), "Invalid alignment of struct randomx::MemoryRegisters"); static_assert(sizeof(RegisterFile) == 256, "Invalid alignment of struct randomx::RegisterFile"); - template - void CompiledVm::setDataset(randomx_dataset* dataset) { + template + void CompiledVm::setDataset(randomx_dataset* dataset) { datasetPtr = 
dataset; } - template - void CompiledVm::run(void* seed) { - VmBase::generateProgram(seed); + template + void CompiledVm::run(void* seed) { + VmBase::generateProgram(seed); randomx_vm::initialize(); compiler.generateProgram(program, config); mem.memory = datasetPtr->memory + datasetOffset; execute(); } - template - void CompiledVm::execute() { + template + void CompiledVm::execute() { compiler.getProgramFunc()(reg, mem, scratchpad, RandomX_CurrentConfig.ProgramIterations); } - template class CompiledVm, false>; - template class CompiledVm, true>; - template class CompiledVm; - template class CompiledVm; -} \ No newline at end of file + template class CompiledVm; + template class CompiledVm; +} diff --git a/src/crypto/randomx/vm_compiled.hpp b/src/crypto/randomx/vm_compiled.hpp index 856f00d8..05b34b9c 100644 --- a/src/crypto/randomx/vm_compiled.hpp +++ b/src/crypto/randomx/vm_compiled.hpp @@ -37,8 +37,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. namespace randomx { - template - class CompiledVm : public VmBase { + template + class CompiledVm : public VmBase + { public: void* operator new(size_t size) { void* ptr = AlignedAllocator::allocMemory(size); @@ -46,27 +47,28 @@ namespace randomx { throw std::bad_alloc(); return ptr; } + void operator delete(void* ptr) { AlignedAllocator::freeMemory(ptr, sizeof(CompiledVm)); } + void setDataset(randomx_dataset* dataset) override; void run(void* seed) override; - using VmBase::mem; - using VmBase::program; - using VmBase::config; - using VmBase::reg; - using VmBase::scratchpad; - using VmBase::datasetPtr; - using VmBase::datasetOffset; + using VmBase::mem; + using VmBase::program; + using VmBase::config; + using VmBase::reg; + using VmBase::scratchpad; + using VmBase::datasetPtr; + using VmBase::datasetOffset; + protected: void execute(); JitCompiler compiler; }; - using CompiledVmDefault = CompiledVm, true>; - using CompiledVmHardAes = CompiledVm, false>; - using CompiledVmLargePage = 
CompiledVm; - using CompiledVmLargePageHardAes = CompiledVm; + using CompiledVmDefault = CompiledVm; + using CompiledVmHardAes = CompiledVm; } diff --git a/src/crypto/randomx/vm_compiled_light.cpp b/src/crypto/randomx/vm_compiled_light.cpp index c083f4aa..6009216b 100644 --- a/src/crypto/randomx/vm_compiled_light.cpp +++ b/src/crypto/randomx/vm_compiled_light.cpp @@ -32,23 +32,21 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. namespace randomx { - template - void CompiledLightVm::setCache(randomx_cache* cache) { + template + void CompiledLightVm::setCache(randomx_cache* cache) { cachePtr = cache; mem.memory = cache->memory; compiler.generateSuperscalarHash(cache->programs, cache->reciprocalCache); } - template - void CompiledLightVm::run(void* seed) { - VmBase::generateProgram(seed); + template + void CompiledLightVm::run(void* seed) { + VmBase::generateProgram(seed); randomx_vm::initialize(); compiler.generateProgramLight(program, config, datasetOffset); - CompiledVm::execute(); + CompiledVm::execute(); } - template class CompiledLightVm, false>; - template class CompiledLightVm, true>; - template class CompiledLightVm; - template class CompiledLightVm; -} \ No newline at end of file + template class CompiledLightVm; + template class CompiledLightVm; +} diff --git a/src/crypto/randomx/vm_compiled_light.hpp b/src/crypto/randomx/vm_compiled_light.hpp index 6af82bbe..6cd3cb20 100644 --- a/src/crypto/randomx/vm_compiled_light.hpp +++ b/src/crypto/randomx/vm_compiled_light.hpp @@ -33,8 +33,9 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
namespace randomx { - template - class CompiledLightVm : public CompiledVm { + template + class CompiledLightVm : public CompiledVm + { public: void* operator new(size_t size) { void* ptr = AlignedAllocator::allocMemory(size); @@ -42,23 +43,23 @@ namespace randomx { throw std::bad_alloc(); return ptr; } + void operator delete(void* ptr) { AlignedAllocator::freeMemory(ptr, sizeof(CompiledLightVm)); } + void setCache(randomx_cache* cache) override; void setDataset(randomx_dataset* dataset) override { } void run(void* seed) override; - using CompiledVm::mem; - using CompiledVm::compiler; - using CompiledVm::program; - using CompiledVm::config; - using CompiledVm::cachePtr; - using CompiledVm::datasetOffset; + using CompiledVm::mem; + using CompiledVm::compiler; + using CompiledVm::program; + using CompiledVm::config; + using CompiledVm::cachePtr; + using CompiledVm::datasetOffset; }; - using CompiledLightVmDefault = CompiledLightVm, true>; - using CompiledLightVmHardAes = CompiledLightVm, false>; - using CompiledLightVmLargePage = CompiledLightVm; - using CompiledLightVmLargePageHardAes = CompiledLightVm; -} \ No newline at end of file + using CompiledLightVmDefault = CompiledLightVm; + using CompiledLightVmHardAes = CompiledLightVm; +} diff --git a/src/crypto/randomx/vm_interpreted.cpp b/src/crypto/randomx/vm_interpreted.cpp index 236d3efe..f4c1e05c 100644 --- a/src/crypto/randomx/vm_interpreted.cpp +++ b/src/crypto/randomx/vm_interpreted.cpp @@ -33,21 +33,21 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
namespace randomx { - template - void InterpretedVm::setDataset(randomx_dataset* dataset) { + template + void InterpretedVm::setDataset(randomx_dataset* dataset) { datasetPtr = dataset; mem.memory = dataset->memory; } - template - void InterpretedVm::run(void* seed) { - VmBase::generateProgram(seed); + template + void InterpretedVm::run(void* seed) { + VmBase::generateProgram(seed); randomx_vm::initialize(); execute(); } - template - void InterpretedVm::execute() { + template + void InterpretedVm::execute() { NativeRegisterFile nreg; @@ -106,20 +106,18 @@ namespace randomx { rx_store_vec_f128(®.e[i].lo, nreg.e[i]); } - template - void InterpretedVm::datasetRead(uint64_t address, int_reg_t(&r)[RegistersCount]) { + template + void InterpretedVm::datasetRead(uint64_t address, int_reg_t(&r)[RegistersCount]) { uint64_t* datasetLine = (uint64_t*)(mem.memory + address); for (int i = 0; i < RegistersCount; ++i) r[i] ^= datasetLine[i]; } - template - void InterpretedVm::datasetPrefetch(uint64_t address) { + template + void InterpretedVm::datasetPrefetch(uint64_t address) { rx_prefetch_nta(mem.memory + address); } - template class InterpretedVm, false>; - template class InterpretedVm, true>; - template class InterpretedVm; - template class InterpretedVm; -} \ No newline at end of file + template class InterpretedVm; + template class InterpretedVm; +} diff --git a/src/crypto/randomx/vm_interpreted.hpp b/src/crypto/randomx/vm_interpreted.hpp index 99c88852..1dc9ab6d 100644 --- a/src/crypto/randomx/vm_interpreted.hpp +++ b/src/crypto/randomx/vm_interpreted.hpp @@ -38,38 +38,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
namespace randomx { - template - class InterpretedVm : public VmBase, public BytecodeMachine { + template + class InterpretedVm : public VmBase, public BytecodeMachine { public: - using VmBase::mem; - using VmBase::scratchpad; - using VmBase::program; - using VmBase::config; - using VmBase::reg; - using VmBase::datasetPtr; - using VmBase::datasetOffset; + using VmBase::mem; + using VmBase::scratchpad; + using VmBase::program; + using VmBase::config; + using VmBase::reg; + using VmBase::datasetPtr; + using VmBase::datasetOffset; + void* operator new(size_t size) { void* ptr = AlignedAllocator::allocMemory(size); if (ptr == nullptr) throw std::bad_alloc(); return ptr; } + void operator delete(void* ptr) { AlignedAllocator::freeMemory(ptr, sizeof(InterpretedVm)); } + void run(void* seed) override; void setDataset(randomx_dataset* dataset) override; + protected: virtual void datasetRead(uint64_t blockNumber, int_reg_t(&r)[RegistersCount]); virtual void datasetPrefetch(uint64_t blockNumber); + private: void execute(); InstructionByteCode bytecode[RANDOMX_PROGRAM_MAX_SIZE]; }; - using InterpretedVmDefault = InterpretedVm, true>; - using InterpretedVmHardAes = InterpretedVm, false>; - using InterpretedVmLargePage = InterpretedVm; - using InterpretedVmLargePageHardAes = InterpretedVm; -} \ No newline at end of file + using InterpretedVmDefault = InterpretedVm; + using InterpretedVmHardAes = InterpretedVm; +} diff --git a/src/crypto/randomx/vm_interpreted_light.cpp b/src/crypto/randomx/vm_interpreted_light.cpp index c54b32f6..9c97187b 100644 --- a/src/crypto/randomx/vm_interpreted_light.cpp +++ b/src/crypto/randomx/vm_interpreted_light.cpp @@ -31,14 +31,14 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
namespace randomx { - template - void InterpretedLightVm::setCache(randomx_cache* cache) { + template + void InterpretedLightVm::setCache(randomx_cache* cache) { cachePtr = cache; mem.memory = cache->memory; } - template - void InterpretedLightVm::datasetRead(uint64_t address, int_reg_t(&r)[8]) { + template + void InterpretedLightVm::datasetRead(uint64_t address, int_reg_t(&r)[8]) { uint32_t itemNumber = address / CacheLineSize; int_reg_t rl[8]; @@ -48,8 +48,6 @@ namespace randomx { r[q] ^= rl[q]; } - template class InterpretedLightVm, false>; - template class InterpretedLightVm, true>; - template class InterpretedLightVm; - template class InterpretedLightVm; + template class InterpretedLightVm; + template class InterpretedLightVm; } diff --git a/src/crypto/randomx/vm_interpreted_light.hpp b/src/crypto/randomx/vm_interpreted_light.hpp index 02d678f6..1a35c580 100644 --- a/src/crypto/randomx/vm_interpreted_light.hpp +++ b/src/crypto/randomx/vm_interpreted_light.hpp @@ -33,29 +33,31 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
namespace randomx { - template - class InterpretedLightVm : public InterpretedVm { + template + class InterpretedLightVm : public InterpretedVm { public: - using VmBase::mem; - using VmBase::cachePtr; + using VmBase::mem; + using VmBase::cachePtr; + void* operator new(size_t size) { void* ptr = AlignedAllocator::allocMemory(size); if (ptr == nullptr) throw std::bad_alloc(); return ptr; } + void operator delete(void* ptr) { AlignedAllocator::freeMemory(ptr, sizeof(InterpretedLightVm)); } + void setDataset(randomx_dataset* dataset) override { } void setCache(randomx_cache* cache) override; + protected: void datasetRead(uint64_t address, int_reg_t(&r)[8]) override; void datasetPrefetch(uint64_t address) override { } }; - using InterpretedLightVmDefault = InterpretedLightVm, true>; - using InterpretedLightVmHardAes = InterpretedLightVm, false>; - using InterpretedLightVmLargePage = InterpretedLightVm; - using InterpretedLightVmLargePageHardAes = InterpretedLightVm; + using InterpretedLightVmDefault = InterpretedLightVm; + using InterpretedLightVmHardAes = InterpretedLightVm; } diff --git a/src/crypto/rx/RxVm.cpp b/src/crypto/rx/RxVm.cpp index b02f708e..6426443a 100644 --- a/src/crypto/rx/RxVm.cpp +++ b/src/crypto/rx/RxVm.cpp @@ -31,12 +31,8 @@ #include "crypto/rx/RxVm.h" -xmrig::RxVm::RxVm(RxDataset *dataset, bool hugePages, bool softAes) +xmrig::RxVm::RxVm(RxDataset *dataset, uint8_t *scratchpad, bool softAes) { - if (hugePages) { - m_flags |= RANDOMX_FLAG_LARGE_PAGES; - } - if (!softAes) { m_flags |= RANDOMX_FLAG_HARD_AES; } @@ -49,17 +45,7 @@ xmrig::RxVm::RxVm(RxDataset *dataset, bool hugePages, bool softAes) m_flags |= RANDOMX_FLAG_JIT; } - m_vm = randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get()); - - if (!m_vm) { - m_flags &= ~RANDOMX_FLAG_LARGE_PAGES; - m_vm = randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get()); - } - - if (!m_vm) { - m_flags &= ~RANDOMX_FLAG_HARD_AES; - m_vm = 
randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get()); - } + m_vm = randomx_create_vm(static_cast(m_flags), dataset->cache()->get(), dataset->get(), scratchpad); } diff --git a/src/crypto/rx/RxVm.h b/src/crypto/rx/RxVm.h index 90af8187..d7e617e4 100644 --- a/src/crypto/rx/RxVm.h +++ b/src/crypto/rx/RxVm.h @@ -44,7 +44,7 @@ class RxDataset; class RxVm { public: - RxVm(RxDataset *dataset, bool hugePages, bool softAes); + RxVm(RxDataset *dataset, uint8_t *scratchpad, bool softAes); ~RxVm(); inline randomx_vm *get() const { return m_vm; } diff --git a/src/workers/WorkersLegacy.cpp b/src/workers/WorkersLegacy.cpp index 0db0a3cf..4d6f9de9 100644 --- a/src/workers/WorkersLegacy.cpp +++ b/src/workers/WorkersLegacy.cpp @@ -67,24 +67,24 @@ xmrig::Controller *WorkersLegacy::m_controller = nullptr; //} -size_t WorkersLegacy::hugePages() -{ - uv_mutex_lock(&m_mutex); - const size_t hugePages = m_status.hugePages; - uv_mutex_unlock(&m_mutex); +//size_t WorkersLegacy::hugePages() +//{ +// uv_mutex_lock(&m_mutex); +// const size_t hugePages = m_status.hugePages; +// uv_mutex_unlock(&m_mutex); - return hugePages; -} +// return hugePages; +//} -size_t WorkersLegacy::threads() -{ - uv_mutex_lock(&m_mutex); - const size_t threads = m_status.threads; - uv_mutex_unlock(&m_mutex); +//size_t WorkersLegacy::threads() +//{ +// uv_mutex_lock(&m_mutex); +// const size_t threads = m_status.threads; +// uv_mutex_unlock(&m_mutex); - return threads; -} +// return threads; +//} //void Workers::pause() @@ -186,24 +186,24 @@ void WorkersLegacy::start(xmrig::Controller *controller) //} -#ifdef XMRIG_FEATURE_API -void WorkersLegacy::threadsSummary(rapidjson::Document &doc) -{ - uv_mutex_lock(&m_mutex); - const uint64_t pages[2] = { m_status.hugePages, m_status.pages }; - const uint64_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo); - uv_mutex_unlock(&m_mutex); +//#ifdef XMRIG_FEATURE_API +//void WorkersLegacy::threadsSummary(rapidjson::Document &doc) +//{ 
+// uv_mutex_lock(&m_mutex); +// const uint64_t pages[2] = { m_status.hugePages, m_status.pages }; +// const uint64_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo); +// uv_mutex_unlock(&m_mutex); - auto &allocator = doc.GetAllocator(); +// auto &allocator = doc.GetAllocator(); - rapidjson::Value hugepages(rapidjson::kArrayType); - hugepages.PushBack(pages[0], allocator); - hugepages.PushBack(pages[1], allocator); +// rapidjson::Value hugepages(rapidjson::kArrayType); +// hugepages.PushBack(pages[0], allocator); +// hugepages.PushBack(pages[1], allocator); - doc.AddMember("hugepages", hugepages, allocator); - doc.AddMember("memory", memory, allocator); -} -#endif +// doc.AddMember("hugepages", hugepages, allocator); +// doc.AddMember("memory", memory, allocator); +//} +//#endif //void WorkersLegacy::onTick(uv_timer_t *) diff --git a/src/workers/WorkersLegacy.h b/src/workers/WorkersLegacy.h index be9e417a..d8ab1e59 100644 --- a/src/workers/WorkersLegacy.h +++ b/src/workers/WorkersLegacy.h @@ -55,8 +55,8 @@ namespace xmrig { class WorkersLegacy { public: - static size_t hugePages(); - static size_t threads(); +// static size_t hugePages(); +// static size_t threads(); // static void pause(); // static void printHashrate(bool detail); // static void setEnabled(bool enabled); @@ -68,9 +68,9 @@ public: // static inline bool isEnabled() { return m_enabled; } // static inline Hashrate *hashrate() { return m_hashrate; } -# ifdef XMRIG_FEATURE_API - static void threadsSummary(rapidjson::Document &doc); -# endif +//# ifdef XMRIG_FEATURE_API +// static void threadsSummary(rapidjson::Document &doc); +//# endif private: // static void onReady(void *arg); From 2bf5ffb2df492734ffc69db1582effac4d28607f Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 17 Jul 2019 04:57:58 +0700 Subject: [PATCH 029/172] Class Mem replaced to VirtualMemory. 
--- CMakeLists.txt | 5 - src/App.cpp | 4 +- src/Mem.cpp | 31 ---- src/Mem.h | 72 --------- src/Mem_unix.cpp | 88 ----------- src/Mem_win.cpp | 184 ----------------------- src/Summary.cpp | 4 +- src/backend/common/interfaces/IWorker.h | 1 + src/core/Miner.cpp | 1 + src/crypto/common/VirtualMemory.h | 4 + src/crypto/common/VirtualMemory_unix.cpp | 11 ++ src/crypto/common/VirtualMemory_win.cpp | 126 ++++++++++++++++ 12 files changed, 147 insertions(+), 384 deletions(-) delete mode 100644 src/Mem.cpp delete mode 100644 src/Mem.h delete mode 100644 src/Mem_unix.cpp delete mode 100644 src/Mem_win.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index f8a56c48..97491518 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,7 +34,6 @@ set(HEADERS src/core/config/usage.h src/core/Controller.h src/core/Miner.h - src/Mem.h src/net/interfaces/IJobResultListener.h src/net/JobResult.h src/net/JobResults.h @@ -85,7 +84,6 @@ set(SOURCES src/core/config/ConfigTransform.cpp src/core/Controller.cpp src/core/Miner.cpp - src/Mem.cpp src/net/JobResults.cpp src/net/Network.cpp src/net/NetworkState.cpp @@ -113,7 +111,6 @@ if (WIN32) "${SOURCES_OS}" res/app.rc src/App_win.cpp - src/Mem_win.cpp src/crypto/common/VirtualMemory_win.cpp ) @@ -123,14 +120,12 @@ elseif (APPLE) set(SOURCES_OS "${SOURCES_OS}" src/App_unix.cpp - src/Mem_unix.cpp src/crypto/common/VirtualMemory_unix.cpp ) else() set(SOURCES_OS "${SOURCES_OS}" src/App_unix.cpp - src/Mem_unix.cpp src/crypto/common/VirtualMemory_unix.cpp ) diff --git a/src/App.cpp b/src/App.cpp index d6c39595..ccbaad4f 100644 --- a/src/App.cpp +++ b/src/App.cpp @@ -37,7 +37,7 @@ #include "core/config/Config.h" #include "core/Controller.h" #include "core/Miner.h" -#include "Mem.h" +#include "crypto/common/VirtualMemory.h" #include "net/Network.h" #include "Summary.h" #include "version.h" @@ -76,7 +76,7 @@ int xmrig::App::exec() background(); - Mem::init(m_controller->config()->cpu().isHugePages()); + 
VirtualMemory::init(m_controller->config()->cpu().isHugePages()); Summary::print(m_controller); diff --git a/src/Mem.cpp b/src/Mem.cpp deleted file mode 100644 index 5fcea306..00000000 --- a/src/Mem.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018 Lee Clagett - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - - -#include "Mem.h" - - -bool Mem::m_enabled = true; -int Mem::m_flags = 0; diff --git a/src/Mem.h b/src/Mem.h deleted file mode 100644 index 8e5c418b..00000000 --- a/src/Mem.h +++ /dev/null @@ -1,72 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018 Lee Clagett - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef XMRIG_MEM_H -#define XMRIG_MEM_H - - -#include -#include - - -#include "crypto/cn/CnAlgo.h" - - -struct cryptonight_ctx; - - -struct MemInfo -{ - alignas(16) uint8_t *memory = nullptr; - - size_t hugePages = 0; - size_t pages = 0; - size_t size = 0; -}; - - -class Mem -{ -public: - enum Flags { - HugepagesAvailable = 1, - HugepagesEnabled = 2, - Lock = 4 - }; - - static void init(bool enabled); - - static inline bool isHugepagesAvailable() { return (m_flags & HugepagesAvailable) != 0; } - -private: - static void allocate(MemInfo &info, bool enabled); - static void release(MemInfo &info); - - static int m_flags; - static bool m_enabled; -}; - - -#endif /* XMRIG_MEM_H */ diff --git a/src/Mem_unix.cpp b/src/Mem_unix.cpp deleted file mode 100644 index 4dc13e93..00000000 --- a/src/Mem_unix.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018 Lee Clagett - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - - -#include -#include - - -#include "base/io/log/Log.h" -#include "crypto/common/portable/mm_malloc.h" -#include "crypto/common/VirtualMemory.h" -#include "crypto/cn/CryptoNight.h" -#include "Mem.h" - - -#if defined(__APPLE__) -# include -#endif - - -void Mem::init(bool enabled) -{ - m_enabled = enabled; -} - - -void Mem::allocate(MemInfo &info, bool enabled) -{ - info.hugePages = 0; - - if (!enabled) { - info.memory = static_cast(_mm_malloc(info.size, 4096)); - - return; - } - - info.memory = static_cast(xmrig::VirtualMemory::allocateLargePagesMemory(info.size)); - if (!info.memory) { - return allocate(info, false);; - } - - info.hugePages = info.pages; - - if (madvise(info.memory, info.size, MADV_RANDOM | MADV_WILLNEED) != 0) { - LOG_ERR("madvise failed"); - } - - if (mlock(info.memory, info.size) == 0) { - m_flags |= Lock; - } -} - - -void Mem::release(MemInfo &info) -{ - if (info.hugePages) { - if (m_flags & Lock) { - munlock(info.memory, info.size); - } - - xmrig::VirtualMemory::freeLargePagesMemory(info.memory, info.size); - } - else { - _mm_free(info.memory); - } -} diff --git a/src/Mem_win.cpp b/src/Mem_win.cpp deleted file mode 100644 index 56b4521d..00000000 --- a/src/Mem_win.cpp +++ /dev/null @@ -1,184 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018 Lee Clagett - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - - -#include -#include -#include -#include - - -#include "base/io/log/Log.h" -#include "crypto/common/portable/mm_malloc.h" -#include "crypto/common/VirtualMemory.h" -#include "crypto/cn/CryptoNight.h" -#include "Mem.h" - - -/***************************************************************** -SetLockPagesPrivilege: a function to obtain or -release the privilege of locking physical pages. - -Inputs: - -HANDLE hProcess: Handle for the process for which the -privilege is needed - -BOOL bEnable: Enable (TRUE) or disable? - -Return value: TRUE indicates success, FALSE failure. - -*****************************************************************/ -/** - * AWE Example: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366531(v=vs.85).aspx - * Creating a File Mapping Using Large Pages: https://msdn.microsoft.com/en-us/library/aa366543(VS.85).aspx - */ -static BOOL SetLockPagesPrivilege() { - HANDLE token; - - if (OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token) != TRUE) { - return FALSE; - } - - TOKEN_PRIVILEGES tp; - tp.PrivilegeCount = 1; - tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; - - if (LookupPrivilegeValue(nullptr, SE_LOCK_MEMORY_NAME, &(tp.Privileges[0].Luid)) != TRUE) { - return FALSE; - } - - BOOL rc = AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &tp, 0, nullptr, nullptr); - if (rc != TRUE || GetLastError() != ERROR_SUCCESS) { - return FALSE; - } - - CloseHandle(token); - - return TRUE; -} - - -static LSA_UNICODE_STRING StringToLsaUnicodeString(LPCTSTR string) { - LSA_UNICODE_STRING lsaString; - - DWORD dwLen = (DWORD) 
wcslen(string); - lsaString.Buffer = (LPWSTR) string; - lsaString.Length = (USHORT)((dwLen) * sizeof(WCHAR)); - lsaString.MaximumLength = (USHORT)((dwLen + 1) * sizeof(WCHAR)); - return lsaString; -} - - -static BOOL ObtainLockPagesPrivilege() { - HANDLE token; - PTOKEN_USER user = nullptr; - - if (OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &token) == TRUE) { - DWORD size = 0; - - GetTokenInformation(token, TokenUser, nullptr, 0, &size); - if (size) { - user = (PTOKEN_USER) LocalAlloc(LPTR, size); - } - - GetTokenInformation(token, TokenUser, user, size, &size); - CloseHandle(token); - } - - if (!user) { - return FALSE; - } - - LSA_HANDLE handle; - LSA_OBJECT_ATTRIBUTES attributes; - ZeroMemory(&attributes, sizeof(attributes)); - - BOOL result = FALSE; - if (LsaOpenPolicy(nullptr, &attributes, POLICY_ALL_ACCESS, &handle) == 0) { - LSA_UNICODE_STRING str = StringToLsaUnicodeString(_T(SE_LOCK_MEMORY_NAME)); - - if (LsaAddAccountRights(handle, user->User.Sid, &str, 1) == 0) { - LOG_NOTICE("Huge pages support was successfully enabled, but reboot required to use it"); - result = TRUE; - } - - LsaClose(handle); - } - - LocalFree(user); - return result; -} - - -static BOOL TrySetLockPagesPrivilege() { - if (SetLockPagesPrivilege()) { - return TRUE; - } - - return ObtainLockPagesPrivilege() && SetLockPagesPrivilege(); -} - - -void Mem::init(bool enabled) -{ - m_enabled = enabled; - - if (enabled && TrySetLockPagesPrivilege()) { - m_flags |= HugepagesAvailable; - } -} - - -void Mem::allocate(MemInfo &info, bool enabled) -{ - info.hugePages = 0; - - if (!enabled) { - info.memory = static_cast(_mm_malloc(info.size, 4096)); - - return; - } - - info.memory = static_cast(xmrig::VirtualMemory::allocateLargePagesMemory(info.size)); - if (info.memory) { - info.hugePages = info.pages; - - return; - } - - allocate(info, false); -} - - -void Mem::release(MemInfo &info) -{ - if (info.hugePages) { - xmrig::VirtualMemory::freeLargePagesMemory(info.memory, info.size); - } - else { 
- _mm_free(info.memory); - } -} diff --git a/src/Summary.cpp b/src/Summary.cpp index 59e540d4..af7cad09 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -34,7 +34,7 @@ #include "core/config/Config.h" #include "core/Controller.h" #include "crypto/common/Assembly.h" -#include "Mem.h" +#include "crypto/common/VirtualMemory.h" #include "Summary.h" #include "version.h" @@ -59,7 +59,7 @@ inline static const char *asmName(xmrig::Assembly::Id assembly) static void print_memory(xmrig::Config *) { # ifdef _WIN32 xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") "%s", - "HUGE PAGES", Mem::isHugepagesAvailable() ? GREEN_BOLD("available") : RED_BOLD("unavailable")); + "HUGE PAGES", xmrig::VirtualMemory::isHugepagesAvailable() ? GREEN_BOLD("available") : RED_BOLD("unavailable")); # endif } diff --git a/src/backend/common/interfaces/IWorker.h b/src/backend/common/interfaces/IWorker.h index 5c99680b..0d7fe1d2 100644 --- a/src/backend/common/interfaces/IWorker.h +++ b/src/backend/common/interfaces/IWorker.h @@ -27,6 +27,7 @@ #include +#include namespace xmrig { diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 83ce2206..1764a79e 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -23,6 +23,7 @@ */ +#include #include diff --git a/src/crypto/common/VirtualMemory.h b/src/crypto/common/VirtualMemory.h index 98212e40..e2a5ac22 100644 --- a/src/crypto/common/VirtualMemory.h +++ b/src/crypto/common/VirtualMemory.h @@ -51,9 +51,11 @@ public: static void *allocateLargePagesMemory(size_t size); static void flushInstructionCache(void *p, size_t size); static void freeLargePagesMemory(void *p, size_t size); + static void init(bool hugePages); static void protectExecutableMemory(void *p, size_t size); static void unprotectExecutableMemory(void *p, size_t size); + static inline bool isHugepagesAvailable() { return (m_globalFlags & HUGEPAGES_AVAILABLE) != 0; } static inline constexpr size_t align(size_t pos, size_t align = 2097152) { return ((pos - 1) / align + 1) * 
align; } private: @@ -63,6 +65,8 @@ private: LOCK = 4 }; + static int m_globalFlags; + int m_flags = 0; size_t m_size = 0; uint8_t *m_scratchpad = nullptr; diff --git a/src/crypto/common/VirtualMemory_unix.cpp b/src/crypto/common/VirtualMemory_unix.cpp index 665fc02b..310a043a 100644 --- a/src/crypto/common/VirtualMemory_unix.cpp +++ b/src/crypto/common/VirtualMemory_unix.cpp @@ -38,6 +38,9 @@ #endif +int xmrig::VirtualMemory::m_globalFlags = 0; + + xmrig::VirtualMemory::VirtualMemory(size_t size, bool hugePages, size_t align) : m_size(VirtualMemory::align(size)) { @@ -120,6 +123,14 @@ void xmrig::VirtualMemory::freeLargePagesMemory(void *p, size_t size) } +void xmrig::VirtualMemory::init(bool hugePages) +{ + if (hugePages) { + m_globalFlags = HUGEPAGES | HUGEPAGES_AVAILABLE; + } +} + + void xmrig::VirtualMemory::protectExecutableMemory(void *p, size_t size) { mprotect(p, size, PROT_READ | PROT_EXEC); diff --git a/src/crypto/common/VirtualMemory_win.cpp b/src/crypto/common/VirtualMemory_win.cpp index 7aa98f89..7bdb6365 100644 --- a/src/crypto/common/VirtualMemory_win.cpp +++ b/src/crypto/common/VirtualMemory_win.cpp @@ -27,11 +27,123 @@ #include #include +#include +#include +#include "base/io/log/Log.h" +#include "crypto/common/portable/mm_malloc.h" #include "crypto/common/VirtualMemory.h" +/***************************************************************** +SetLockPagesPrivilege: a function to obtain or +release the privilege of locking physical pages. + +Inputs: + +HANDLE hProcess: Handle for the process for which the +privilege is needed + +BOOL bEnable: Enable (TRUE) or disable? + +Return value: TRUE indicates success, FALSE failure. 
+ +*****************************************************************/ +/** + * AWE Example: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366531(v=vs.85).aspx + * Creating a File Mapping Using Large Pages: https://msdn.microsoft.com/en-us/library/aa366543(VS.85).aspx + */ +static BOOL SetLockPagesPrivilege() { + HANDLE token; + + if (OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token) != TRUE) { + return FALSE; + } + + TOKEN_PRIVILEGES tp; + tp.PrivilegeCount = 1; + tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; + + if (LookupPrivilegeValue(nullptr, SE_LOCK_MEMORY_NAME, &(tp.Privileges[0].Luid)) != TRUE) { + return FALSE; + } + + BOOL rc = AdjustTokenPrivileges(token, FALSE, (PTOKEN_PRIVILEGES) &tp, 0, nullptr, nullptr); + if (rc != TRUE || GetLastError() != ERROR_SUCCESS) { + return FALSE; + } + + CloseHandle(token); + + return TRUE; +} + + +static LSA_UNICODE_STRING StringToLsaUnicodeString(LPCTSTR string) { + LSA_UNICODE_STRING lsaString; + + DWORD dwLen = (DWORD) wcslen(string); + lsaString.Buffer = (LPWSTR) string; + lsaString.Length = (USHORT)((dwLen) * sizeof(WCHAR)); + lsaString.MaximumLength = (USHORT)((dwLen + 1) * sizeof(WCHAR)); + return lsaString; +} + + +static BOOL ObtainLockPagesPrivilege() { + HANDLE token; + PTOKEN_USER user = nullptr; + + if (OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &token) == TRUE) { + DWORD size = 0; + + GetTokenInformation(token, TokenUser, nullptr, 0, &size); + if (size) { + user = (PTOKEN_USER) LocalAlloc(LPTR, size); + } + + GetTokenInformation(token, TokenUser, user, size, &size); + CloseHandle(token); + } + + if (!user) { + return FALSE; + } + + LSA_HANDLE handle; + LSA_OBJECT_ATTRIBUTES attributes; + ZeroMemory(&attributes, sizeof(attributes)); + + BOOL result = FALSE; + if (LsaOpenPolicy(nullptr, &attributes, POLICY_ALL_ACCESS, &handle) == 0) { + LSA_UNICODE_STRING str = StringToLsaUnicodeString(_T(SE_LOCK_MEMORY_NAME)); + + if (LsaAddAccountRights(handle, 
user->User.Sid, &str, 1) == 0) { + LOG_NOTICE("Huge pages support was successfully enabled, but reboot required to use it"); + result = TRUE; + } + + LsaClose(handle); + } + + LocalFree(user); + return result; +} + + +static BOOL TrySetLockPagesPrivilege() { + if (SetLockPagesPrivilege()) { + return TRUE; + } + + return ObtainLockPagesPrivilege() && SetLockPagesPrivilege(); +} + + +int xmrig::VirtualMemory::m_globalFlags = 0; + + xmrig::VirtualMemory::VirtualMemory(size_t size, bool hugePages, size_t align) : m_size(VirtualMemory::align(size)) { @@ -94,6 +206,20 @@ void xmrig::VirtualMemory::freeLargePagesMemory(void *p, size_t) } +void xmrig::VirtualMemory::init(bool hugePages) +{ + if (!hugePages) { + return; + } + + m_globalFlags = HUGEPAGES; + + if (TrySetLockPagesPrivilege()) { + m_globalFlags |= HUGEPAGES_AVAILABLE; + } +} + + void xmrig::VirtualMemory::protectExecutableMemory(void *p, size_t size) { DWORD oldProtect; From bcae974ea1b3dfed8797e161c3852c95c4c76c9b Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 17 Jul 2019 05:01:12 +0700 Subject: [PATCH 030/172] Fixed build. 
--- src/backend/cpu/CpuWorker.h | 1 - src/crypto/cn/r/CryptonightR_gen.cpp | 1 - src/workers/CpuThreadLegacy.cpp | 1 - src/workers/WorkersLegacy.cpp | 1 - 4 files changed, 4 deletions(-) diff --git a/src/backend/cpu/CpuWorker.h b/src/backend/cpu/CpuWorker.h index c0f9dfaf..4cdd10f8 100644 --- a/src/backend/cpu/CpuWorker.h +++ b/src/backend/cpu/CpuWorker.h @@ -31,7 +31,6 @@ #include "backend/common/WorkerJob.h" #include "backend/cpu/CpuLaunchData.h" #include "base/net/stratum/Job.h" -#include "Mem.h" #include "net/JobResult.h" diff --git a/src/crypto/cn/r/CryptonightR_gen.cpp b/src/crypto/cn/r/CryptonightR_gen.cpp index 3037327a..3b80f805 100644 --- a/src/crypto/cn/r/CryptonightR_gen.cpp +++ b/src/crypto/cn/r/CryptonightR_gen.cpp @@ -31,7 +31,6 @@ typedef void(*void_func)(); #include "crypto/cn/asm/CryptonightR_template.h" #include "crypto/common/Assembly.h" #include "crypto/common/VirtualMemory.h" -#include "Mem.h" static inline void add_code(uint8_t* &p, void (*p1)(), void (*p2)()) diff --git a/src/workers/CpuThreadLegacy.cpp b/src/workers/CpuThreadLegacy.cpp index a560d33f..b5d457c7 100644 --- a/src/workers/CpuThreadLegacy.cpp +++ b/src/workers/CpuThreadLegacy.cpp @@ -29,7 +29,6 @@ #include "crypto/cn/CnHash.h" #include "crypto/common/Assembly.h" #include "crypto/common/VirtualMemory.h" -#include "Mem.h" #include "rapidjson/document.h" #include "workers/CpuThreadLegacy.h" diff --git a/src/workers/WorkersLegacy.cpp b/src/workers/WorkersLegacy.cpp index 4d6f9de9..e7191116 100644 --- a/src/workers/WorkersLegacy.cpp +++ b/src/workers/WorkersLegacy.cpp @@ -38,7 +38,6 @@ #include "crypto/rx/RxAlgo.h" #include "crypto/rx/RxCache.h" #include "crypto/rx/RxDataset.h" -#include "Mem.h" #include "rapidjson/document.h" //#include "workers/Hashrate.h" #include "workers/WorkersLegacy.h" From 4f49533e9876fe9902809a0599bed22e570157cc Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 17 Jul 2019 05:33:13 +0700 Subject: [PATCH 031/172] Fixed warnings. 
--- src/backend/common/Hashrate.h | 1 + src/crypto/randomx/argon2_core.c | 13 ++++++------- src/crypto/randomx/bytecode_machine.hpp | 2 +- src/crypto/randomx/dataset.cpp | 2 +- src/crypto/randomx/jit_compiler_x86.cpp | 2 +- src/crypto/randomx/randomx.cpp | 12 +++++------- src/crypto/randomx/superscalar.cpp | 12 ++++++------ 7 files changed, 21 insertions(+), 23 deletions(-) diff --git a/src/backend/common/Hashrate.h b/src/backend/common/Hashrate.h index 1787bf6a..2187c0be 100644 --- a/src/backend/common/Hashrate.h +++ b/src/backend/common/Hashrate.h @@ -26,6 +26,7 @@ #define XMRIG_HASHRATE_H +#include #include diff --git a/src/crypto/randomx/argon2_core.c b/src/crypto/randomx/argon2_core.c index e9174222..4b8fa43d 100644 --- a/src/crypto/randomx/argon2_core.c +++ b/src/crypto/randomx/argon2_core.c @@ -90,12 +90,12 @@ static void load_block(block *dst, const void *input) { } } -static void store_block(void *output, const block *src) { - unsigned i; - for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) { - store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]); - } -} +//static void store_block(void *output, const block *src) { +// unsigned i; +// for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) { +// store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]); +// } +//} /***************Memory functions*****************/ @@ -484,7 +484,6 @@ void rxa2_initial_hash(uint8_t *blockhash, argon2_context *context, argon2_type int rxa2_argon_initialize(argon2_instance_t *instance, argon2_context *context) { uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; - int result = ARGON2_OK; if (instance == NULL || context == NULL) return ARGON2_INCORRECT_PARAMETER; diff --git a/src/crypto/randomx/bytecode_machine.hpp b/src/crypto/randomx/bytecode_machine.hpp index 3b05f378..810f854a 100644 --- a/src/crypto/randomx/bytecode_machine.hpp +++ b/src/crypto/randomx/bytecode_machine.hpp @@ -90,7 +90,7 @@ namespace randomx { } static void executeBytecode(InstructionByteCode* bytecode, uint8_t* 
scratchpad, ProgramConfiguration& config) { - for (int pc = 0; pc < RandomX_CurrentConfig.ProgramSize; ++pc) { + for (int pc = 0; pc < static_cast(RandomX_CurrentConfig.ProgramSize); ++pc) { auto& ibc = bytecode[pc]; executeInstruction(ibc, pc, scratchpad, config); } diff --git a/src/crypto/randomx/dataset.cpp b/src/crypto/randomx/dataset.cpp index 3951b55b..b094b1cb 100644 --- a/src/crypto/randomx/dataset.cpp +++ b/src/crypto/randomx/dataset.cpp @@ -121,7 +121,7 @@ namespace randomx { cache->reciprocalCache.clear(); randomx::Blake2Generator gen(key, keySize); - for (int i = 0; i < RandomX_CurrentConfig.CacheAccesses; ++i) { + for (uint32_t i = 0; i < RandomX_CurrentConfig.CacheAccesses; ++i) { randomx::generateSuperscalar(cache->programs[i], gen); for (unsigned j = 0; j < cache->programs[i].getSize(); ++j) { auto& instr = cache->programs[i](j); diff --git a/src/crypto/randomx/jit_compiler_x86.cpp b/src/crypto/randomx/jit_compiler_x86.cpp index 8870a018..6f04e28a 100644 --- a/src/crypto/randomx/jit_compiler_x86.cpp +++ b/src/crypto/randomx/jit_compiler_x86.cpp @@ -194,7 +194,7 @@ namespace randomx { static const uint8_t NOP7[] = { 0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00 }; static const uint8_t NOP8[] = { 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 }; - static const uint8_t* NOPX[] = { NOP1, NOP2, NOP3, NOP4, NOP5, NOP6, NOP7, NOP8 }; +// static const uint8_t* NOPX[] = { NOP1, NOP2, NOP3, NOP4, NOP5, NOP6, NOP7, NOP8 }; size_t JitCompilerX86::getCodeSize() { return codePos - prologueSize; diff --git a/src/crypto/randomx/randomx.cpp b/src/crypto/randomx/randomx.cpp index df5fc9cb..9e88bc6d 100644 --- a/src/crypto/randomx/randomx.cpp +++ b/src/crypto/randomx/randomx.cpp @@ -233,7 +233,7 @@ RandomX_ConfigurationBase RandomX_CurrentConfig; extern "C" { randomx_cache *randomx_alloc_cache(randomx_flags flags) { - randomx_cache *cache; + randomx_cache *cache = nullptr; try { cache = new randomx_cache(); @@ -297,7 +297,7 @@ extern "C" { } randomx_dataset 
*randomx_alloc_dataset(randomx_flags flags) { - randomx_dataset *dataset; + randomx_dataset *dataset = nullptr; try { dataset = new randomx_dataset(); @@ -430,14 +430,12 @@ extern "C" { assert(inputSize == 0 || input != nullptr); assert(output != nullptr); alignas(16) uint64_t tempHash[8]; - int blakeResult = blake2b(tempHash, sizeof(tempHash), input, inputSize, nullptr, 0); - assert(blakeResult == 0); + blake2b(tempHash, sizeof(tempHash), input, inputSize, nullptr, 0); machine->initScratchpad(&tempHash); machine->resetRoundingMode(); - for (int chain = 0; chain < RandomX_CurrentConfig.ProgramCount - 1; ++chain) { + for (uint32_t chain = 0; chain < RandomX_CurrentConfig.ProgramCount - 1; ++chain) { machine->run(&tempHash); - blakeResult = blake2b(tempHash, sizeof(tempHash), machine->getRegisterFile(), sizeof(randomx::RegisterFile), nullptr, 0); - assert(blakeResult == 0); + blake2b(tempHash, sizeof(tempHash), machine->getRegisterFile(), sizeof(randomx::RegisterFile), nullptr, 0); } machine->run(&tempHash); machine->getFinalResult(output, RANDOMX_HASH_SIZE); diff --git a/src/crypto/randomx/superscalar.cpp b/src/crypto/randomx/superscalar.cpp index da605622..0ca1fe69 100644 --- a/src/crypto/randomx/superscalar.cpp +++ b/src/crypto/randomx/superscalar.cpp @@ -500,7 +500,7 @@ namespace randomx { // * either the last instruction applied to the register or its source must be different than this instruction // - this avoids optimizable instruction sequences such as "xor r1, r2; xor r1, r2" or "ror r, C1; ror r, C2" or "add r, C1; add r, C2" // * register r5 cannot be the destination of the IADD_RS instruction (limitation of the x86 lea instruction) - for (unsigned i = 0; i < 8; ++i) { + for (int i = 0; i < 8; ++i) { if (registers[i].latency <= cycle && (canReuse_ || i != src_) && (allowChainedMul || opGroup_ != SuperscalarInstructionType::IMUL_R || registers[i].lastOpGroup != SuperscalarInstructionType::IMUL_R) && (registers[i].lastOpGroup != opGroup_ || 
registers[i].lastOpPar != opGroupPar_) && (info_->getType() != SuperscalarInstructionType::IADD_RS || i != RegisterNeedsDisplacement)) availableRegisters.push_back(i); } @@ -581,7 +581,7 @@ namespace randomx { static int scheduleUop(ExecutionPort::type uop, ExecutionPort::type(&portBusy)[CYCLE_MAP_SIZE][3], int cycle) { //The scheduling here is done optimistically by checking port availability in order P5 -> P0 -> P1 to not overload //port P1 (multiplication) by instructions that can go to any port. - for (; cycle < RandomX_CurrentConfig.SuperscalarLatency + 4; ++cycle) { + for (; cycle < static_cast(RandomX_CurrentConfig.SuperscalarLatency) + 4; ++cycle) { if ((uop & ExecutionPort::P5) != 0 && !portBusy[cycle][2]) { if (commit) { if (trace) std::cout << "; P5 at cycle " << cycle << std::endl; @@ -626,7 +626,7 @@ namespace randomx { } else { //macro-ops with 2 uOPs are scheduled conservatively by requiring both uOPs to execute in the same cycle - for (; cycle < RandomX_CurrentConfig.SuperscalarLatency + 4; ++cycle) { + for (; cycle < static_cast(RandomX_CurrentConfig.SuperscalarLatency) + 4; ++cycle) { int cycle1 = scheduleUop(mop.getUop1(), portBusy, cycle); int cycle2 = scheduleUop(mop.getUop2(), portBusy, cycle); @@ -669,7 +669,7 @@ namespace randomx { //Since a decode cycle produces on average 3.45 macro-ops and there are only 3 ALU ports, execution ports are always //saturated first. The cycle limit is present only to guarantee loop termination. //Program size is limited to SuperscalarMaxSize instructions. 
- for (decodeCycle = 0; decodeCycle < RandomX_CurrentConfig.SuperscalarLatency && !portsSaturated && programSize < 3 * RandomX_CurrentConfig.SuperscalarLatency + 2; ++decodeCycle) { + for (decodeCycle = 0; decodeCycle < static_cast(RandomX_CurrentConfig.SuperscalarLatency) && !portsSaturated && programSize < 3 * static_cast(RandomX_CurrentConfig.SuperscalarLatency) + 2; ++decodeCycle) { //select a decode configuration decodeBuffer = decodeBuffer->fetchNext(currentInstruction.getType(), decodeCycle, mulCount, gen); @@ -683,7 +683,7 @@ namespace randomx { //if we have issued all macro-ops for the current RandomX instruction, create a new instruction if (macroOpIndex >= currentInstruction.getInfo().getSize()) { - if (portsSaturated || programSize >= 3 * RandomX_CurrentConfig.SuperscalarLatency + 2) + if (portsSaturated || programSize >= 3 * static_cast(RandomX_CurrentConfig.SuperscalarLatency) + 2) break; //select an instruction so that the first macro-op fits into the current slot currentInstruction.createForSlot(gen, decodeBuffer->getCounts()[bufferIndex], decodeBuffer->getIndex(), decodeBuffer->getSize() == bufferIndex + 1, bufferIndex == 0); @@ -777,7 +777,7 @@ namespace randomx { macroOpCount++; //terminating condition - if (scheduleCycle >= RandomX_CurrentConfig.SuperscalarLatency) { + if (scheduleCycle >= static_cast(RandomX_CurrentConfig.SuperscalarLatency)) { portsSaturated = true; } cycle = topCycle; From 8ce00adda4bb607f8bee5c69c8673921059ac3ce Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 17 Jul 2019 14:54:08 +0700 Subject: [PATCH 032/172] Restored "CPU READY" message. 
--- CMakeLists.txt | 2 - src/api/v1/ApiRouter.cpp | 2 - src/backend/common/Thread.h | 5 +- src/backend/common/Workers.cpp | 13 +- src/backend/common/Workers.h | 1 + src/backend/common/interfaces/IBackend.h | 2 + src/backend/cpu/CpuBackend.cpp | 106 ++++++++-- src/backend/cpu/CpuBackend.h | 3 +- src/core/Miner.cpp | 9 +- src/crypto/common/VirtualMemory.h | 7 +- src/crypto/rx/Rx.cpp | 2 +- src/workers/WorkersLegacy.cpp | 256 ----------------------- src/workers/WorkersLegacy.h | 113 ---------- 13 files changed, 114 insertions(+), 407 deletions(-) delete mode 100644 src/workers/WorkersLegacy.cpp delete mode 100644 src/workers/WorkersLegacy.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 97491518..89be20ec 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,7 +43,6 @@ set(HEADERS src/Summary.h src/version.h src/workers/CpuThreadLegacy.h - src/workers/WorkersLegacy.h ) set(HEADERS_CRYPTO @@ -90,7 +89,6 @@ set(SOURCES src/net/strategies/DonateStrategy.cpp src/Summary.cpp src/workers/CpuThreadLegacy.cpp - src/workers/WorkersLegacy.cpp src/xmrig.cpp ) diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index 2e6a815c..2a5bd3d0 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -36,8 +36,6 @@ #include "core/config/Config.h" #include "rapidjson/document.h" #include "version.h" -//#include "workers/Hashrate.h" -#include "workers/WorkersLegacy.h" static inline rapidjson::Value normalize(double d) diff --git a/src/backend/common/Thread.h b/src/backend/common/Thread.h index f1d174ec..b62d880c 100644 --- a/src/backend/common/Thread.h +++ b/src/backend/common/Thread.h @@ -32,6 +32,7 @@ namespace xmrig { +class IBackend; class IWorker; @@ -39,10 +40,11 @@ template class Thread { public: - inline Thread(size_t index, const T &config) : m_index(index), m_config(config) {} + inline Thread(IBackend *backend, size_t index, const T &config) : m_index(index), m_config(config), m_backend(backend) {} inline ~Thread() { uv_thread_join(&m_thread); } 
inline const T &config() const { return m_config; } + inline IBackend *backend() const { return m_backend; } inline IWorker *worker() const { return m_worker; } inline size_t index() const { return m_index; } inline void setWorker(IWorker *worker) { m_worker = worker; } @@ -51,6 +53,7 @@ public: private: const size_t m_index = 0; const T m_config; + IBackend *m_backend; IWorker *m_worker = nullptr; uv_thread_t m_thread; }; diff --git a/src/backend/common/Workers.cpp b/src/backend/common/Workers.cpp index c4ac38a5..629b564b 100644 --- a/src/backend/common/Workers.cpp +++ b/src/backend/common/Workers.cpp @@ -25,6 +25,7 @@ #include "backend/common/Hashrate.h" +#include "backend/common/interfaces/IBackend.h" #include "backend/common/Workers.h" #include "backend/cpu/CpuWorker.h" #include "base/io/log/Log.h" @@ -48,6 +49,7 @@ public: Hashrate *hashrate = nullptr; + IBackend *backend = nullptr; }; @@ -79,7 +81,14 @@ const xmrig::Hashrate *xmrig::Workers::hashrate() const template void xmrig::Workers::add(const T &data) { - m_workers.push_back(new Thread(m_workers.size(), data)); + m_workers.push_back(new Thread(d_ptr->backend, m_workers.size(), data)); +} + + +template +void xmrig::Workers::setBackend(IBackend *backend) +{ + d_ptr->backend = backend; } @@ -176,7 +185,7 @@ void xmrig::Workers::onReady(void *arg) return; } - worker->start(); + handle->backend()->start(worker); } diff --git a/src/backend/common/Workers.h b/src/backend/common/Workers.h index 3ef4b015..c13f5e77 100644 --- a/src/backend/common/Workers.h +++ b/src/backend/common/Workers.h @@ -47,6 +47,7 @@ public: const Hashrate *hashrate() const; void add(const T &data); + void setBackend(IBackend *backend); void start(); void stop(); void tick(uint64_t ticks); diff --git a/src/backend/common/interfaces/IBackend.h b/src/backend/common/interfaces/IBackend.h index 69ed4c8c..8ad7bb53 100644 --- a/src/backend/common/interfaces/IBackend.h +++ b/src/backend/common/interfaces/IBackend.h @@ -33,6 +33,7 @@ namespace 
xmrig { class Hashrate; +class IWorker; class Job; class String; @@ -46,6 +47,7 @@ public: virtual const String &profileName() const = 0; virtual void printHashrate(bool details) = 0; virtual void setJob(const Job &job) = 0; + virtual void start(IWorker *worker) = 0; virtual void stop() = 0; virtual void tick(uint64_t ticks) = 0; }; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index a0463832..bdb592ff 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -23,7 +23,11 @@ */ +#include + + #include "backend/common/Hashrate.h" +#include "backend/common/interfaces/IWorker.h" #include "backend/common/Workers.h" #include "backend/cpu/CpuBackend.h" #include "base/io/log/Log.h" @@ -31,6 +35,7 @@ #include "base/tools/String.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "crypto/common/VirtualMemory.h" namespace xmrig { @@ -39,18 +44,41 @@ namespace xmrig { extern template class Threads; +struct LaunchStatus +{ +public: + inline void reset() + { + hugePages = 0; + memory = 0; + pages = 0; + started = 0; + threads = 0; + ways = 0; + } + + size_t hugePages; + size_t memory; + size_t pages; + size_t started; + size_t threads; + size_t ways; +}; + + class CpuBackendPrivate { public: - inline CpuBackendPrivate(const Miner *miner, Controller *controller) : - miner(miner), + inline CpuBackendPrivate(Controller *controller) : controller(controller) { + uv_mutex_init(&mutex); } inline ~CpuBackendPrivate() { + uv_mutex_destroy(&mutex); } @@ -72,11 +100,42 @@ public: } + inline void start(const Job &job) + { + const CpuConfig &cpu = controller->config()->cpu(); + + algo = job.algorithm(); + profileName = cpu.threads().profileName(job.algorithm()); + threads = cpu.threads().get(profileName); + + LOG_INFO(GREEN_BOLD("CPU") " use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"), + profileName.data(), + threads.size(), + 
algo.memory() / 1024 + ); + + workers.stop(); + + status.reset(); + status.memory = algo.memory(); + status.threads = threads.size(); + + for (const CpuThread &thread : threads) { + workers.add(CpuLaunchData(controller->miner(), algo, cpu, thread)); + + status.ways += static_cast(thread.intensity()); + } + + workers.start(); + } + + Algorithm algo; - const Miner *miner; Controller *controller; CpuThreads threads; + LaunchStatus status; String profileName; + uv_mutex_t mutex; Workers workers; }; @@ -84,10 +143,10 @@ public: } // namespace xmrig -xmrig::CpuBackend::CpuBackend(const Miner *miner, Controller *controller) : - d_ptr(new CpuBackendPrivate(miner, controller)) +xmrig::CpuBackend::CpuBackend(Controller *controller) : + d_ptr(new CpuBackendPrivate(controller)) { - + d_ptr->workers.setBackend(this); } @@ -140,26 +199,33 @@ void xmrig::CpuBackend::setJob(const Job &job) return; } - const CpuConfig &cpu = d_ptr->controller->config()->cpu(); - const Threads &threads = cpu.threads(); + d_ptr->start(job); +} - d_ptr->algo = job.algorithm(); - d_ptr->profileName = threads.profileName(job.algorithm()); - d_ptr->threads = threads.get(d_ptr->profileName); - LOG_INFO(GREEN_BOLD("CPU") " use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"), - d_ptr->profileName.data(), - d_ptr->threads.size(), - d_ptr->algo.memory() / 1024 - ); +void xmrig::CpuBackend::start(IWorker *worker) +{ + uv_mutex_lock(&d_ptr->mutex); - d_ptr->workers.stop(); + const auto pages = worker->memory()->hugePages(); - for (const CpuThread &thread : d_ptr->threads) { - d_ptr->workers.add(CpuLaunchData(d_ptr->miner, d_ptr->algo, cpu, thread)); + d_ptr->status.started++; + d_ptr->status.hugePages += pages.first; + d_ptr->status.pages += pages.second; + + if (d_ptr->status.started == d_ptr->status.threads) { + const double percent = d_ptr->status.hugePages == 0 ? 
0.0 : static_cast(d_ptr->status.hugePages) / d_ptr->status.pages * 100.0; + const size_t memory = d_ptr->status.ways * d_ptr->status.memory / 1024; + + LOG_INFO(GREEN_BOLD("CPU READY") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu %1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") "", + d_ptr->status.threads, d_ptr->status.ways, + (d_ptr->status.hugePages == d_ptr->status.pages ? GREEN_BOLD_S : (d_ptr->status.hugePages == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), + d_ptr->status.hugePages, d_ptr->status.pages, percent, memory); } - d_ptr->workers.start(); + uv_mutex_unlock(&d_ptr->mutex); + + worker->start(); } diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h index a7b742eb..aabccb49 100644 --- a/src/backend/cpu/CpuBackend.h +++ b/src/backend/cpu/CpuBackend.h @@ -40,7 +40,7 @@ class Miner; class CpuBackend : public IBackend { public: - CpuBackend(const Miner *miner, Controller *controller); + CpuBackend(Controller *controller); ~CpuBackend() override; protected: @@ -48,6 +48,7 @@ protected: const String &profileName() const override; void printHashrate(bool details) override; void setJob(const Job &job) override; + void start(IWorker *worker) override; void stop() override; void tick(uint64_t ticks) override; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 1764a79e..40321662 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -101,7 +101,7 @@ xmrig::Miner::Miner(Controller *controller) { d_ptr->timer = new Timer(this); - d_ptr->backends.push_back(new CpuBackend(this, controller)); + d_ptr->backends.push_back(new CpuBackend(controller)); } @@ -210,15 +210,8 @@ void xmrig::Miner::setJob(const Job &job, bool donate) void xmrig::Miner::stop() { -// xmrig::Handle::close(m_timer); -// m_hashrate->stop(); - Nonce::stop(); -// for (size_t i = 0; i < m_workers.size(); ++i) { -// m_workers[i]->join(); -// } - for (IBackend *backend : d_ptr->backends) { backend->stop(); } diff --git a/src/crypto/common/VirtualMemory.h 
b/src/crypto/common/VirtualMemory.h index e2a5ac22..44f77a23 100644 --- a/src/crypto/common/VirtualMemory.h +++ b/src/crypto/common/VirtualMemory.h @@ -30,6 +30,7 @@ #include #include +#include namespace xmrig { @@ -43,10 +44,14 @@ public: ~VirtualMemory(); inline bool isHugePages() const { return m_flags & HUGEPAGES; } - inline size_t hugePages() const { return isHugePages() ? (align(size()) / 2097152) : 0; } inline size_t size() const { return m_size; } inline uint8_t *scratchpad() const { return m_scratchpad; } + inline std::pair hugePages() const + { + return std::pair(isHugePages() ? (align(size()) / 2097152) : 0, align(size()) / 2097152); + } + static void *allocateExecutableMemory(size_t size); static void *allocateLargePagesMemory(size_t size); static void flushInstructionCache(void *p, size_t size); diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 630dd45a..7f482034 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -113,7 +113,7 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor const uint64_t ts = Chrono::steadyMSecs(); if (d_ptr->dataset->get() != nullptr) { - LOG_INFO("%s" MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s") " threads " WHITE_BOLD("%u") BLACK_BOLD(" seed %s..."), + LOG_INFO("%s" MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."), tag, algorithm.shortName(), d_ptr->initThreads, diff --git a/src/workers/WorkersLegacy.cpp b/src/workers/WorkersLegacy.cpp deleted file mode 100644 index e7191116..00000000 --- a/src/workers/WorkersLegacy.cpp +++ /dev/null @@ -1,256 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify 
- * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include - - -#include "api/Api.h" -#include "backend/cpu/CpuWorker.h" -#include "base/io/log/Log.h" -#include "base/tools/Chrono.h" -#include "base/tools/Handle.h" -#include "core/config/Config.h" -#include "core/Controller.h" -#include "crypto/common/Nonce.h" -#include "crypto/rx/RxAlgo.h" -#include "crypto/rx/RxCache.h" -#include "crypto/rx/RxDataset.h" -#include "rapidjson/document.h" -//#include "workers/Hashrate.h" -#include "workers/WorkersLegacy.h" - - -bool WorkersLegacy::m_active = false; -bool WorkersLegacy::m_enabled = true; -//Hashrate *WorkersLegacy::m_hashrate = nullptr; -xmrig::Job WorkersLegacy::m_job; -WorkersLegacy::LaunchStatus WorkersLegacy::m_status; -std::vector* > WorkersLegacy::m_workers; -uint64_t WorkersLegacy::m_ticks = 0; -uv_mutex_t WorkersLegacy::m_mutex; -uv_rwlock_t WorkersLegacy::m_rwlock; -//uv_timer_t *Workers::m_timer = nullptr; -xmrig::Controller *WorkersLegacy::m_controller = nullptr; - - -//xmrig::Job WorkersLegacy::job() -//{ -// uv_rwlock_rdlock(&m_rwlock); -// xmrig::Job job = m_job; -// uv_rwlock_rdunlock(&m_rwlock); - -// return job; -//} - - -//size_t WorkersLegacy::hugePages() -//{ -// uv_mutex_lock(&m_mutex); -// const size_t hugePages = m_status.hugePages; -// uv_mutex_unlock(&m_mutex); - -// return hugePages; -//} - - -//size_t WorkersLegacy::threads() -//{ -// uv_mutex_lock(&m_mutex); -// const size_t threads = m_status.threads; -// 
uv_mutex_unlock(&m_mutex); - -// return threads; -//} - - -//void Workers::pause() -//{ -// m_active = false; - -// xmrig::Nonce::pause(true); -// xmrig::Nonce::touch(); -//} - - -//void Workers::setEnabled(bool enabled) -//{ -// if (m_enabled == enabled) { -// return; -// } - -// m_enabled = enabled; -// if (!m_active) { -// return; -// } - -// xmrig::Nonce::pause(!enabled); -// xmrig::Nonce::touch(); -//} - - -//void Workers::setJob(const xmrig::Job &job, bool donate) -//{ -// uv_rwlock_wrlock(&m_rwlock); - -// m_job = job; -// m_job.setIndex(donate ? 1 : 0); - -// xmrig::Nonce::reset(donate ? 1 : 0); - -// uv_rwlock_wrunlock(&m_rwlock); - -// m_active = true; -// if (!m_enabled) { -// return; -// } - -// xmrig::Nonce::pause(false); -//} - - -void WorkersLegacy::start(xmrig::Controller *controller) -{ - using namespace xmrig; - -# ifdef APP_DEBUG - LOG_NOTICE("THREADS ------------------------------------------------------------------"); - for (const xmrig::IThread *thread : controller->config()->threads()) { - thread->print(); - } - LOG_NOTICE("--------------------------------------------------------------------------"); -# endif - - m_controller = controller; - - m_status.algo = xmrig::Algorithm::RX_WOW; // FIXME algo - const CpuThreads &threads = controller->config()->cpu().threads().get(m_status.algo); - m_status.threads = threads.size(); - - for (const CpuThread &thread : threads) { - m_status.ways += thread.intensity(); - } - -// m_hashrate = new Hashrate(threads.size(), controller); - - uv_mutex_init(&m_mutex); - uv_rwlock_init(&m_rwlock); - -// m_timer = new uv_timer_t; -// uv_timer_init(uv_default_loop(), m_timer); -// uv_timer_start(m_timer, Workers::onTick, 500, 500); - -// size_t index = 0; -// for (const CpuThread &thread : threads) { -// Thread *handle = new Thread(index++, CpuLaunchData(m_status.algo, controller->config()->cpu(), thread)); - -// m_workers.push_back(handle); -// handle->start(WorkersLegacy::onReady); -// } -} - - -//void 
Workers::stop() -//{ -// xmrig::Handle::close(m_timer); -// m_hashrate->stop(); - -// xmrig::Nonce::stop(); - -// for (size_t i = 0; i < m_workers.size(); ++i) { -// m_workers[i]->join(); -// } -//} - - -//#ifdef XMRIG_FEATURE_API -//void WorkersLegacy::threadsSummary(rapidjson::Document &doc) -//{ -// uv_mutex_lock(&m_mutex); -// const uint64_t pages[2] = { m_status.hugePages, m_status.pages }; -// const uint64_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo); -// uv_mutex_unlock(&m_mutex); - -// auto &allocator = doc.GetAllocator(); - -// rapidjson::Value hugepages(rapidjson::kArrayType); -// hugepages.PushBack(pages[0], allocator); -// hugepages.PushBack(pages[1], allocator); - -// doc.AddMember("hugepages", hugepages, allocator); -// doc.AddMember("memory", memory, allocator); -//} -//#endif - - -//void WorkersLegacy::onTick(uv_timer_t *) -//{ -// using namespace xmrig; - -// for (Thread *handle : m_workers) { -// if (!handle->worker()) { -// return; -// } - -// m_hashrate->add(handle->index(), handle->worker()->hashCount(), handle->worker()->timestamp()); -// } - -// if ((m_ticks++ & 0xF) == 0) { -// m_hashrate->updateHighest(); -// } -//} - - -void WorkersLegacy::start(xmrig::IWorker *worker) -{ -// const Worker *w = static_cast(worker); - - uv_mutex_lock(&m_mutex); - m_status.started++; -// m_status.pages += w->memory().pages; -// m_status.hugePages += w->memory().hugePages; - - if (m_status.started == m_status.threads) { - const double percent = (double) m_status.hugePages / m_status.pages * 100.0; - const size_t memory = m_status.ways * xmrig::CnAlgo<>::memory(m_status.algo) / 1024; - -# ifdef XMRIG_ALGO_RANDOMX - if (m_status.algo.family() == xmrig::Algorithm::RANDOM_X) { - LOG_INFO(GREEN_BOLD("READY (CPU)") " threads " CYAN_BOLD("%zu(%zu)") " memory " CYAN_BOLD("%zu KB") "", - m_status.threads, m_status.ways, memory); - } else -# endif - { - LOG_INFO(GREEN_BOLD("READY (CPU)") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu 
%1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") "", - m_status.threads, m_status.ways, - (m_status.hugePages == m_status.pages ? GREEN_BOLD_S : (m_status.hugePages == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), - m_status.hugePages, m_status.pages, percent, memory); - } - } - - uv_mutex_unlock(&m_mutex); - - worker->start(); -} diff --git a/src/workers/WorkersLegacy.h b/src/workers/WorkersLegacy.h deleted file mode 100644 index d8ab1e59..00000000 --- a/src/workers/WorkersLegacy.h +++ /dev/null @@ -1,113 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef XMRIG_WORKERSLEGACY_H -#define XMRIG_WORKERSLEGACY_H - - -#include -#include -#include -#include - -#ifdef XMRIG_ALGO_RANDOMX -# include -#endif - -#include "backend/common/Thread.h" -#include "backend/cpu/CpuLaunchData.h" -#include "base/net/stratum/Job.h" -#include "net/JobResult.h" -#include "rapidjson/fwd.h" - - -//class Hashrate; - - -namespace xmrig { - class IWorker; - class Controller; - class ThreadHandle; -} - - -class WorkersLegacy -{ -public: -// static size_t hugePages(); -// static size_t threads(); -// static void pause(); -// static void printHashrate(bool detail); -// static void setEnabled(bool enabled); -// static void setJob(const xmrig::Job &job, bool donate); - static void start(xmrig::Controller *controller); -// static void stop(); -// static xmrig::Job job(); - -// static inline bool isEnabled() { return m_enabled; } -// static inline Hashrate *hashrate() { return m_hashrate; } - -//# ifdef XMRIG_FEATURE_API -// static void threadsSummary(rapidjson::Document &doc); -//# endif - -private: -// static void onReady(void *arg); -// static void onTick(uv_timer_t *handle); - static void start(xmrig::IWorker *worker); - - class LaunchStatus - { - public: - inline LaunchStatus() : - hugePages(0), - pages(0), - started(0), - threads(0), - ways(0) - {} - - size_t hugePages; - size_t pages; - size_t started; - size_t threads; - size_t ways; - xmrig::Algorithm algo; - }; - - static bool m_active; - static bool m_enabled; -// static Hashrate *m_hashrate; - static xmrig::Job m_job; - static LaunchStatus m_status; - static std::vector* > m_workers; - static uint64_t m_ticks; - static uv_mutex_t m_mutex; - static uv_rwlock_t m_rwlock; -// static uv_timer_t *m_timer; - static xmrig::Controller *m_controller; -}; - - -#endif /* XMRIG_WORKERSLEGACY_H */ From 630a5dce676d5492fc81518853fef329e9dd4240 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 17 Jul 2019 15:09:37 +0700 Subject: [PATCH 033/172] Improved log. 
--- src/backend/cpu/CpuBackend.cpp | 17 +++++++++++++++-- src/core/config/Config.cpp | 9 --------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index bdb592ff..f325d0ff 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -32,6 +32,7 @@ #include "backend/cpu/CpuBackend.h" #include "base/io/log/Log.h" #include "base/net/stratum/Job.h" +#include "base/tools/Chrono.h" #include "base/tools/String.h" #include "core/config/Config.h" #include "core/Controller.h" @@ -55,6 +56,7 @@ public: started = 0; threads = 0; ways = 0; + ts = Chrono::steadyMSecs(); } size_t hugePages; @@ -63,6 +65,7 @@ public: size_t started; size_t threads; size_t ways; + uint64_t ts; }; @@ -108,6 +111,14 @@ public: profileName = cpu.threads().profileName(job.algorithm()); threads = cpu.threads().get(profileName); + if (profileName.isNull() || threads.empty()) { + workers.stop(); + + LOG_WARN(YELLOW_BOLD_S "CPU disabled, no suitable configuration for algo %s", job.algorithm().shortName()); + + return; + } + LOG_INFO(GREEN_BOLD("CPU") " use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"), profileName.data(), threads.size(), @@ -217,10 +228,12 @@ void xmrig::CpuBackend::start(IWorker *worker) const double percent = d_ptr->status.hugePages == 0 ? 0.0 : static_cast(d_ptr->status.hugePages) / d_ptr->status.pages * 100.0; const size_t memory = d_ptr->status.ways * d_ptr->status.memory / 1024; - LOG_INFO(GREEN_BOLD("CPU READY") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu %1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") "", + LOG_INFO(GREEN_BOLD("CPU READY") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu %1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") BLACK_BOLD(" (%" PRIu64 " ms)"), d_ptr->status.threads, d_ptr->status.ways, (d_ptr->status.hugePages == d_ptr->status.pages ? 
GREEN_BOLD_S : (d_ptr->status.hugePages == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), - d_ptr->status.hugePages, d_ptr->status.pages, percent, memory); + d_ptr->status.hugePages, d_ptr->status.pages, percent, memory, + Chrono::steadyMSecs() - d_ptr->status.ts + ); } uv_mutex_unlock(&d_ptr->mutex); diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 02ef9c90..09728b4e 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -79,15 +79,6 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const doc.AddMember("background", isBackground(), allocator); doc.AddMember("colors", Log::colors, allocator); -// if (affinity() != -1L) { -// snprintf(affinity_tmp, sizeof(affinity_tmp) - 1, "0x%" PRIX64, affinity()); -// doc.AddMember("cpu-affinity", StringRef(affinity_tmp), allocator); -// } -// else { -// doc.AddMember("cpu-affinity", kNullType, allocator); -// } - - doc.AddMember("cpu", m_cpu.toJSON(doc), allocator); doc.AddMember("donate-level", m_pools.donateLevel(), allocator); From 6f93b7b38d164c8f6ed90885375ddbe63126b9df Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 17 Jul 2019 15:28:59 +0700 Subject: [PATCH 034/172] Removed unused code. 
--- CMakeLists.txt | 2 - src/Summary.cpp | 28 +-- src/backend/cpu/interfaces/ICpuInfo.h | 1 - src/backend/cpu/platform/AdvancedCpuInfo.cpp | 39 ---- src/backend/cpu/platform/AdvancedCpuInfo.h | 1 - src/backend/cpu/platform/BasicCpuInfo.cpp | 8 - src/backend/cpu/platform/BasicCpuInfo.h | 1 - src/backend/cpu/platform/BasicCpuInfo_arm.cpp | 6 - src/core/config/Config.cpp | 155 +------------ src/core/config/Config.h | 42 +--- src/workers/CpuThreadLegacy.cpp | 219 ------------------ src/workers/CpuThreadLegacy.h | 108 --------- 12 files changed, 19 insertions(+), 591 deletions(-) delete mode 100644 src/workers/CpuThreadLegacy.cpp delete mode 100644 src/workers/CpuThreadLegacy.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 89be20ec..4d205f55 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -42,7 +42,6 @@ set(HEADERS src/net/strategies/DonateStrategy.h src/Summary.h src/version.h - src/workers/CpuThreadLegacy.h ) set(HEADERS_CRYPTO @@ -88,7 +87,6 @@ set(SOURCES src/net/NetworkState.cpp src/net/strategies/DonateStrategy.cpp src/Summary.cpp - src/workers/CpuThreadLegacy.cpp src/xmrig.cpp ) diff --git a/src/Summary.cpp b/src/Summary.cpp index af7cad09..36f59ba3 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -84,29 +84,11 @@ static void print_cpu(xmrig::Config *) static void print_threads(xmrig::Config *config) { - if (config->threadsMode() != xmrig::Config::Advanced) { - char buf[32] = { 0 }; -// if (config->affinity() != -1L) { -// snprintf(buf, sizeof buf, ", affinity=0x%" PRIX64, config->affinity()); -// } - - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%d") WHITE_BOLD(", av=%d, %sdonate=%d%%") WHITE_BOLD("%s"), - "THREADS", - config->threadsCount(), - config->algoVariant(), - config->pools().donateLevel() == 0 ? 
RED_BOLD_S : "", - config->pools().donateLevel(), - buf - ); - } - else { - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") CYAN_BOLD("%d") WHITE_BOLD(", %sdonate=%d%%"), - "THREADS", - config->threadsCount(), - config->pools().donateLevel() == 0 ? RED_BOLD_S : "", - config->pools().donateLevel() - ); - } + xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%s%d%%"), + "DONATE", + config->pools().donateLevel() == 0 ? RED_BOLD_S : "", + config->pools().donateLevel() + ); # ifdef XMRIG_FEATURE_ASM if (config->cpu().assembly() == xmrig::Assembly::AUTO) { diff --git a/src/backend/cpu/interfaces/ICpuInfo.h b/src/backend/cpu/interfaces/ICpuInfo.h index 74f6baee..5848db89 100644 --- a/src/backend/cpu/interfaces/ICpuInfo.h +++ b/src/backend/cpu/interfaces/ICpuInfo.h @@ -55,7 +55,6 @@ public: virtual size_t L2() const = 0; virtual size_t L3() const = 0; virtual size_t nodes() const = 0; - virtual size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const = 0; virtual size_t sockets() const = 0; virtual size_t threads() const = 0; }; diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp index f3c4ed23..45b0dd66 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -80,45 +80,6 @@ xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : } -size_t xmrig::AdvancedCpuInfo::optimalThreadsCount(size_t memSize, int maxCpuUsage) const -{ - if (threads() == 1) { - return 1; - } - - size_t cache = 0; - if (m_L3) { - cache = m_L2_exclusive ? 
(m_L2 + m_L3) : m_L3; - } - else { - cache = m_L2; - } - - size_t count = 0; - - if (cache) { - count = cache / memSize; - - if (cache % memSize >= memSize / 2) { - count++; - } - } - else { - count = threads() / 2; - } - - if (count > (size_t) threads()) { - count = threads(); - } - - if (((float) count / threads() * 100) > maxCpuUsage) { - count = (int) ceil((float) threads() * (maxCpuUsage / 100.0)); - } - - return count < 1 ? 1 : count; -} - - xmrig::CpuThreads xmrig::AdvancedCpuInfo::threads(const Algorithm &algorithm) const { if (threads() == 1) { diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.h b/src/backend/cpu/platform/AdvancedCpuInfo.h index 9852f6bd..889fba00 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.h +++ b/src/backend/cpu/platform/AdvancedCpuInfo.h @@ -38,7 +38,6 @@ public: AdvancedCpuInfo(); protected: - size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const override; CpuThreads threads(const Algorithm &algorithm) const override; inline Assembly::Id assembly() const override { return m_assembly; } diff --git a/src/backend/cpu/platform/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp index 369392b6..f30466fe 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -153,14 +153,6 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : } -size_t xmrig::BasicCpuInfo::optimalThreadsCount(size_t memSize, int maxCpuUsage) const -{ - const size_t count = threads() / 2; - - return count < 1 ? 
1 : count; -} - - xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &algorithm) const { if (threads() == 1) { diff --git a/src/backend/cpu/platform/BasicCpuInfo.h b/src/backend/cpu/platform/BasicCpuInfo.h index 886d59c3..12d0e037 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.h +++ b/src/backend/cpu/platform/BasicCpuInfo.h @@ -38,7 +38,6 @@ public: BasicCpuInfo(); protected: - size_t optimalThreadsCount(size_t memSize, int maxCpuUsage) const override; CpuThreads threads(const Algorithm &algorithm) const override; inline Assembly::Id assembly() const override { return m_assembly; } diff --git a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp index 6702f6f0..3d733535 100644 --- a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp @@ -57,12 +57,6 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : } -size_t xmrig::BasicCpuInfo::optimalThreadsCount(size_t memSize, int maxCpuUsage) const -{ - return threads(); -} - - xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &algorithm) const { return CpuThreads(threads()); diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 09728b4e..d6336b67 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -36,12 +36,9 @@ #include "rapidjson/document.h" #include "rapidjson/filewritestream.h" #include "rapidjson/prettywriter.h" -#include "workers/CpuThreadLegacy.h" -xmrig::Config::Config() : - m_algoVariant(CnHash::AV_AUTO), - m_shouldSave(false) +xmrig::Config::Config() : BaseConfig() { } @@ -54,10 +51,7 @@ bool xmrig::Config::read(const IJsonReader &reader, const char *fileName) m_cpu.read(reader.getValue("cpu")); - setAlgoVariant(reader.getInt("av")); - setThreads(reader.getValue("threads")); - - return finalize(); + return true; } @@ -72,148 +66,21 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const Value api(kObjectType); api.AddMember("id", m_apiId.toJSON(), 
allocator); api.AddMember("worker-id", m_apiWorkerId.toJSON(), allocator); - doc.AddMember("api", api, allocator); - doc.AddMember("http", m_http.toJSON(doc), allocator); - doc.AddMember("autosave", isAutoSave(), allocator); - doc.AddMember("av", algoVariant(), allocator); - doc.AddMember("background", isBackground(), allocator); - doc.AddMember("colors", Log::colors, allocator); - - doc.AddMember("cpu", m_cpu.toJSON(doc), allocator); + doc.AddMember("api", api, allocator); + doc.AddMember("autosave", isAutoSave(), allocator); + doc.AddMember("background", isBackground(), allocator); + doc.AddMember("colors", Log::colors, allocator); + doc.AddMember("cpu", m_cpu.toJSON(doc), allocator); doc.AddMember("donate-level", m_pools.donateLevel(), allocator); doc.AddMember("donate-over-proxy", m_pools.proxyDonate(), allocator); + doc.AddMember("http", m_http.toJSON(doc), allocator); doc.AddMember("log-file", m_logFile.toJSON(), allocator); doc.AddMember("pools", m_pools.toJSON(doc), allocator); doc.AddMember("print-time", printTime(), allocator); doc.AddMember("retries", m_pools.retries(), allocator); doc.AddMember("retry-pause", m_pools.retryPause(), allocator); - - if (threadsMode() != Simple) { - Value threads(kArrayType); - - for (const IThread *thread : m_threads.list) { - threads.PushBack(thread->toConfig(doc), allocator); - } - - doc.AddMember("threads", threads, allocator); - } - else { - doc.AddMember("threads", threadsCount(), allocator); - } - - doc.AddMember("user-agent", m_userAgent.toJSON(), allocator); - doc.AddMember("syslog", isSyslog(), allocator); - doc.AddMember("watch", m_watch, allocator); + doc.AddMember("syslog", isSyslog(), allocator); + doc.AddMember("user-agent", m_userAgent.toJSON(), allocator); + doc.AddMember("watch", m_watch, allocator); } - - -bool xmrig::Config::finalize() -{ - Algorithm algorithm(Algorithm::RX_WOW); // FIXME algo - - if (!m_threads.cpu.empty()) { - m_threads.mode = Advanced; - - for (size_t i = 0; i < m_threads.cpu.size(); 
++i) { - m_threads.list.push_back(CpuThreadLegacy::createFromData(i, algorithm, m_threads.cpu[i], m_cpu.priority(), !m_cpu.isHwAES())); - } - - return true; - } - - const CnHash::AlgoVariant av = getAlgoVariant(); - m_threads.mode = m_threads.count ? Simple : Automatic; - - const size_t size = CpuThreadLegacy::multiway(av) * CnAlgo<>::memory(algorithm) / 1024; // FIXME MEMORY - - if (!m_threads.count) { - m_threads.count = Cpu::info()->optimalThreadsCount(size, 100); - } -// else if (m_safe) { -// const size_t count = Cpu::info()->optimalThreadsCount(size, m_maxCpuUsage); -// if (m_threads.count > count) { -// m_threads.count = count; -// } -// } - - for (size_t i = 0; i < m_threads.count; ++i) { - m_threads.list.push_back(CpuThreadLegacy::createFromAV(i, algorithm, av, m_threads.mask, m_cpu.priority(), m_cpu.assembly())); - } - - m_shouldSave = m_threads.mode == Automatic; - - return true; -} - - -void xmrig::Config::setAlgoVariant(int av) -{ - if (av >= CnHash::AV_AUTO && av < CnHash::AV_MAX) { - m_algoVariant = static_cast(av); - } -} - - -void xmrig::Config::setThreads(const rapidjson::Value &threads) -{ - if (threads.IsArray()) { - m_threads.cpu.clear(); - - for (const rapidjson::Value &value : threads.GetArray()) { - if (!value.IsObject()) { - continue; - } - - if (value.HasMember("low_power_mode")) { - auto data = CpuThreadLegacy::parse(value); - - if (data.valid) { - m_threads.cpu.push_back(std::move(data)); - } - } - } - } - else if (threads.IsUint()) { - const unsigned count = threads.GetUint(); - if (count < 1024) { - m_threads.count = count; - } - } -} - - -xmrig::CnHash::AlgoVariant xmrig::Config::getAlgoVariant() const -{ -# ifdef XMRIG_ALGO_CN_LITE -// if (m_algorithm.algo() == xmrig::CRYPTONIGHT_LITE) { // FIXME -// return getAlgoVariantLite(); -// } -# endif - - if (m_algoVariant <= CnHash::AV_AUTO || m_algoVariant >= CnHash::AV_MAX) { - return Cpu::info()->hasAES() ? 
CnHash::AV_SINGLE : CnHash::AV_SINGLE_SOFT; - } - -// if (m_safe && !Cpu::info()->hasAES() && m_algoVariant <= AV_DOUBLE) { -// return static_cast(m_algoVariant + 2); -// } - - return m_algoVariant; -} - - -#ifdef XMRIG_ALGO_CN_LITE -xmrig::CnHash::AlgoVariant xmrig::Config::getAlgoVariantLite() const -{ - if (m_algoVariant <= CnHash::AV_AUTO || m_algoVariant >= CnHash::AV_MAX) { - return Cpu::info()->hasAES() ? CnHash::AV_DOUBLE : CnHash::AV_DOUBLE_SOFT; - } - -// if (m_safe && !Cpu::info()->hasAES() && m_algoVariant <= AV_DOUBLE) { -// return static_cast(m_algoVariant + 2); -// } - - return m_algoVariant; -} -#endif diff --git a/src/core/config/Config.h b/src/core/config/Config.h index aa547796..e6b5c735 100644 --- a/src/core/config/Config.h +++ b/src/core/config/Config.h @@ -27,13 +27,11 @@ #include -#include #include "backend/cpu/CpuConfig.h" #include "base/kernel/config/BaseConfig.h" #include "rapidjson/fwd.h" -#include "workers/CpuThreadLegacy.h" namespace xmrig { @@ -45,51 +43,17 @@ class IThread; class Config : public BaseConfig { public: - enum ThreadsMode { - Automatic, - Simple, - Advanced - }; - - Config(); bool read(const IJsonReader &reader, const char *fileName) override; void getJSON(rapidjson::Document &doc) const override; - inline CnHash::AlgoVariant algoVariant() const { return m_algoVariant; } - inline bool isShouldSave() const { return (m_shouldSave || m_upgrade || m_cpu.isShouldSave()) && isAutoSave(); } - inline const CpuConfig &cpu() const { return m_cpu; } - inline const std::vector &threads() const { return m_threads.list; } - inline int threadsCount() const { return static_cast(m_threads.list.size()); } - inline ThreadsMode threadsMode() const { return m_threads.mode; } + inline bool isShouldSave() const { return (m_shouldSave || m_upgrade || m_cpu.isShouldSave()) && isAutoSave(); } + inline const CpuConfig &cpu() const { return m_cpu; } private: - bool finalize(); - void setAlgoVariant(int av); - void setThreads(const rapidjson::Value 
&threads); - - CnHash::AlgoVariant getAlgoVariant() const; -# ifdef XMRIG_ALGO_CN_LITE - CnHash::AlgoVariant getAlgoVariantLite() const; -# endif - - struct Threads - { - inline Threads() : mask(-1L), count(0), mode(Automatic) {} - - int64_t mask; - size_t count; - std::vector cpu; - std::vector list; - ThreadsMode mode; - }; - - - CnHash::AlgoVariant m_algoVariant; - bool m_shouldSave; + bool m_shouldSave = false; CpuConfig m_cpu; - Threads m_threads; }; diff --git a/src/workers/CpuThreadLegacy.cpp b/src/workers/CpuThreadLegacy.cpp deleted file mode 100644 index b5d457c7..00000000 --- a/src/workers/CpuThreadLegacy.cpp +++ /dev/null @@ -1,219 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include - - -#include "base/io/log/Log.h" -#include "crypto/cn/CnHash.h" -#include "crypto/common/Assembly.h" -#include "crypto/common/VirtualMemory.h" -#include "rapidjson/document.h" -#include "workers/CpuThreadLegacy.h" - - -xmrig::CpuThreadLegacy::CpuThreadLegacy(size_t index, Algorithm algorithm, CnHash::AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly) : - m_algorithm(algorithm), - m_av(av), - m_assembly(assembly), - m_prefetch(prefetch), - m_softAES(softAES), - m_priority(priority), - m_affinity(affinity), - m_multiway(multiway), - m_index(index) -{ -} - - -xmrig::cn_hash_fun xmrig::CpuThreadLegacy::fn(const Algorithm &algorithm) const -{ - return CnHash::fn(algorithm, m_av, m_assembly); -} - - - -bool xmrig::CpuThreadLegacy::isSoftAES(CnHash::AlgoVariant av) -{ - return av == CnHash::AV_SINGLE_SOFT || av == CnHash::AV_DOUBLE_SOFT || av > CnHash::AV_PENTA; -} - - -xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromAV(size_t index, const Algorithm &algorithm, CnHash::AlgoVariant av, int64_t affinity, int priority, Assembly assembly) -{ - assert(av > CnHash::AV_AUTO && av < CnHash::AV_MAX); - - int64_t cpuId = -1L; - - if (affinity != -1L) { - size_t idx = 0; - - for (size_t i = 0; i < 64; i++) { - if (!(affinity & (1ULL << i))) { - continue; - } - - if (idx == index) { - cpuId = i; - break; - } - - idx++; - } - } - - return new CpuThreadLegacy(index, algorithm, av, multiway(av), cpuId, priority, isSoftAES(av), false, assembly); -} - - -xmrig::CpuThreadLegacy *xmrig::CpuThreadLegacy::createFromData(size_t index, const Algorithm &algorithm, const CpuThreadLegacy::Data &data, int priority, bool softAES) -{ - int av = CnHash::AV_AUTO; - const Multiway multiway = data.multiway; - - if (multiway <= DoubleWay) { - av = softAES ? (multiway + 2) : multiway; - } - else { - av = softAES ? 
(multiway + 5) : (multiway + 2); - } - - assert(av > CnHash::AV_AUTO && av < CnHash::AV_MAX); - - return new CpuThreadLegacy(index, algorithm, static_cast(av), multiway, data.affinity, priority, softAES, false, data.assembly); -} - - -xmrig::CpuThreadLegacy::Data xmrig::CpuThreadLegacy::parse(const rapidjson::Value &object) -{ - Data data; - - const auto &multiway = object["low_power_mode"]; - if (multiway.IsBool()) { - data.multiway = multiway.IsTrue() ? DoubleWay : SingleWay; - data.valid = true; - } - else if (multiway.IsUint()) { - data.setMultiway(multiway.GetInt()); - } - - if (!data.valid) { - return data; - } - - const auto &affinity = object["affine_to_cpu"]; - if (affinity.IsUint64()) { - data.affinity = affinity.GetInt64(); - } - -# ifdef XMRIG_FEATURE_ASM - data.assembly = object["asm"]; -# endif - - return data; -} - - -xmrig::IThread::Multiway xmrig::CpuThreadLegacy::multiway(CnHash::AlgoVariant av) -{ - switch (av) { - case CnHash::AV_SINGLE: - case CnHash::AV_SINGLE_SOFT: - return SingleWay; - - case CnHash::AV_DOUBLE_SOFT: - case CnHash::AV_DOUBLE: - return DoubleWay; - - case CnHash::AV_TRIPLE_SOFT: - case CnHash::AV_TRIPLE: - return TripleWay; - - case CnHash::AV_QUAD_SOFT: - case CnHash::AV_QUAD: - return QuadWay; - - case CnHash::AV_PENTA_SOFT: - case CnHash::AV_PENTA: - return PentaWay; - - default: - break; - } - - return SingleWay; -} - - -#ifdef APP_DEBUG -void xmrig::CpuThreadLegacy::print() const -{ - LOG_DEBUG(GREEN_BOLD("CPU thread: ") " index " WHITE_BOLD("%zu") ", multiway " WHITE_BOLD("%d") ", av " WHITE_BOLD("%d") ",", - index(), static_cast(multiway()), static_cast(m_av)); - -# ifdef XMRIG_FEATURE_ASM - LOG_DEBUG(" assembly: %s, affine_to_cpu: %" PRId64, m_assembly.toString(), affinity()); -# else - LOG_DEBUG(" affine_to_cpu: %" PRId64, affinity()); -# endif -} -#endif - - -#ifdef XMRIG_FEATURE_API -rapidjson::Value xmrig::CpuThreadLegacy::toAPI(rapidjson::Document &doc) const -{ - using namespace rapidjson; - - Value 
obj(kObjectType); - auto &allocator = doc.GetAllocator(); - - obj.AddMember("type", "cpu", allocator); - obj.AddMember("av", m_av, allocator); - obj.AddMember("low_power_mode", multiway(), allocator); - obj.AddMember("affine_to_cpu", affinity(), allocator); - obj.AddMember("priority", priority(), allocator); - obj.AddMember("soft_aes", isSoftAES(), allocator); - - return obj; -} -#endif - - -rapidjson::Value xmrig::CpuThreadLegacy::toConfig(rapidjson::Document &doc) const -{ - using namespace rapidjson; - - Value obj(kObjectType); - auto &allocator = doc.GetAllocator(); - - obj.AddMember("low_power_mode", multiway(), allocator); - obj.AddMember("affine_to_cpu", affinity() == -1L ? Value(kFalseType) : Value(affinity()), allocator); - -# ifdef XMRIG_FEATURE_ASM - obj.AddMember("asm", m_assembly.toJSON(), allocator); -# endif - - return obj; -} diff --git a/src/workers/CpuThreadLegacy.h b/src/workers/CpuThreadLegacy.h deleted file mode 100644 index b803a8c4..00000000 --- a/src/workers/CpuThreadLegacy.h +++ /dev/null @@ -1,108 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef XMRIG_CPUTHREADLEGACY_H -#define XMRIG_CPUTHREADLEGACY_H - - -#include "backend/common/interfaces/IThread.h" -#include "crypto/cn/CnHash.h" - - -struct cryptonight_ctx; - - -namespace xmrig { - - -class CpuThreadLegacy : public IThread -{ -public: - struct Data - { - inline Data() : valid(false), affinity(-1L), multiway(SingleWay) {} - - inline void setMultiway(int value) - { - if (value >= SingleWay && value <= PentaWay) { - multiway = static_cast(value); - valid = true; - } - } - - Assembly assembly; - bool valid; - int64_t affinity; - Multiway multiway; - }; - - - CpuThreadLegacy(size_t index, Algorithm algorithm, CnHash::AlgoVariant av, Multiway multiway, int64_t affinity, int priority, bool softAES, bool prefetch, Assembly assembly); - - cn_hash_fun fn(const Algorithm &algorithm) const; - - static bool isSoftAES(CnHash::AlgoVariant av); - static CpuThreadLegacy *createFromAV(size_t index, const Algorithm &algorithm, CnHash::AlgoVariant av, int64_t affinity, int priority, Assembly assembly); - static CpuThreadLegacy *createFromData(size_t index, const Algorithm &algorithm, const CpuThreadLegacy::Data &data, int priority, bool softAES); - static Data parse(const rapidjson::Value &object); - static Multiway multiway(CnHash::AlgoVariant av); - - inline bool isPrefetch() const { return m_prefetch; } - inline bool isSoftAES() const { return m_softAES; } - - inline Algorithm algorithm() const override { return m_algorithm; } - inline int priority() const override { return m_priority; } - inline int64_t affinity() const override { return m_affinity; } - inline Multiway multiway() const override { return m_multiway; } - inline size_t index() const override { return m_index; } - inline Type type() const override { return CPU; } - -protected: -# ifdef APP_DEBUG - void print() const override; -# endif - -# ifdef XMRIG_FEATURE_API - rapidjson::Value toAPI(rapidjson::Document &doc) const override; -# endif - - rapidjson::Value toConfig(rapidjson::Document 
&doc) const override; - -private: - const Algorithm m_algorithm; - const CnHash::AlgoVariant m_av; - const Assembly m_assembly; - const bool m_prefetch; - const bool m_softAES; - const int m_priority; - const int64_t m_affinity; - const Multiway m_multiway; - const size_t m_index; -}; - - -} /* namespace xmrig */ - - -#endif /* XMRIG_CPUTHREADLEGACY_H */ From 871bc3e1801c979a51e244b5c4485743684ccd62 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 18 Jul 2019 04:21:14 +0700 Subject: [PATCH 035/172] Fixed bugs. --- src/backend/common/Thread.h | 6 ++++-- src/backend/cpu/CpuWorker.cpp | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/backend/common/Thread.h b/src/backend/common/Thread.h index b62d880c..36367ece 100644 --- a/src/backend/common/Thread.h +++ b/src/backend/common/Thread.h @@ -29,11 +29,13 @@ #include +#include "backend/common/interfaces/IWorker.h" + + namespace xmrig { class IBackend; -class IWorker; template @@ -41,7 +43,7 @@ class Thread { public: inline Thread(IBackend *backend, size_t index, const T &config) : m_index(index), m_config(config), m_backend(backend) {} - inline ~Thread() { uv_thread_join(&m_thread); } + inline ~Thread() { uv_thread_join(&m_thread); delete m_worker; } inline const T &config() const { return m_config; } inline IBackend *backend() const { return m_backend; } diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index 356dfb1b..4318b8ce 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -158,7 +158,7 @@ void xmrig::CpuWorker::start() do { std::this_thread::sleep_for(std::chrono::milliseconds(200)); } - while (Nonce::isPaused()); + while (Nonce::isPaused() && Nonce::sequence(Nonce::CPU) > 0); if (Nonce::sequence(Nonce::CPU) == 0) { break; From f590cf58fb9f2b0899354402213087832610bf87 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 18 Jul 2019 19:11:45 +0700 Subject: [PATCH 036/172] Added support for threads restart if config changed. 
--- src/backend/common/Workers.cpp | 13 ++--- src/backend/common/Workers.h | 3 +- src/backend/common/interfaces/IBackend.h | 16 +++--- src/backend/cpu/CpuBackend.cpp | 73 ++++++++++-------------- src/backend/cpu/CpuBackend.h | 1 + src/backend/cpu/CpuConfig.cpp | 19 ++++++ src/backend/cpu/CpuConfig.h | 2 + src/backend/cpu/CpuLaunchData.cpp | 13 +++++ src/backend/cpu/CpuLaunchData.h | 4 ++ src/base/net/stratum/Pool.cpp | 3 +- src/core/Miner.cpp | 16 ++++++ src/core/Miner.h | 4 +- 12 files changed, 105 insertions(+), 62 deletions(-) diff --git a/src/backend/common/Workers.cpp b/src/backend/common/Workers.cpp index 629b564b..d70546d3 100644 --- a/src/backend/common/Workers.cpp +++ b/src/backend/common/Workers.cpp @@ -78,13 +78,6 @@ const xmrig::Hashrate *xmrig::Workers::hashrate() const } -template -void xmrig::Workers::add(const T &data) -{ - m_workers.push_back(new Thread(d_ptr->backend, m_workers.size(), data)); -} - - template void xmrig::Workers::setBackend(IBackend *backend) { @@ -93,8 +86,12 @@ void xmrig::Workers::setBackend(IBackend *backend) template -void xmrig::Workers::start() +void xmrig::Workers::start(const std::vector &data) { + for (const T &item : data) { + m_workers.push_back(new Thread(d_ptr->backend, m_workers.size(), item)); + } + d_ptr->hashrate = new Hashrate(m_workers.size()); for (Thread *worker : m_workers) { diff --git a/src/backend/common/Workers.h b/src/backend/common/Workers.h index c13f5e77..32d9458a 100644 --- a/src/backend/common/Workers.h +++ b/src/backend/common/Workers.h @@ -46,9 +46,8 @@ public: ~Workers(); const Hashrate *hashrate() const; - void add(const T &data); void setBackend(IBackend *backend); - void start(); + void start(const std::vector &data); void stop(); void tick(uint64_t ticks); diff --git a/src/backend/common/interfaces/IBackend.h b/src/backend/common/interfaces/IBackend.h index 8ad7bb53..6fe917cb 100644 --- a/src/backend/common/interfaces/IBackend.h +++ b/src/backend/common/interfaces/IBackend.h @@ -32,6 +32,7 
@@ namespace xmrig { +class Algorithm; class Hashrate; class IWorker; class Job; @@ -43,13 +44,14 @@ class IBackend public: virtual ~IBackend() = default; - virtual const Hashrate *hashrate() const = 0; - virtual const String &profileName() const = 0; - virtual void printHashrate(bool details) = 0; - virtual void setJob(const Job &job) = 0; - virtual void start(IWorker *worker) = 0; - virtual void stop() = 0; - virtual void tick(uint64_t ticks) = 0; + virtual bool isEnabled(const Algorithm &algorithm) const = 0; + virtual const Hashrate *hashrate() const = 0; + virtual const String &profileName() const = 0; + virtual void printHashrate(bool details) = 0; + virtual void setJob(const Job &job) = 0; + virtual void start(IWorker *worker) = 0; + virtual void stop() = 0; + virtual void tick(uint64_t ticks) = 0; }; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index f325d0ff..15a0c359 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -85,40 +85,8 @@ public: } - inline bool isReady(const Algorithm &nextAlgo) const + inline void start() { - if (!algo.isValid()) { - return false; - } - - if (nextAlgo == algo) { - return true; - } - - const CpuThreads &nextThreads = controller->config()->cpu().threads().get(nextAlgo); - - return algo.memory() == nextAlgo.memory() - && threads.size() == nextThreads.size() - && std::equal(threads.begin(), threads.end(), nextThreads.begin()); - } - - - inline void start(const Job &job) - { - const CpuConfig &cpu = controller->config()->cpu(); - - algo = job.algorithm(); - profileName = cpu.threads().profileName(job.algorithm()); - threads = cpu.threads().get(profileName); - - if (profileName.isNull() || threads.empty()) { - workers.stop(); - - LOG_WARN(YELLOW_BOLD_S "CPU disabled, no suitable configuration for algo %s", job.algorithm().shortName()); - - return; - } - LOG_INFO(GREEN_BOLD("CPU") " use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") 
WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"), profileName.data(), threads.size(), @@ -131,20 +99,18 @@ public: status.memory = algo.memory(); status.threads = threads.size(); - for (const CpuThread &thread : threads) { - workers.add(CpuLaunchData(controller->miner(), algo, cpu, thread)); - - status.ways += static_cast(thread.intensity()); + for (const CpuLaunchData &data : threads) { + status.ways += static_cast(data.intensity); } - workers.start(); + workers.start(threads); } Algorithm algo; Controller *controller; - CpuThreads threads; LaunchStatus status; + std::vector threads; String profileName; uv_mutex_t mutex; Workers workers; @@ -167,6 +133,12 @@ xmrig::CpuBackend::~CpuBackend() } +bool xmrig::CpuBackend::isEnabled(const Algorithm &algorithm) const +{ + return !d_ptr->controller->config()->cpu().threads().get(algorithm).empty(); +} + + const xmrig::Hashrate *xmrig::CpuBackend::hashrate() const { return d_ptr->workers.hashrate(); @@ -190,10 +162,10 @@ void xmrig::CpuBackend::printHashrate(bool details) Log::print(WHITE_BOLD_S "| CPU THREAD | AFFINITY | 10s H/s | 60s H/s | 15m H/s |"); size_t i = 0; - for (const CpuThread &thread : d_ptr->threads) { + for (const CpuLaunchData &data : d_ptr->threads) { Log::print("| %13zu | %8" PRId64 " | %7s | %7s | %7s |", i, - thread.affinity(), + data.affinity, Hashrate::format(hashrate()->calc(i, Hashrate::ShortInterval), num, sizeof num / 3), Hashrate::format(hashrate()->calc(i, Hashrate::MediumInterval), num + 8, sizeof num / 3), Hashrate::format(hashrate()->calc(i, Hashrate::LargeInterval), num + 8 * 2, sizeof num / 3) @@ -206,11 +178,26 @@ void xmrig::CpuBackend::printHashrate(bool details) void xmrig::CpuBackend::setJob(const Job &job) { - if (d_ptr->isReady(job.algorithm())) { + const CpuConfig &cpu = d_ptr->controller->config()->cpu(); + + std::vector threads = cpu.get(d_ptr->controller->miner(), job.algorithm()); + if (d_ptr->threads.size() == threads.size() && std::equal(d_ptr->threads.begin(), 
d_ptr->threads.end(), threads.begin())) { return; } - d_ptr->start(job); + d_ptr->algo = job.algorithm(); + d_ptr->profileName = cpu.threads().profileName(job.algorithm()); + + if (d_ptr->profileName.isNull() || threads.empty()) { + d_ptr->workers.stop(); + + LOG_WARN(YELLOW_BOLD_S "CPU disabled, no suitable configuration for algo %s", job.algorithm().shortName()); + + return; + } + + d_ptr->threads = std::move(threads); + d_ptr->start(); } diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h index aabccb49..543d4459 100644 --- a/src/backend/cpu/CpuBackend.h +++ b/src/backend/cpu/CpuBackend.h @@ -44,6 +44,7 @@ public: ~CpuBackend() override; protected: + bool isEnabled(const Algorithm &algorithm) const override; const Hashrate *hashrate() const override; const String &profileName() const override; void printHashrate(bool details) override; diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index b4a9c363..457f7ef4 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -100,6 +100,25 @@ rapidjson::Value xmrig::CpuConfig::toJSON(rapidjson::Document &doc) const } +std::vector xmrig::CpuConfig::get(const Miner *miner, const Algorithm &algorithm) const +{ + std::vector out; + const std::vector &threads = m_threads.get(algorithm); + + if (threads.empty()) { + return out; + } + + out.reserve(threads.size()); + + for (const CpuThread &thread : threads) { + out.push_back(CpuLaunchData(miner, algorithm, *this, thread)); + } + + return out; +} + + void xmrig::CpuConfig::read(const rapidjson::Value &value) { if (value.IsObject()) { diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h index 88222ab1..8ff8b77c 100644 --- a/src/backend/cpu/CpuConfig.h +++ b/src/backend/cpu/CpuConfig.h @@ -27,6 +27,7 @@ #include "backend/common/Threads.h" +#include "backend/cpu/CpuLaunchData.h" #include "backend/cpu/CpuThread.h" #include "crypto/common/Assembly.h" @@ -47,6 +48,7 @@ public: bool isHwAES() 
const; rapidjson::Value toJSON(rapidjson::Document &doc) const; + std::vector get(const Miner *miner, const Algorithm &algorithm) const; void read(const rapidjson::Value &value); inline bool isEnabled() const { return m_enabled; } diff --git a/src/backend/cpu/CpuLaunchData.cpp b/src/backend/cpu/CpuLaunchData.cpp index 68b8e7ae..6fa458aa 100644 --- a/src/backend/cpu/CpuLaunchData.cpp +++ b/src/backend/cpu/CpuLaunchData.cpp @@ -41,6 +41,19 @@ xmrig::CpuLaunchData::CpuLaunchData(const Miner *miner, const Algorithm &algorit } +bool xmrig::CpuLaunchData::isEqual(const CpuLaunchData &other) const +{ + return (algorithm.memory() == other.algorithm.memory() + && assembly == other.assembly + && hugePages == other.hugePages + && hwAES == other.hwAES + && intensity == other.intensity + && priority == other.priority + && affinity == other.affinity + ); +} + + xmrig::CnHash::AlgoVariant xmrig::CpuLaunchData::av() const { if (intensity <= 2) { diff --git a/src/backend/cpu/CpuLaunchData.h b/src/backend/cpu/CpuLaunchData.h index 208a68b7..bb18816a 100644 --- a/src/backend/cpu/CpuLaunchData.h +++ b/src/backend/cpu/CpuLaunchData.h @@ -46,10 +46,14 @@ class CpuLaunchData public: CpuLaunchData(const Miner *miner, const Algorithm &algorithm, const CpuConfig &config, const CpuThread &thread); + bool isEqual(const CpuLaunchData &other) const; CnHash::AlgoVariant av() const; inline constexpr static Nonce::Backend backend() { return Nonce::CPU; } + inline bool operator!=(const CpuLaunchData &other) const { return !isEqual(other); } + inline bool operator==(const CpuLaunchData &other) const { return isEqual(other); } + const Algorithm algorithm; const Assembly assembly; const bool hugePages; diff --git a/src/base/net/stratum/Pool.cpp b/src/base/net/stratum/Pool.cpp index b11e1159..4d15ea47 100644 --- a/src/base/net/stratum/Pool.cpp +++ b/src/base/net/stratum/Pool.cpp @@ -192,7 +192,8 @@ bool xmrig::Pool::isEqual(const Pool &other) const && m_rigId == other.m_rigId && m_url == other.m_url && 
m_user == other.m_user - && m_pollInterval == other.m_pollInterval); + && m_pollInterval == other.m_pollInterval + ); } diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 40321662..4135e8ab 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -99,6 +99,8 @@ public: xmrig::Miner::Miner(Controller *controller) : d_ptr(new MinerPrivate(controller)) { + controller->addListener(this); + d_ptr->timer = new Timer(this); d_ptr->backends.push_back(new CpuBackend(controller)); @@ -218,6 +220,20 @@ void xmrig::Miner::stop() } +void xmrig::Miner::onConfigChanged(Config *config, Config *previousConfig) +{ + if (config->pools() != previousConfig->pools() && config->pools().active() > 0) { + return; + } + + const Job job = this->job(); + + for (IBackend *backend : d_ptr->backends) { + backend->setJob(job); + } +} + + void xmrig::Miner::onTimer(const Timer *) { double maxHashrate = 0.0; diff --git a/src/core/Miner.h b/src/core/Miner.h index f32524a7..23497eae 100644 --- a/src/core/Miner.h +++ b/src/core/Miner.h @@ -29,6 +29,7 @@ #include +#include "base/kernel/interfaces/IBaseListener.h" #include "base/kernel/interfaces/ITimerListener.h" @@ -41,7 +42,7 @@ class MinerPrivate; class IBackend; -class Miner : public ITimerListener +class Miner : public ITimerListener, public IBaseListener { public: Miner(Controller *controller); @@ -57,6 +58,7 @@ public: void stop(); protected: + void onConfigChanged(Config *config, Config *previousConfig) override; void onTimer(const Timer *timer) override; private: From 0ab26a16193e79d7ddf848e35d390fdae5469c23 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 18 Jul 2019 22:35:15 +0700 Subject: [PATCH 037/172] Restored algo field in login request. 
--- src/backend/cpu/CpuWorker.cpp | 4 ++ .../kernel/interfaces/IStrategyListener.h | 3 +- src/base/net/stratum/Client.cpp | 13 ----- .../stratum/strategies/FailoverStrategy.cpp | 8 +++- .../net/stratum/strategies/FailoverStrategy.h | 6 +-- .../stratum/strategies/SinglePoolStrategy.cpp | 8 +++- .../stratum/strategies/SinglePoolStrategy.h | 6 +-- src/core/Miner.cpp | 48 +++++++++++++++++-- src/core/Miner.h | 2 + src/crypto/common/Algorithm.h | 2 +- src/net/Network.cpp | 15 ++++++ src/net/Network.h | 1 + src/net/strategies/DonateStrategy.cpp | 43 ++++++++++++++--- src/net/strategies/DonateStrategy.h | 5 +- 14 files changed, 129 insertions(+), 35 deletions(-) diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index 4318b8ce..e35c5155 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -174,6 +174,10 @@ void xmrig::CpuWorker::start() const Job &job = m_job.currentJob(); + if (job.algorithm().memory() != m_algorithm.memory()) { + break; + } + # ifdef XMRIG_ALGO_RANDOMX if (job.algorithm().family() == Algorithm::RANDOM_X) { randomx_calculate_hash(m_vm->get(), m_job.blob(), job.size(), m_hash); diff --git a/src/base/kernel/interfaces/IStrategyListener.h b/src/base/kernel/interfaces/IStrategyListener.h index 2e63449b..01e668d4 100644 --- a/src/base/kernel/interfaces/IStrategyListener.h +++ b/src/base/kernel/interfaces/IStrategyListener.h @@ -26,7 +26,7 @@ #define XMRIG_ISTRATEGYLISTENER_H -#include +#include "rapidjson/fwd.h" namespace xmrig { @@ -45,6 +45,7 @@ public: virtual void onActive(IStrategy *strategy, IClient *client) = 0; virtual void onJob(IStrategy *strategy, IClient *client, const Job &job) = 0; + virtual void onLogin(IStrategy *strategy, IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) = 0; virtual void onPause(IStrategy *strategy) = 0; virtual void onResultAccepted(IStrategy *strategy, IClient *client, const SubmitResult &result, const char *error) = 0; }; diff --git 
a/src/base/net/stratum/Client.cpp b/src/base/net/stratum/Client.cpp index c1519573..0be86eca 100644 --- a/src/base/net/stratum/Client.cpp +++ b/src/base/net/stratum/Client.cpp @@ -575,19 +575,6 @@ void xmrig::Client::login() params.AddMember("rigid", m_pool.rigId().toJSON(), allocator); } -//# ifdef XMRIG_PROXY_PROJECT FIXME -// if (m_pool.algorithm().variant() != xmrig::VARIANT_AUTO) -//# endif -// { -// Value algo(kArrayType); - -// for (const auto &a : m_pool.algorithms()) { -// algo.PushBack(StringRef(a.shortName()), allocator); -// } - -// params.AddMember("algo", algo, allocator); -// } - m_listener->onLogin(this, doc, params); JsonRequest::create(doc, 1, "login", params); diff --git a/src/base/net/stratum/strategies/FailoverStrategy.cpp b/src/base/net/stratum/strategies/FailoverStrategy.cpp index 9545e9e1..4a35f3a5 100644 --- a/src/base/net/stratum/strategies/FailoverStrategy.cpp +++ b/src/base/net/stratum/strategies/FailoverStrategy.cpp @@ -113,7 +113,7 @@ void xmrig::FailoverStrategy::resume() } -void xmrig::FailoverStrategy::setAlgo(const xmrig::Algorithm &algo) +void xmrig::FailoverStrategy::setAlgo(const Algorithm &algo) { for (IClient *client : m_pools) { client->setAlgo(algo); @@ -163,6 +163,12 @@ void xmrig::FailoverStrategy::onClose(IClient *client, int failures) } +void xmrig::FailoverStrategy::onLogin(IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) +{ + m_listener->onLogin(this, client, doc, params); +} + + void xmrig::FailoverStrategy::onJobReceived(IClient *client, const Job &job, const rapidjson::Value &) { if (m_active == client->id()) { diff --git a/src/base/net/stratum/strategies/FailoverStrategy.h b/src/base/net/stratum/strategies/FailoverStrategy.h index b1fe8bac..5336a634 100644 --- a/src/base/net/stratum/strategies/FailoverStrategy.h +++ b/src/base/net/stratum/strategies/FailoverStrategy.h @@ -51,9 +51,8 @@ public: void add(const Pool &pool); protected: - inline bool isActive() const override { return m_active >= 0; } - 
inline IClient *client() const override { return isActive() ? active() : m_pools[m_index]; } - inline void onLogin(IClient *, rapidjson::Document &, rapidjson::Value &) override {} + inline bool isActive() const override { return m_active >= 0; } + inline IClient *client() const override { return isActive() ? active() : m_pools[m_index]; } int64_t submit(const JobResult &result) override; void connect() override; @@ -64,6 +63,7 @@ protected: void onClose(IClient *client, int failures) override; void onJobReceived(IClient *client, const Job &job, const rapidjson::Value ¶ms) override; + void onLogin(IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) override; void onLoginSuccess(IClient *client) override; void onResultAccepted(IClient *client, const SubmitResult &result, const char *error) override; diff --git a/src/base/net/stratum/strategies/SinglePoolStrategy.cpp b/src/base/net/stratum/strategies/SinglePoolStrategy.cpp index 6c6a6fc1..5f09d174 100644 --- a/src/base/net/stratum/strategies/SinglePoolStrategy.cpp +++ b/src/base/net/stratum/strategies/SinglePoolStrategy.cpp @@ -84,7 +84,7 @@ void xmrig::SinglePoolStrategy::resume() } -void xmrig::SinglePoolStrategy::setAlgo(const xmrig::Algorithm &algo) +void xmrig::SinglePoolStrategy::setAlgo(const Algorithm &algo) { m_client->setAlgo(algo); } @@ -119,6 +119,12 @@ void xmrig::SinglePoolStrategy::onJobReceived(IClient *client, const Job &job, c } +void xmrig::SinglePoolStrategy::onLogin(IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) +{ + m_listener->onLogin(this, client, doc, params); +} + + void xmrig::SinglePoolStrategy::onLoginSuccess(IClient *client) { m_active = true; diff --git a/src/base/net/stratum/strategies/SinglePoolStrategy.h b/src/base/net/stratum/strategies/SinglePoolStrategy.h index af0bd7d6..04eef40e 100644 --- a/src/base/net/stratum/strategies/SinglePoolStrategy.h +++ b/src/base/net/stratum/strategies/SinglePoolStrategy.h @@ -45,9 +45,8 @@ public: ~SinglePoolStrategy() 
override; protected: - inline bool isActive() const override { return m_active; } - inline IClient *client() const override { return m_client; } - inline void onLogin(IClient *, rapidjson::Document &, rapidjson::Value &) override {} + inline bool isActive() const override { return m_active; } + inline IClient *client() const override { return m_client; } int64_t submit(const JobResult &result) override; void connect() override; @@ -58,6 +57,7 @@ protected: void onClose(IClient *client, int failures) override; void onJobReceived(IClient *client, const Job &job, const rapidjson::Value ¶ms) override; + void onLogin(IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) override; void onLoginSuccess(IClient *client) override; void onResultAccepted(IClient *client, const SubmitResult &result, const char *error) override; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 4135e8ab..891a0f34 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -62,17 +62,46 @@ public: } + bool isEnabled(const Algorithm &algorithm) const + { + for (IBackend *backend : backends) { + if (backend->isEnabled(algorithm)) { + return true; + } + } + + return false; + } + + + inline void rebuild() + { + algorithms.clear(); + + for (int i = 0; i < Algorithm::MAX; ++i) { + const Algorithm algo(static_cast(i)); + + if (isEnabled(algo)) { + algorithms.push_back(algo); + } + } + } + + inline void handleJobChange() { active = true; - if (enabled) { - Nonce::pause(false);; - } for (IBackend *backend : backends) { backend->setJob(job); } + if (enabled) { + Nonce::pause(false);; + } + + Nonce::reset(job.index()); + if (ticks == 0) { ticks++; timer->start(500, 500); @@ -80,6 +109,7 @@ public: } + Algorithms algorithms; bool active = false; bool enabled = true; Controller *controller; @@ -104,6 +134,8 @@ xmrig::Miner::Miner(Controller *controller) d_ptr->timer = new Timer(this); d_ptr->backends.push_back(new CpuBackend(controller)); + + d_ptr->rebuild(); } @@ -119,6 +151,12 @@ bool 
xmrig::Miner::isEnabled() const } +const xmrig::Algorithms &xmrig::Miner::algorithms() const +{ + return d_ptr->algorithms; +} + + const std::vector &xmrig::Miner::backends() const { return d_ptr->backends; @@ -202,8 +240,6 @@ void xmrig::Miner::setJob(const Job &job, bool donate) d_ptr->job = job; d_ptr->job.setIndex(index); - Nonce::reset(index); - uv_rwlock_wrunlock(&d_ptr->rwlock); d_ptr->handleJobChange(); @@ -222,6 +258,8 @@ void xmrig::Miner::stop() void xmrig::Miner::onConfigChanged(Config *config, Config *previousConfig) { + d_ptr->rebuild(); + if (config->pools() != previousConfig->pools() && config->pools().active() > 0) { return; } diff --git a/src/core/Miner.h b/src/core/Miner.h index 23497eae..dd195e29 100644 --- a/src/core/Miner.h +++ b/src/core/Miner.h @@ -31,6 +31,7 @@ #include "base/kernel/interfaces/IBaseListener.h" #include "base/kernel/interfaces/ITimerListener.h" +#include "crypto/common/Algorithm.h" namespace xmrig { @@ -49,6 +50,7 @@ public: ~Miner() override; bool isEnabled() const; + const Algorithms &algorithms() const; const std::vector &backends() const; Job job() const; void pause(); diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index a6ec22be..a1d8ded2 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -115,7 +115,7 @@ private: }; -typedef std::vector Algorithms; +typedef std::vector Algorithms; } /* namespace xmrig */ diff --git a/src/net/Network.cpp b/src/net/Network.cpp index 6622a080..a1c1ec8d 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -154,6 +154,21 @@ void xmrig::Network::onJobResult(const JobResult &result) } +void xmrig::Network::onLogin(IStrategy *, IClient *, rapidjson::Document &doc, rapidjson::Value ¶ms) +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + Value algo(kArrayType); + + for (const auto &a : m_controller->miner()->algorithms()) { + algo.PushBack(StringRef(a.shortName()), allocator); + } + + 
params.AddMember("algo", algo, allocator); +} + + void xmrig::Network::onPause(IStrategy *strategy) { if (m_donate && m_donate == strategy) { diff --git a/src/net/Network.h b/src/net/Network.h index eaec9472..bf61a9b6 100644 --- a/src/net/Network.h +++ b/src/net/Network.h @@ -63,6 +63,7 @@ protected: void onConfigChanged(Config *config, Config *previousConfig) override; void onJob(IStrategy *strategy, IClient *client, const Job &job) override; void onJobResult(const JobResult &result) override; + void onLogin(IStrategy *strategy, IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) override; void onPause(IStrategy *strategy) override; void onRequest(IApiRequest &request) override; void onResultAccepted(IStrategy *strategy, IClient *client, const SubmitResult &result, const char *error) override; diff --git a/src/net/strategies/DonateStrategy.cpp b/src/net/strategies/DonateStrategy.cpp index 2d0a5b43..4393cd46 100644 --- a/src/net/strategies/DonateStrategy.cpp +++ b/src/net/strategies/DonateStrategy.cpp @@ -23,7 +23,9 @@ */ +#include #include +#include #include "base/kernel/Platform.h" @@ -35,6 +37,7 @@ #include "base/tools/Timer.h" #include "core/config/Config.h" #include "core/Controller.h" +#include "core/Miner.h" #include "crypto/common/keccak.h" #include "net/Network.h" #include "net/strategies/DonateStrategy.h" @@ -57,10 +60,10 @@ static const char *kDonateHostTls = "donate.ssl.xmrig.com"; xmrig::DonateStrategy::DonateStrategy(Controller *controller, IStrategyListener *listener) : m_tls(false), m_userId(), - m_proxy(nullptr), m_donateTime(static_cast(controller->config()->pools().donateLevel()) * 60 * 1000), m_idleTime((100 - static_cast(controller->config()->pools().donateLevel())) * 60 * 1000), m_controller(controller), + m_proxy(nullptr), m_strategy(nullptr), m_listener(listener), m_state(STATE_NEW), @@ -78,10 +81,6 @@ xmrig::DonateStrategy::DonateStrategy(Controller *controller, IStrategyListener # endif m_pools.push_back(Pool(kDonateHost, 
3333, m_userId, nullptr, 0, true)); -// for (Pool &pool : m_pools) { -// pool.adjust(Algorithm()); // FIXME -// } - if (m_pools.size() > 1) { m_strategy = new FailoverStrategy(m_pools, 1, 2, this, true); } @@ -129,6 +128,8 @@ void xmrig::DonateStrategy::connect() void xmrig::DonateStrategy::setAlgo(const xmrig::Algorithm &algo) { + m_algorithm = algo; + m_strategy->setAlgo(algo); } @@ -185,13 +186,14 @@ void xmrig::DonateStrategy::onClose(IClient *, int failures) void xmrig::DonateStrategy::onLogin(IClient *, rapidjson::Document &doc, rapidjson::Value ¶ms) { + using namespace rapidjson; auto &allocator = doc.GetAllocator(); # ifdef XMRIG_FEATURE_TLS if (m_tls) { char buf[40] = { 0 }; snprintf(buf, sizeof(buf), "stratum+ssl://%s", m_pools[0].url().data()); - params.AddMember("url", rapidjson::Value(buf, allocator), allocator); + params.AddMember("url", Value(buf, allocator), allocator); } else { params.AddMember("url", m_pools[1].url().toJSON(), allocator); @@ -199,6 +201,14 @@ void xmrig::DonateStrategy::onLogin(IClient *, rapidjson::Document &doc, rapidjs # else params.AddMember("url", m_pools[0].url().toJSON(), allocator); # endif + + setAlgorithms(doc, params); +} + + +void xmrig::DonateStrategy::onLogin(IStrategy *, IClient *, rapidjson::Document &doc, rapidjson::Value ¶ms) +{ + setAlgorithms(doc, params); } @@ -250,6 +260,27 @@ void xmrig::DonateStrategy::idle(double min, double max) } +void xmrig::DonateStrategy::setAlgorithms(rapidjson::Document &doc, rapidjson::Value ¶ms) +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + Algorithms algorithms = m_controller->miner()->algorithms(); + const size_t index = static_cast(std::distance(algorithms.begin(), std::find(algorithms.begin(), algorithms.end(), m_algorithm))); + if (index > 0 && index < algorithms.size()) { + std::swap(algorithms[0], algorithms[index]); + } + + Value algo(kArrayType); + + for (const auto &a : algorithms) { + algo.PushBack(StringRef(a.shortName()), allocator); + } 
+ + params.AddMember("algo", algo, allocator); +} + + void xmrig::DonateStrategy::setJob(IClient *client, const Job &job) { if (isActive()) { diff --git a/src/net/strategies/DonateStrategy.h b/src/net/strategies/DonateStrategy.h index c9fc312d..5350aefa 100644 --- a/src/net/strategies/DonateStrategy.h +++ b/src/net/strategies/DonateStrategy.h @@ -70,6 +70,7 @@ protected: void onClose(IClient *client, int failures) override; void onLogin(IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) override; + void onLogin(IStrategy *strategy, IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) override; void onLoginSuccess(IClient *client) override; void onTimer(const Timer *timer) override; @@ -87,16 +88,18 @@ private: Client *createProxy(); void idle(double min, double max); + void setAlgorithms(rapidjson::Document &doc, rapidjson::Value ¶ms); void setJob(IClient *client, const Job &job); void setResult(IClient *client, const SubmitResult &result, const char *error); void setState(State state); + Algorithm m_algorithm; bool m_tls; char m_userId[65]; - IClient *m_proxy; const uint64_t m_donateTime; const uint64_t m_idleTime; Controller *m_controller; + IClient *m_proxy; IStrategy *m_strategy; IStrategyListener *m_listener; State m_state; From 88edde804fd750848f138a1d864fe1f090786332 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 18 Jul 2019 23:48:16 +0700 Subject: [PATCH 038/172] Fixed duplicated shares after donation round. 
--- src/core/Miner.cpp | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 891a0f34..1f7f77e9 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -88,7 +88,7 @@ public: } - inline void handleJobChange() + inline void handleJobChange(bool reset) { active = true; @@ -96,12 +96,17 @@ public: backend->setJob(job); } + if (reset) { + Nonce::reset(job.index()); + } + else { + Nonce::touch(); + } + if (enabled) { Nonce::pause(false);; } - Nonce::reset(job.index()); - if (ticks == 0) { ticks++; timer->start(500, 500); @@ -116,6 +121,7 @@ public: double maxHashrate = 0.0; Job job; std::vector backends; + String userJobId; Timer *timer = nullptr; uint64_t ticks = 0; uv_rwlock_t rwlock; @@ -236,13 +242,18 @@ void xmrig::Miner::setJob(const Job &job, bool donate) uv_rwlock_wrlock(&d_ptr->rwlock); const uint8_t index = donate ? 1 : 0; + const bool reset = !(d_ptr->job.index() == 1 && index == 0 && d_ptr->userJobId == job.id()); d_ptr->job = job; d_ptr->job.setIndex(index); + if (index == 0) { + d_ptr->userJobId = job.id(); + } + uv_rwlock_wrunlock(&d_ptr->rwlock); - d_ptr->handleJobChange(); + d_ptr->handleJobChange(reset); } From 691b2fabbf2817f2a1e8c5b69ce22fe9bd13042d Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 19 Jul 2019 00:39:27 +0700 Subject: [PATCH 039/172] Restored algorithm verification. 
--- src/base/kernel/interfaces/IClientListener.h | 2 + .../kernel/interfaces/IStrategyListener.h | 2 + src/base/net/stratum/Client.cpp | 39 ++++++++----------- src/base/net/stratum/Client.h | 2 +- .../stratum/strategies/FailoverStrategy.cpp | 6 +++ .../net/stratum/strategies/FailoverStrategy.h | 1 + .../stratum/strategies/SinglePoolStrategy.cpp | 6 +++ .../stratum/strategies/SinglePoolStrategy.h | 1 + src/core/Miner.cpp | 6 +++ src/core/Miner.h | 1 + src/net/Network.cpp | 10 +++++ src/net/Network.h | 1 + src/net/strategies/DonateStrategy.h | 2 + 13 files changed, 56 insertions(+), 23 deletions(-) diff --git a/src/base/kernel/interfaces/IClientListener.h b/src/base/kernel/interfaces/IClientListener.h index de4dd81d..3583be5a 100644 --- a/src/base/kernel/interfaces/IClientListener.h +++ b/src/base/kernel/interfaces/IClientListener.h @@ -35,6 +35,7 @@ namespace xmrig { +class Algorithm; class IClient; class Job; class SubmitResult; @@ -50,6 +51,7 @@ public: virtual void onLogin(IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) = 0; virtual void onLoginSuccess(IClient *client) = 0; virtual void onResultAccepted(IClient *client, const SubmitResult &result, const char *error) = 0; + virtual void onVerifyAlgorithm(const IClient *client, const Algorithm &algorithm, bool *ok) = 0; }; diff --git a/src/base/kernel/interfaces/IStrategyListener.h b/src/base/kernel/interfaces/IStrategyListener.h index 01e668d4..8b88b506 100644 --- a/src/base/kernel/interfaces/IStrategyListener.h +++ b/src/base/kernel/interfaces/IStrategyListener.h @@ -32,6 +32,7 @@ namespace xmrig { +class Algorithm; class IClient; class IStrategy; class Job; @@ -48,6 +49,7 @@ public: virtual void onLogin(IStrategy *strategy, IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) = 0; virtual void onPause(IStrategy *strategy) = 0; virtual void onResultAccepted(IStrategy *strategy, IClient *client, const SubmitResult &result, const char *error) = 0; + virtual void 
onVerifyAlgorithm(IStrategy *strategy, const IClient *client, const Algorithm &algorithm, bool *ok) = 0; }; diff --git a/src/base/net/stratum/Client.cpp b/src/base/net/stratum/Client.cpp index 0be86eca..618e132c 100644 --- a/src/base/net/stratum/Client.cpp +++ b/src/base/net/stratum/Client.cpp @@ -330,14 +330,15 @@ bool xmrig::Client::parseJob(const rapidjson::Value ¶ms, int *code) return false; } - if (params.HasMember("algo")) { - job.setAlgorithm(params["algo"].GetString()); + const char *algo = Json::getString(params, "algo"); + if (algo) { + job.setAlgorithm(algo); } job.setSeedHash(Json::getString(params, "seed_hash")); job.setHeight(Json::getUint64(params, "height")); - if (!verifyAlgorithm(job.algorithm())) { + if (!verifyAlgorithm(job.algorithm(), algo)) { *code = 6; close(); @@ -415,30 +416,24 @@ bool xmrig::Client::send(BIO *bio) } -bool xmrig::Client::verifyAlgorithm(const Algorithm &algorithm) const +bool xmrig::Client::verifyAlgorithm(const Algorithm &algorithm, const char *algo) const { -//# ifdef XMRIG_PROXY_PROJECT -// if (m_pool.algorithm().variant() == VARIANT_AUTO || m_id == -1) { -// return true; -// } -//# endif + if (!algorithm.isValid()) { + if (!isQuiet()) { + LOG_ERR("[%s] Unknown/unsupported algorithm \"%s\" detected, reconnect", url(), algo); + } -// if (m_pool.algorithm() == algorithm) { // FIXME -// return true; -// } + return false; + } -// if (isQuiet()) { -// return false; -// } + bool ok = true; + m_listener->onVerifyAlgorithm(this, algorithm, &ok); -// if (algorithm.isValid()) { -// LOG_ERR("Incompatible algorithm \"%s\" detected, reconnect", algorithm.name()); -// } -// else { -// LOG_ERR("Unknown/unsupported algorithm detected, reconnect"); -// } + if (!ok && !isQuiet()) { + LOG_ERR("[%s] Incompatible/disabled algorithm \"%s\" detected, reconnect", url(), algorithm.shortName()); + } - return true; + return ok; } diff --git a/src/base/net/stratum/Client.h b/src/base/net/stratum/Client.h index 841e0e0b..46030aba 100644 --- 
a/src/base/net/stratum/Client.h +++ b/src/base/net/stratum/Client.h @@ -92,7 +92,7 @@ private: bool parseJob(const rapidjson::Value ¶ms, int *code); bool parseLogin(const rapidjson::Value &result, int *code); bool send(BIO *bio); - bool verifyAlgorithm(const Algorithm &algorithm) const; + bool verifyAlgorithm(const Algorithm &algorithm, const char *algo) const; int resolve(const String &host); int64_t send(const rapidjson::Document &doc); int64_t send(size_t size); diff --git a/src/base/net/stratum/strategies/FailoverStrategy.cpp b/src/base/net/stratum/strategies/FailoverStrategy.cpp index 4a35f3a5..48be2ba3 100644 --- a/src/base/net/stratum/strategies/FailoverStrategy.cpp +++ b/src/base/net/stratum/strategies/FailoverStrategy.cpp @@ -202,3 +202,9 @@ void xmrig::FailoverStrategy::onResultAccepted(IClient *client, const SubmitResu { m_listener->onResultAccepted(this, client, result, error); } + + +void xmrig::FailoverStrategy::onVerifyAlgorithm(const IClient *client, const Algorithm &algorithm, bool *ok) +{ + m_listener->onVerifyAlgorithm(this, client, algorithm, ok); +} diff --git a/src/base/net/stratum/strategies/FailoverStrategy.h b/src/base/net/stratum/strategies/FailoverStrategy.h index 5336a634..283d4916 100644 --- a/src/base/net/stratum/strategies/FailoverStrategy.h +++ b/src/base/net/stratum/strategies/FailoverStrategy.h @@ -66,6 +66,7 @@ protected: void onLogin(IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) override; void onLoginSuccess(IClient *client) override; void onResultAccepted(IClient *client, const SubmitResult &result, const char *error) override; + void onVerifyAlgorithm(const IClient *client, const Algorithm &algorithm, bool *ok) override; private: inline IClient *active() const { return m_pools[static_cast(m_active)]; } diff --git a/src/base/net/stratum/strategies/SinglePoolStrategy.cpp b/src/base/net/stratum/strategies/SinglePoolStrategy.cpp index 5f09d174..c923e1c2 100644 --- 
a/src/base/net/stratum/strategies/SinglePoolStrategy.cpp +++ b/src/base/net/stratum/strategies/SinglePoolStrategy.cpp @@ -136,3 +136,9 @@ void xmrig::SinglePoolStrategy::onResultAccepted(IClient *client, const SubmitRe { m_listener->onResultAccepted(this, client, result, error); } + + +void xmrig::SinglePoolStrategy::onVerifyAlgorithm(const IClient *client, const Algorithm &algorithm, bool *ok) +{ + m_listener->onVerifyAlgorithm(this, client, algorithm, ok); +} diff --git a/src/base/net/stratum/strategies/SinglePoolStrategy.h b/src/base/net/stratum/strategies/SinglePoolStrategy.h index 04eef40e..ea808193 100644 --- a/src/base/net/stratum/strategies/SinglePoolStrategy.h +++ b/src/base/net/stratum/strategies/SinglePoolStrategy.h @@ -60,6 +60,7 @@ protected: void onLogin(IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) override; void onLoginSuccess(IClient *client) override; void onResultAccepted(IClient *client, const SubmitResult &result, const char *error) override; + void onVerifyAlgorithm(const IClient *client, const Algorithm &algorithm, bool *ok) override; private: bool m_active; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 1f7f77e9..b64d6b95 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -157,6 +157,12 @@ bool xmrig::Miner::isEnabled() const } +bool xmrig::Miner::isEnabled(const Algorithm &algorithm) const +{ + return std::find(d_ptr->algorithms.begin(), d_ptr->algorithms.end(), algorithm) != d_ptr->algorithms.end(); +} + + const xmrig::Algorithms &xmrig::Miner::algorithms() const { return d_ptr->algorithms; diff --git a/src/core/Miner.h b/src/core/Miner.h index dd195e29..333f9250 100644 --- a/src/core/Miner.h +++ b/src/core/Miner.h @@ -50,6 +50,7 @@ public: ~Miner() override; bool isEnabled() const; + bool isEnabled(const Algorithm &algorithm) const; const Algorithms &algorithms() const; const std::vector &backends() const; Job job() const; diff --git a/src/net/Network.cpp b/src/net/Network.cpp index 
a1c1ec8d..eaff6748 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -213,6 +213,16 @@ void xmrig::Network::onResultAccepted(IStrategy *, IClient *, const SubmitResult } +void xmrig::Network::onVerifyAlgorithm(IStrategy *, const IClient *, const Algorithm &algorithm, bool *ok) +{ + if (!m_controller->miner()->isEnabled(algorithm)) { + *ok = false; + + return; + } +} + + void xmrig::Network::setJob(IClient *client, const Job &job, bool donate) { if (job.height()) { diff --git a/src/net/Network.h b/src/net/Network.h index bf61a9b6..68967713 100644 --- a/src/net/Network.h +++ b/src/net/Network.h @@ -67,6 +67,7 @@ protected: void onPause(IStrategy *strategy) override; void onRequest(IApiRequest &request) override; void onResultAccepted(IStrategy *strategy, IClient *client, const SubmitResult &result, const char *error) override; + void onVerifyAlgorithm(IStrategy *strategy, const IClient *client, const Algorithm &algorithm, bool *ok) override; private: constexpr static int kTickInterval = 1 * 1000; diff --git a/src/net/strategies/DonateStrategy.h b/src/net/strategies/DonateStrategy.h index 5350aefa..134127bf 100644 --- a/src/net/strategies/DonateStrategy.h +++ b/src/net/strategies/DonateStrategy.h @@ -57,6 +57,8 @@ protected: inline void onJobReceived(IClient *client, const Job &job, const rapidjson::Value &) override { setJob(client, job); } inline void onResultAccepted(IClient *client, const SubmitResult &result, const char *error) override { setResult(client, result, error); } inline void onResultAccepted(IStrategy *, IClient *client, const SubmitResult &result, const char *error) override { setResult(client, result, error); } + inline void onVerifyAlgorithm(const IClient *, const Algorithm &, bool *) override {} + inline void onVerifyAlgorithm(IStrategy *, const IClient *, const Algorithm &, bool *) override {} inline void resume() override {} int64_t submit(const JobResult &result) override; From d9164c0b7bc30dd8d6bf3dbfdd442f9fde87c13d Mon Sep 17 
00:00:00 2001 From: XMRig Date: Fri, 19 Jul 2019 02:24:37 +0700 Subject: [PATCH 040/172] Restored "GET /1/summary" endpoint. --- src/api/Api.cpp | 10 +-- src/api/interfaces/IApiListener.h | 2 + src/api/interfaces/IApiRequest.h | 12 ++- src/api/requests/ApiRequest.cpp | 3 +- src/api/requests/ApiRequest.h | 7 +- src/api/requests/HttpApiRequest.cpp | 11 +++ src/api/v1/ApiRouter.cpp | 70 +----------------- src/api/v1/ApiRouter.h | 4 +- src/backend/common/Hashrate.cpp | 15 +++- src/backend/common/Hashrate.h | 4 + src/core/Miner.cpp | 111 ++++++++++++++++++++++++++++ src/core/Miner.h | 7 +- src/net/Network.cpp | 42 ++++++----- src/net/Network.h | 9 ++- 14 files changed, 199 insertions(+), 108 deletions(-) diff --git a/src/api/Api.cpp b/src/api/Api.cpp index a1aeb4c2..11fc2a69 100644 --- a/src/api/Api.cpp +++ b/src/api/Api.cpp @@ -120,7 +120,7 @@ void xmrig::Api::exec(IApiRequest &request) { using namespace rapidjson; - if (request.method() == IApiRequest::METHOD_GET && (request.url() == "/1/summary" || request.url() == "/api.json")) { + if (request.type() == IApiRequest::REQ_SUMMARY) { auto &allocator = request.doc().GetAllocator(); request.accept(); @@ -145,14 +145,6 @@ void xmrig::Api::exec(IApiRequest &request) features.PushBack("tls", allocator); # endif request.reply().AddMember("features", features, allocator); - - Value algorithms(kArrayType); - - for (int i = 0; i < Algorithm::MAX; ++i) { - algorithms.PushBack(StringRef(Algorithm(static_cast(i)).shortName()), allocator); - } - - request.reply().AddMember("algorithms", algorithms, allocator); } for (IApiListener *listener : m_listeners) { diff --git a/src/api/interfaces/IApiListener.h b/src/api/interfaces/IApiListener.h index 7897e375..bbf153a6 100644 --- a/src/api/interfaces/IApiListener.h +++ b/src/api/interfaces/IApiListener.h @@ -35,7 +35,9 @@ class IApiListener public: virtual ~IApiListener() = default; +# ifdef XMRIG_FEATURE_API virtual void onRequest(IApiRequest &request) = 0; +# endif }; diff --git 
a/src/api/interfaces/IApiRequest.h b/src/api/interfaces/IApiRequest.h index 2c2f5634..8e65a921 100644 --- a/src/api/interfaces/IApiRequest.h +++ b/src/api/interfaces/IApiRequest.h @@ -4,7 +4,9 @@ * Copyright 2014 Lucas Jones * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee - * Copyright 2016-2018 XMRig + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -50,6 +52,12 @@ public: }; + enum RequestType { + REQ_UNKNOWN, + REQ_SUMMARY + }; + + virtual ~IApiRequest() = default; virtual bool isDone() const = 0; @@ -57,9 +65,11 @@ public: virtual bool isRestricted() const = 0; virtual const rapidjson::Value &json() const = 0; virtual const String &url() const = 0; + virtual int version() const = 0; virtual Method method() const = 0; virtual rapidjson::Document &doc() = 0; virtual rapidjson::Value &reply() = 0; + virtual RequestType type() const = 0; virtual Source source() const = 0; virtual void accept() = 0; virtual void done(int status) = 0; diff --git a/src/api/requests/ApiRequest.cpp b/src/api/requests/ApiRequest.cpp index c092a334..3812e419 100644 --- a/src/api/requests/ApiRequest.cpp +++ b/src/api/requests/ApiRequest.cpp @@ -28,8 +28,7 @@ xmrig::ApiRequest::ApiRequest(Source source, bool restricted) : m_restricted(restricted), - m_source(source), - m_state(STATE_NEW) + m_source(source) { } diff --git a/src/api/requests/ApiRequest.h b/src/api/requests/ApiRequest.h index 1754aa9c..05716e29 100644 --- a/src/api/requests/ApiRequest.h +++ b/src/api/requests/ApiRequest.h @@ -43,10 +43,15 @@ protected: inline bool isDone() const override { return m_state == STATE_DONE; } inline bool isNew() const override { return m_state == STATE_NEW; } inline bool isRestricted() const override { return m_restricted; } + inline int version() const override { return m_version; } + inline 
RequestType type() const override { return m_type; } inline Source source() const override { return m_source; } inline void accept() override { m_state = STATE_ACCEPTED; } inline void done(int) override { m_state = STATE_DONE; } + int m_version = 1; + RequestType m_type = REQ_UNKNOWN; + private: enum State { STATE_NEW, @@ -56,7 +61,7 @@ private: bool m_restricted; Source m_source; - State m_state; + State m_state = STATE_NEW; }; diff --git a/src/api/requests/HttpApiRequest.cpp b/src/api/requests/HttpApiRequest.cpp index e4f2de1e..b4dc1810 100644 --- a/src/api/requests/HttpApiRequest.cpp +++ b/src/api/requests/HttpApiRequest.cpp @@ -35,6 +35,17 @@ xmrig::HttpApiRequest::HttpApiRequest(const HttpData &req, bool restricted) : m_res(req.id()), m_url(req.url.c_str()) { + if (method() == METHOD_GET) { + if (url() == "/1/summary" || url() == "/2/summary" || url() == "/api.json") { + m_type = REQ_SUMMARY; + } + } + + if (url().size() > 4) { + if (memcmp(url().data(), "/2/", 3) == 0) { + m_version = 2; + } + } } diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index 2a5bd3d0..0e1080a1 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -38,18 +38,6 @@ #include "version.h" -static inline rapidjson::Value normalize(double d) -{ - using namespace rapidjson; - - if (!std::isnormal(d)) { - return Value(kNullType); - } - - return Value(floor(d * 100.0) / 100.0); -} - - xmrig::ApiRouter::ApiRouter(Base *base) : m_base(base) { @@ -64,12 +52,7 @@ xmrig::ApiRouter::~ApiRouter() void xmrig::ApiRouter::onRequest(IApiRequest &request) { if (request.method() == IApiRequest::METHOD_GET) { - if (request.url() == "/1/summary" || request.url() == "/api.json") { - request.accept(); - getMiner(request.reply(), request.doc()); -// getHashrate(request.reply(), request.doc()); - } - else if (request.url() == "/1/threads") { + if (request.url() == "/1/threads") { request.accept(); getThreads(request.reply(), request.doc()); } @@ -96,57 +79,6 @@ void 
xmrig::ApiRouter::onRequest(IApiRequest &request) } -//void xmrig::ApiRouter::getHashrate(rapidjson::Value &reply, rapidjson::Document &doc) const -//{ -// using namespace rapidjson; -// auto &allocator = doc.GetAllocator(); - -// Value hashrate(kObjectType); -// Value total(kArrayType); -// Value threads(kArrayType); - -// const Hashrate *hr = WorkersLegacy::hashrate(); - -// total.PushBack(normalize(hr->calc(Hashrate::ShortInterval)), allocator); -// total.PushBack(normalize(hr->calc(Hashrate::MediumInterval)), allocator); -// total.PushBack(normalize(hr->calc(Hashrate::LargeInterval)), allocator); - -// for (size_t i = 0; i < WorkersLegacy::threads(); i++) { -// Value thread(kArrayType); -// thread.PushBack(normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); -// thread.PushBack(normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); -// thread.PushBack(normalize(hr->calc(i, Hashrate::LargeInterval)), allocator); - -// threads.PushBack(thread, allocator); -// } - -// hashrate.AddMember("total", total, allocator); -// hashrate.AddMember("highest", normalize(hr->highest()), allocator); -// hashrate.AddMember("threads", threads, allocator); -// reply.AddMember("hashrate", hashrate, allocator); -//} - - -void xmrig::ApiRouter::getMiner(rapidjson::Value &reply, rapidjson::Document &doc) const -{ - using namespace rapidjson; - auto &allocator = doc.GetAllocator(); - - Value cpu(kObjectType); - cpu.AddMember("brand", StringRef(Cpu::info()->brand()), allocator); - cpu.AddMember("aes", Cpu::info()->hasAES(), allocator); - cpu.AddMember("x64", Cpu::info()->isX64(), allocator); - cpu.AddMember("sockets", Cpu::info()->sockets(), allocator); - - reply.AddMember("version", APP_VERSION, allocator); - reply.AddMember("kind", APP_KIND, allocator); - reply.AddMember("ua", StringRef(Platform::userAgent()), allocator); - reply.AddMember("cpu", cpu, allocator); - reply.AddMember("hugepages", false, allocator); // FIXME hugepages - reply.AddMember("donate_level", 
m_base->config()->pools().donateLevel(), allocator); -} - - void xmrig::ApiRouter::getThreads(rapidjson::Value &reply, rapidjson::Document &doc) const { // using namespace rapidjson; diff --git a/src/api/v1/ApiRouter.h b/src/api/v1/ApiRouter.h index e2b9bd25..ec468d86 100644 --- a/src/api/v1/ApiRouter.h +++ b/src/api/v1/ApiRouter.h @@ -39,7 +39,7 @@ namespace xmrig { class Base; -class ApiRouter : public xmrig::IApiListener +class ApiRouter : public IApiListener { public: ApiRouter(Base *base); @@ -49,8 +49,6 @@ protected: void onRequest(IApiRequest &request) override; private: -// void getHashrate(rapidjson::Value &reply, rapidjson::Document &doc) const; - void getMiner(rapidjson::Value &reply, rapidjson::Document &doc) const; void getThreads(rapidjson::Value &reply, rapidjson::Document &doc) const; Base *m_base; diff --git a/src/backend/common/Hashrate.cpp b/src/backend/common/Hashrate.cpp index 6ffd45b7..a9c63733 100644 --- a/src/backend/common/Hashrate.cpp +++ b/src/backend/common/Hashrate.cpp @@ -29,9 +29,10 @@ #include +#include "backend/common/Hashrate.h" #include "base/tools/Chrono.h" #include "base/tools/Handle.h" -#include "backend/common/Hashrate.h" +#include "rapidjson/document.h" inline static const char *format(double h, char *buf, size_t size) @@ -162,3 +163,15 @@ const char *xmrig::Hashrate::format(double h, char *buf, size_t size) { return ::format(h, buf, size); } + + +rapidjson::Value xmrig::Hashrate::normalize(double d) +{ + using namespace rapidjson; + + if (!std::isnormal(d)) { + return Value(kNullType); + } + + return Value(floor(d * 100.0) / 100.0); +} diff --git a/src/backend/common/Hashrate.h b/src/backend/common/Hashrate.h index 2187c0be..0674c6ab 100644 --- a/src/backend/common/Hashrate.h +++ b/src/backend/common/Hashrate.h @@ -30,6 +30,9 @@ #include +#include "rapidjson/fwd.h" + + namespace xmrig { @@ -53,6 +56,7 @@ public: inline size_t threads() const { return m_threads; } static const char *format(double h, char *buf, size_t size); + 
static rapidjson::Value normalize(double d); private: constexpr static size_t kBucketSize = 2 << 11; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index b64d6b95..7114baf9 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -28,14 +28,24 @@ #include "backend/common/Hashrate.h" +#include "backend/cpu/Cpu.h" #include "backend/cpu/CpuBackend.h" #include "base/io/log/Log.h" +#include "base/kernel/Platform.h" #include "base/net/stratum/Job.h" #include "base/tools/Timer.h" #include "core/config/Config.h" #include "core/Controller.h" #include "core/Miner.h" #include "crypto/common/Nonce.h" +#include "rapidjson/document.h" +#include "version.h" + + +#ifdef XMRIG_FEATURE_API +# include "api/Api.h" +# include "api/interfaces/IApiRequest.h" +#endif namespace xmrig { @@ -114,6 +124,90 @@ public: } +# ifdef XMRIG_FEATURE_API + void getMiner(rapidjson::Value &reply, rapidjson::Document &doc, int version) const + { + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + Value cpu(kObjectType); + cpu.AddMember("brand", StringRef(Cpu::info()->brand()), allocator); + cpu.AddMember("aes", Cpu::info()->hasAES(), allocator); + cpu.AddMember("x64", Cpu::info()->isX64(), allocator); + cpu.AddMember("sockets", Cpu::info()->sockets(), allocator); + + reply.AddMember("version", APP_VERSION, allocator); + reply.AddMember("kind", APP_KIND, allocator); + reply.AddMember("ua", StringRef(Platform::userAgent()), allocator); + reply.AddMember("cpu", cpu, allocator); + + if (version == 1) { + reply.AddMember("hugepages", false, allocator); + } + + reply.AddMember("donate_level", controller->config()->pools().donateLevel(), allocator); + + Value algo(kArrayType); + + for (const Algorithm &a : algorithms) { + algo.PushBack(StringRef(a.shortName()), allocator); + } + + reply.AddMember("algorithms", algo, allocator); + } + + + void getHashrate(rapidjson::Value &reply, rapidjson::Document &doc, int version) const + { + using namespace rapidjson; + auto &allocator = 
doc.GetAllocator(); + + Value hashrate(kObjectType); + Value total(kArrayType); + Value threads(kArrayType); + + double t[3] = { 0.0 }; + + for (IBackend *backend : backends) { + const Hashrate *hr = backend->hashrate(); + if (!hr) { + continue; + } + + t[0] += hr->calc(Hashrate::ShortInterval); + t[1] += hr->calc(Hashrate::MediumInterval); + t[2] += hr->calc(Hashrate::LargeInterval); + + if (version > 1) { + continue; + } + + for (size_t i = 0; i < hr->threads(); i++) { + Value thread(kArrayType); + thread.PushBack(Hashrate::normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); + thread.PushBack(Hashrate::normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); + thread.PushBack(Hashrate::normalize(hr->calc(i, Hashrate::LargeInterval)), allocator); + + threads.PushBack(thread, allocator); + } + } + + total.PushBack(Hashrate::normalize(t[0]), allocator); + total.PushBack(Hashrate::normalize(t[1]), allocator); + total.PushBack(Hashrate::normalize(t[2]), allocator); + + hashrate.AddMember("total", total, allocator); + hashrate.AddMember("highest", Hashrate::normalize(maxHashrate), allocator); + + if (version == 1) { + hashrate.AddMember("threads", threads, allocator); + } + + reply.AddMember("hashrate", hashrate, allocator); + } +# endif + + Algorithms algorithms; bool active = false; bool enabled = true; @@ -137,6 +231,10 @@ xmrig::Miner::Miner(Controller *controller) { controller->addListener(this); +# ifdef XMRIG_FEATURE_API + controller->api()->addListener(this); +# endif + d_ptr->timer = new Timer(this); d_ptr->backends.push_back(new CpuBackend(controller)); @@ -309,3 +407,16 @@ void xmrig::Miner::onTimer(const Timer *) d_ptr->ticks++; } + + +#ifdef XMRIG_FEATURE_API +void xmrig::Miner::onRequest(IApiRequest &request) +{ + if (request.type() == IApiRequest::REQ_SUMMARY) { + request.accept(); + + d_ptr->getMiner(request.reply(), request.doc(), request.version()); + d_ptr->getHashrate(request.reply(), request.doc(), request.version()); + } +} +#endif 
diff --git a/src/core/Miner.h b/src/core/Miner.h index 333f9250..035c0205 100644 --- a/src/core/Miner.h +++ b/src/core/Miner.h @@ -29,6 +29,7 @@ #include +#include "api/interfaces/IApiListener.h" #include "base/kernel/interfaces/IBaseListener.h" #include "base/kernel/interfaces/ITimerListener.h" #include "crypto/common/Algorithm.h" @@ -43,7 +44,7 @@ class MinerPrivate; class IBackend; -class Miner : public ITimerListener, public IBaseListener +class Miner : public ITimerListener, public IBaseListener, public IApiListener { public: Miner(Controller *controller); @@ -64,6 +65,10 @@ protected: void onConfigChanged(Config *config, Config *previousConfig) override; void onTimer(const Timer *timer) override; +# ifdef XMRIG_FEATURE_API + void onRequest(IApiRequest &request) override; +# endif + private: MinerPrivate *d_ptr; }; diff --git a/src/net/Network.cpp b/src/net/Network.cpp index eaff6748..5ee00388 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -185,19 +185,6 @@ void xmrig::Network::onPause(IStrategy *strategy) } -void xmrig::Network::onRequest(IApiRequest &request) -{ -# ifdef XMRIG_FEATURE_API - if (request.method() == IApiRequest::METHOD_GET && (request.url() == "/1/summary" || request.url() == "/api.json")) { - request.accept(); - - getResults(request.reply(), request.doc()); - getConnection(request.reply(), request.doc()); - } -# endif -} - - void xmrig::Network::onResultAccepted(IStrategy *, IClient *, const SubmitResult &result, const char *error) { m_state.add(result, error); @@ -223,6 +210,19 @@ void xmrig::Network::onVerifyAlgorithm(IStrategy *, const IClient *, const Algor } +#ifdef XMRIG_FEATURE_API +void xmrig::Network::onRequest(IApiRequest &request) +{ + if (request.type() == IApiRequest::REQ_SUMMARY) { + request.accept(); + + getResults(request.reply(), request.doc(), request.version()); + getConnection(request.reply(), request.doc(), request.version()); + } +} +#endif + + void xmrig::Network::setJob(IClient *client, const Job &job, 
bool donate) { if (job.height()) { @@ -256,7 +256,7 @@ void xmrig::Network::tick() #ifdef XMRIG_FEATURE_API -void xmrig::Network::getConnection(rapidjson::Value &reply, rapidjson::Document &doc) const +void xmrig::Network::getConnection(rapidjson::Value &reply, rapidjson::Document &doc, int version) const { using namespace rapidjson; auto &allocator = doc.GetAllocator(); @@ -271,13 +271,16 @@ void xmrig::Network::getConnection(rapidjson::Value &reply, rapidjson::Document connection.AddMember("failures", m_state.failures, allocator); connection.AddMember("tls", m_state.tls().toJSON(), allocator); connection.AddMember("tls-fingerprint", m_state.fingerprint().toJSON(), allocator); - connection.AddMember("error_log", Value(kArrayType), allocator); + + if (version == 1) { + connection.AddMember("error_log", Value(kArrayType), allocator); + } reply.AddMember("connection", connection, allocator); } -void xmrig::Network::getResults(rapidjson::Value &reply, rapidjson::Document &doc) const +void xmrig::Network::getResults(rapidjson::Value &reply, rapidjson::Document &doc, int version) const { using namespace rapidjson; auto &allocator = doc.GetAllocator(); @@ -295,8 +298,11 @@ void xmrig::Network::getResults(rapidjson::Value &reply, rapidjson::Document &do best.PushBack(m_state.topDiff[i], allocator); } - results.AddMember("best", best, allocator); - results.AddMember("error_log", Value(kArrayType), allocator); + results.AddMember("best", best, allocator); + + if (version == 1) { + results.AddMember("error_log", Value(kArrayType), allocator); + } reply.AddMember("results", results, allocator); } diff --git a/src/net/Network.h b/src/net/Network.h index 68967713..ddf6d6f3 100644 --- a/src/net/Network.h +++ b/src/net/Network.h @@ -65,10 +65,13 @@ protected: void onJobResult(const JobResult &result) override; void onLogin(IStrategy *strategy, IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) override; void onPause(IStrategy *strategy) override; - void 
onRequest(IApiRequest &request) override; void onResultAccepted(IStrategy *strategy, IClient *client, const SubmitResult &result, const char *error) override; void onVerifyAlgorithm(IStrategy *strategy, const IClient *client, const Algorithm &algorithm, bool *ok) override; +# ifdef XMRIG_FEATURE_API + void onRequest(IApiRequest &request) override; +# endif + private: constexpr static int kTickInterval = 1 * 1000; @@ -76,8 +79,8 @@ private: void tick(); # ifdef XMRIG_FEATURE_API - void getConnection(rapidjson::Value &reply, rapidjson::Document &doc) const; - void getResults(rapidjson::Value &reply, rapidjson::Document &doc) const; + void getConnection(rapidjson::Value &reply, rapidjson::Document &doc, int version) const; + void getResults(rapidjson::Value &reply, rapidjson::Document &doc, int version) const; # endif Controller *m_controller; From fa2c9df075c292e23efa35603dc17eae2ca82587 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 19 Jul 2019 02:39:00 +0700 Subject: [PATCH 041/172] Implemented "enabled" field for CPU backend; --- src/backend/common/interfaces/IBackend.h | 1 + src/backend/cpu/CpuBackend.cpp | 12 ++++++++++++ src/backend/cpu/CpuBackend.h | 1 + 3 files changed, 14 insertions(+) diff --git a/src/backend/common/interfaces/IBackend.h b/src/backend/common/interfaces/IBackend.h index 6fe917cb..ac97759b 100644 --- a/src/backend/common/interfaces/IBackend.h +++ b/src/backend/common/interfaces/IBackend.h @@ -44,6 +44,7 @@ class IBackend public: virtual ~IBackend() = default; + virtual bool isEnabled() const = 0; virtual bool isEnabled(const Algorithm &algorithm) const = 0; virtual const Hashrate *hashrate() const = 0; virtual const String &profileName() const = 0; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 15a0c359..8ab312f7 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -133,6 +133,12 @@ xmrig::CpuBackend::~CpuBackend() } +bool xmrig::CpuBackend::isEnabled() const +{ + return 
d_ptr->controller->config()->cpu().isEnabled(); +} + + bool xmrig::CpuBackend::isEnabled(const Algorithm &algorithm) const { return !d_ptr->controller->config()->cpu().threads().get(algorithm).empty(); @@ -178,6 +184,12 @@ void xmrig::CpuBackend::printHashrate(bool details) void xmrig::CpuBackend::setJob(const Job &job) { + if (!isEnabled()) { + d_ptr->workers.stop(); + d_ptr->threads.clear(); + return; + } + const CpuConfig &cpu = d_ptr->controller->config()->cpu(); std::vector threads = cpu.get(d_ptr->controller->miner(), job.algorithm()); diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h index 543d4459..af59c345 100644 --- a/src/backend/cpu/CpuBackend.h +++ b/src/backend/cpu/CpuBackend.h @@ -44,6 +44,7 @@ public: ~CpuBackend() override; protected: + bool isEnabled() const override; bool isEnabled(const Algorithm &algorithm) const override; const Hashrate *hashrate() const override; const String &profileName() const override; From 1d78e7d60dab7fb03300715e07ef08c8c96575fe Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 19 Jul 2019 04:22:21 +0700 Subject: [PATCH 042/172] "GET /1/threads" replaced to "GET /2/backends". 
--- src/api/v1/ApiRouter.cpp | 36 +------- src/api/v1/ApiRouter.h | 2 - src/backend/common/interfaces/IBackend.h | 26 ++++-- src/backend/cpu/Cpu.h | 2 + src/backend/cpu/CpuBackend.cpp | 104 +++++++++++++++++++++-- src/backend/cpu/CpuBackend.h | 5 ++ src/core/Miner.cpp | 28 +++++- src/crypto/cn/CnHash.cpp | 2 +- src/crypto/rx/Rx.cpp | 11 +++ src/crypto/rx/Rx.h | 1 + 10 files changed, 159 insertions(+), 58 deletions(-) diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp index 0e1080a1..21e69f2d 100644 --- a/src/api/v1/ApiRouter.cpp +++ b/src/api/v1/ApiRouter.cpp @@ -52,11 +52,7 @@ xmrig::ApiRouter::~ApiRouter() void xmrig::ApiRouter::onRequest(IApiRequest &request) { if (request.method() == IApiRequest::METHOD_GET) { - if (request.url() == "/1/threads") { - request.accept(); - getThreads(request.reply(), request.doc()); - } - else if (request.url() == "/1/config") { + if (request.url() == "/1/config") { if (request.isRestricted()) { return request.done(403); } @@ -77,33 +73,3 @@ void xmrig::ApiRouter::onRequest(IApiRequest &request) } } } - - -void xmrig::ApiRouter::getThreads(rapidjson::Value &reply, rapidjson::Document &doc) const -{ -// using namespace rapidjson; -// auto &allocator = doc.GetAllocator(); -// const Hashrate *hr = WorkersLegacy::hashrate(); - -// WorkersLegacy::threadsSummary(doc); - -// const std::vector &threads = m_base->config()->threads(); -// Value list(kArrayType); - -// size_t i = 0; -// for (const xmrig::IThread *thread : threads) { -// Value value = thread->toAPI(doc); - -// Value hashrate(kArrayType); -// hashrate.PushBack(normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); -// hashrate.PushBack(normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); -// hashrate.PushBack(normalize(hr->calc(i, Hashrate::LargeInterval)), allocator); - -// i++; - -// value.AddMember("hashrate", hashrate, allocator); -// list.PushBack(value, allocator); -// } - -// reply.AddMember("threads", list, allocator); -} diff --git 
a/src/api/v1/ApiRouter.h b/src/api/v1/ApiRouter.h index ec468d86..008f5bc0 100644 --- a/src/api/v1/ApiRouter.h +++ b/src/api/v1/ApiRouter.h @@ -49,8 +49,6 @@ protected: void onRequest(IApiRequest &request) override; private: - void getThreads(rapidjson::Value &reply, rapidjson::Document &doc) const; - Base *m_base; }; diff --git a/src/backend/common/interfaces/IBackend.h b/src/backend/common/interfaces/IBackend.h index ac97759b..e19e00ba 100644 --- a/src/backend/common/interfaces/IBackend.h +++ b/src/backend/common/interfaces/IBackend.h @@ -29,6 +29,9 @@ #include +#include "rapidjson/fwd.h" + + namespace xmrig { @@ -44,15 +47,20 @@ class IBackend public: virtual ~IBackend() = default; - virtual bool isEnabled() const = 0; - virtual bool isEnabled(const Algorithm &algorithm) const = 0; - virtual const Hashrate *hashrate() const = 0; - virtual const String &profileName() const = 0; - virtual void printHashrate(bool details) = 0; - virtual void setJob(const Job &job) = 0; - virtual void start(IWorker *worker) = 0; - virtual void stop() = 0; - virtual void tick(uint64_t ticks) = 0; + virtual bool isEnabled() const = 0; + virtual bool isEnabled(const Algorithm &algorithm) const = 0; + virtual const Hashrate *hashrate() const = 0; + virtual const String &profileName() const = 0; + virtual const String &type() const = 0; + virtual void printHashrate(bool details) = 0; + virtual void setJob(const Job &job) = 0; + virtual void start(IWorker *worker) = 0; + virtual void stop() = 0; + virtual void tick(uint64_t ticks) = 0; + +# ifdef XMRIG_FEATURE_API + virtual rapidjson::Value toJSON(rapidjson::Document &doc) const = 0; +# endif }; diff --git a/src/backend/cpu/Cpu.h b/src/backend/cpu/Cpu.h index 9c8afced..23cf37e6 100644 --- a/src/backend/cpu/Cpu.h +++ b/src/backend/cpu/Cpu.h @@ -38,6 +38,8 @@ public: static ICpuInfo *info(); static void init(); static void release(); + + inline static Assembly::Id assembly(Assembly::Id hint) { return hint == Assembly::AUTO ? 
Cpu::info()->assembly() : hint; } }; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 8ab312f7..165ed42e 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -29,6 +29,7 @@ #include "backend/common/Hashrate.h" #include "backend/common/interfaces/IWorker.h" #include "backend/common/Workers.h" +#include "backend/cpu/Cpu.h" #include "backend/cpu/CpuBackend.h" #include "base/io/log/Log.h" #include "base/net/stratum/Job.h" @@ -37,6 +38,9 @@ #include "core/config/Config.h" #include "core/Controller.h" #include "crypto/common/VirtualMemory.h" +#include "crypto/rx/Rx.h" +#include "crypto/rx/RxDataset.h" +#include "rapidjson/document.h" namespace xmrig { @@ -45,6 +49,9 @@ namespace xmrig { extern template class Threads; +static const String kType = "cpu"; + + struct LaunchStatus { public: @@ -59,13 +66,13 @@ public: ts = Chrono::steadyMSecs(); } - size_t hugePages; - size_t memory; - size_t pages; - size_t started; - size_t threads; - size_t ways; - uint64_t ts; + size_t hugePages = 0; + size_t memory = 0; + size_t pages = 0; + size_t started = 0; + size_t threads = 0; + size_t ways = 0; + uint64_t ts = 0; }; @@ -157,6 +164,12 @@ const xmrig::String &xmrig::CpuBackend::profileName() const } +const xmrig::String &xmrig::CpuBackend::type() const +{ + return kType; +} + + void xmrig::CpuBackend::printHashrate(bool details) { if (!details || !hashrate()) { @@ -251,3 +264,80 @@ void xmrig::CpuBackend::tick(uint64_t ticks) { d_ptr->workers.tick(ticks); } + + +#ifdef XMRIG_FEATURE_API +rapidjson::Value xmrig::CpuBackend::toJSON(rapidjson::Document &doc) const +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + const CpuConfig &cpu = d_ptr->controller->config()->cpu(); + + Value out(kObjectType); + out.AddMember("type", type().toJSON(), allocator); + out.AddMember("enabled", isEnabled(), allocator); + out.AddMember("algo", d_ptr->algo.toJSON(), allocator); + out.AddMember("profile", 
profileName().toJSON(), allocator); + out.AddMember("hw-aes", cpu.isHwAES(), allocator); + out.AddMember("priority", cpu.priority(), allocator); + +# ifdef XMRIG_FEATURE_ASM + const Assembly assembly = Cpu::assembly(cpu.assembly()); + out.AddMember("asm", assembly.toJSON(), allocator); +# else + out.AddMember("asm", false, allocator); +# endif + + uv_mutex_lock(&d_ptr->mutex); + uint64_t pages[2] = { d_ptr->status.hugePages, d_ptr->status.pages }; + const size_t ways = d_ptr->status.ways; + uv_mutex_unlock(&d_ptr->mutex); + +# ifdef XMRIG_ALGO_RANDOMX + if (d_ptr->algo.family() == Algorithm::RANDOM_X) { + RxDataset *dataset = Rx::dataset(); + if (dataset) { + const auto rxPages = dataset->hugePages(); + pages[0] += rxPages.first; + pages[1] += rxPages.second; + } + } +# endif + + rapidjson::Value hugepages(rapidjson::kArrayType); + hugepages.PushBack(pages[0], allocator); + hugepages.PushBack(pages[1], allocator); + + out.AddMember("hugepages", hugepages, allocator); + out.AddMember("memory", d_ptr->algo.isValid() ? 
(ways * d_ptr->algo.memory()) : 0, allocator); + + if (d_ptr->threads.empty() || !hashrate()) { + return out; + } + + Value threads(kArrayType); + const Hashrate *hr = hashrate(); + + size_t i = 0; + for (const CpuLaunchData &data : d_ptr->threads) { + Value thread(kObjectType); + thread.AddMember("intensity", data.intensity, allocator); + thread.AddMember("affinity", data.affinity, allocator); + thread.AddMember("av", data.av(), allocator); + + Value hashrate(kArrayType); + hashrate.PushBack(Hashrate::normalize(hr->calc(i, Hashrate::ShortInterval)), allocator); + hashrate.PushBack(Hashrate::normalize(hr->calc(i, Hashrate::MediumInterval)), allocator); + hashrate.PushBack(Hashrate::normalize(hr->calc(i, Hashrate::LargeInterval)), allocator); + + i++; + + thread.AddMember("hashrate", hashrate, allocator); + threads.PushBack(thread, allocator); + } + + out.AddMember("threads", threads, allocator); + + return out; +} +#endif diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h index af59c345..613e7cb6 100644 --- a/src/backend/cpu/CpuBackend.h +++ b/src/backend/cpu/CpuBackend.h @@ -48,12 +48,17 @@ protected: bool isEnabled(const Algorithm &algorithm) const override; const Hashrate *hashrate() const override; const String &profileName() const override; + const String &type() const override; void printHashrate(bool details) override; void setJob(const Job &job) override; void start(IWorker *worker) override; void stop() override; void tick(uint64_t ticks) override; +# ifdef XMRIG_FEATURE_API + rapidjson::Value toJSON(rapidjson::Document &doc) const override; +# endif + private: CpuBackendPrivate *d_ptr; }; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 7114baf9..f6acde12 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -205,6 +205,19 @@ public: reply.AddMember("hashrate", hashrate, allocator); } + + + void getBackends(rapidjson::Value &reply, rapidjson::Document &doc) const + { + using namespace rapidjson; + auto &allocator = 
doc.GetAllocator(); + + reply.SetArray(); + + for (IBackend *backend : backends) { + reply.PushBack(backend->toJSON(doc), allocator); + } + } # endif @@ -412,11 +425,18 @@ void xmrig::Miner::onTimer(const Timer *) #ifdef XMRIG_FEATURE_API void xmrig::Miner::onRequest(IApiRequest &request) { - if (request.type() == IApiRequest::REQ_SUMMARY) { - request.accept(); + if (request.method() == IApiRequest::METHOD_GET) { + if (request.type() == IApiRequest::REQ_SUMMARY) { + request.accept(); - d_ptr->getMiner(request.reply(), request.doc(), request.version()); - d_ptr->getHashrate(request.reply(), request.doc(), request.version()); + d_ptr->getMiner(request.reply(), request.doc(), request.version()); + d_ptr->getHashrate(request.reply(), request.doc(), request.version()); + } + else if (request.url() == "/2/backends") { + request.accept(); + + d_ptr->getBackends(request.reply(), request.doc()); + } } } #endif diff --git a/src/crypto/cn/CnHash.cpp b/src/crypto/cn/CnHash.cpp index 40f4fbba..a2f8880c 100644 --- a/src/crypto/cn/CnHash.cpp +++ b/src/crypto/cn/CnHash.cpp @@ -262,7 +262,7 @@ xmrig::cn_hash_fun xmrig::CnHash::fn(const Algorithm &algorithm, AlgoVariant av, } # ifdef XMRIG_FEATURE_ASM - cn_hash_fun fun = cnHash.m_map[algorithm][av][assembly == Assembly::AUTO ? 
Cpu::info()->assembly() : assembly]; + cn_hash_fun fun = cnHash.m_map[algorithm][av][Cpu::assembly(assembly)]; if (fun) { return fun; } diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 7f482034..4125d81f 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -74,6 +74,17 @@ static const char *tag = BLUE_BG(" rx "); } // namespace xmrig + +xmrig::RxDataset *xmrig::Rx::dataset() +{ + d_ptr->lock(); + RxDataset *dataset = d_ptr->dataset; + d_ptr->unlock(); + + return dataset; +} + + xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algorithm, bool hugePages) { d_ptr->lock(); diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index c9d068c6..63bb2e14 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -42,6 +42,7 @@ class RxDataset; class Rx { public: + static RxDataset *dataset(); static RxDataset *dataset(const uint8_t *seed, const Algorithm &algorithm, bool hugePages = true); static void stop(); }; From ca7fb33848b9299b9b704e544afcf3abb25089d0 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 19 Jul 2019 04:41:48 +0700 Subject: [PATCH 043/172] Removed class ApiRouter. 
--- CMakeLists.txt | 2 -- src/api/Api.cpp | 6 ---- src/api/Api.h | 2 -- src/api/v1/ApiRouter.cpp | 75 ---------------------------------------- src/api/v1/ApiRouter.h | 59 ------------------------------- src/base/kernel/Base.cpp | 30 ++++++++++++++++ src/base/kernel/Base.h | 9 +++-- 7 files changed, 37 insertions(+), 146 deletions(-) delete mode 100644 src/api/v1/ApiRouter.cpp delete mode 100644 src/api/v1/ApiRouter.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 4d205f55..e0290778 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -235,8 +235,6 @@ if (WITH_HTTP) src/api/requests/ApiRequest.h src/api/requests/HttpApiRequest.cpp src/api/requests/HttpApiRequest.h - src/api/v1/ApiRouter.cpp - src/api/v1/ApiRouter.h ) else() set(HTTP_SOURCES "") diff --git a/src/api/Api.cpp b/src/api/Api.cpp index 11fc2a69..9151aa7e 100644 --- a/src/api/Api.cpp +++ b/src/api/Api.cpp @@ -35,7 +35,6 @@ #include "api/Api.h" #include "api/interfaces/IApiListener.h" #include "api/requests/HttpApiRequest.h" -#include "api/v1/ApiRouter.h" #include "base/kernel/Base.h" #include "base/tools/Buffer.h" #include "base/tools/Chrono.h" @@ -61,16 +60,11 @@ xmrig::Api::Api(Base *base) : base->addListener(this); genId(base->config()->apiId()); - - m_v1 = new ApiRouter(base); - addListener(m_v1); } xmrig::Api::~Api() { - delete m_v1; - # ifdef XMRIG_FEATURE_HTTP delete m_httpd; # endif diff --git a/src/api/Api.h b/src/api/Api.h index eef57c3a..f2ed3926 100644 --- a/src/api/Api.h +++ b/src/api/Api.h @@ -36,7 +36,6 @@ namespace xmrig { -class ApiRouter; class Base; class Httpd; class HttpData; @@ -67,7 +66,6 @@ private: void genId(const String &id); void genWorkerId(const String &id); - ApiRouter *m_v1; Base *m_base; char m_id[32]; char m_workerId[128]; diff --git a/src/api/v1/ApiRouter.cpp b/src/api/v1/ApiRouter.cpp deleted file mode 100644 index 21e69f2d..00000000 --- a/src/api/v1/ApiRouter.cpp +++ /dev/null @@ -1,75 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - 
* Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include -#include - - -#include "api/interfaces/IApiRequest.h" -#include "api/v1/ApiRouter.h" -#include "backend/common/interfaces/IThread.h" -#include "backend/cpu/Cpu.h" -#include "base/kernel/Base.h" -#include "base/kernel/Platform.h" -#include "core/config/Config.h" -#include "rapidjson/document.h" -#include "version.h" - - -xmrig::ApiRouter::ApiRouter(Base *base) : - m_base(base) -{ -} - - -xmrig::ApiRouter::~ApiRouter() -{ -} - - -void xmrig::ApiRouter::onRequest(IApiRequest &request) -{ - if (request.method() == IApiRequest::METHOD_GET) { - if (request.url() == "/1/config") { - if (request.isRestricted()) { - return request.done(403); - } - - request.accept(); - m_base->config()->getJSON(request.doc()); - } - } - else if (request.method() == IApiRequest::METHOD_PUT || request.method() == IApiRequest::METHOD_POST) { - if (request.url() == "/1/config") { - request.accept(); - - if (!m_base->reload(request.json())) { - return request.done(400); - } - - request.done(204); - } - } -} diff --git a/src/api/v1/ApiRouter.h b/src/api/v1/ApiRouter.h deleted file mode 100644 index 008f5bc0..00000000 --- a/src/api/v1/ApiRouter.h +++ /dev/null @@ 
-1,59 +0,0 @@ -/* XMRig - * Copyright 2010 Jeff Garzik - * Copyright 2012-2014 pooler - * Copyright 2014 Lucas Jones - * Copyright 2014-2016 Wolf9466 - * Copyright 2016 Jay D Dee - * Copyright 2017-2018 XMR-Stak , - * Copyright 2018-2019 SChernykh - * Copyright 2016-2019 XMRig , - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef XMRIG_APIROUTER_H -#define XMRIG_APIROUTER_H - - -#include "api/interfaces/IApiListener.h" -#include "rapidjson/fwd.h" - - -class Hashrate; - - -namespace xmrig { - - -class Base; - - -class ApiRouter : public IApiListener -{ -public: - ApiRouter(Base *base); - ~ApiRouter() override; - -protected: - void onRequest(IApiRequest &request) override; - -private: - Base *m_base; -}; - - -} // namespace xmrig - - -#endif /* XMRIG_APIROUTER_H */ diff --git a/src/base/kernel/Base.cpp b/src/base/kernel/Base.cpp index 46f32684..d290e7f4 100644 --- a/src/base/kernel/Base.cpp +++ b/src/base/kernel/Base.cpp @@ -48,6 +48,7 @@ #ifdef XMRIG_FEATURE_API # include "api/Api.h" +# include "api/interfaces/IApiRequest.h" #endif @@ -167,6 +168,7 @@ int xmrig::Base::init() # ifdef XMRIG_FEATURE_API d_ptr->api = new Api(this); + d_ptr->api->addListener(this); # endif Platform::init(config()->userAgent()); @@ -288,3 +290,31 @@ void xmrig::Base::onFileChanged(const String &fileName) d_ptr->replace(config); } + + +#ifdef XMRIG_FEATURE_API +void xmrig::Base::onRequest(IApiRequest 
&request) +{ + if (request.method() == IApiRequest::METHOD_GET) { + if (request.url() == "/1/config") { + if (request.isRestricted()) { + return request.done(403); + } + + request.accept(); + config()->getJSON(request.doc()); + } + } + else if (request.method() == IApiRequest::METHOD_PUT || request.method() == IApiRequest::METHOD_POST) { + if (request.url() == "/1/config") { + request.accept(); + + if (!reload(request.json())) { + return request.done(400); + } + + request.done(204); + } + } +} +#endif diff --git a/src/base/kernel/Base.h b/src/base/kernel/Base.h index 592d3a37..6a33a802 100644 --- a/src/base/kernel/Base.h +++ b/src/base/kernel/Base.h @@ -26,6 +26,7 @@ #define XMRIG_BASE_H +#include "api/interfaces/IApiListener.h" #include "base/kernel/interfaces/IConfigListener.h" #include "base/kernel/interfaces/IWatcherListener.h" #include "rapidjson/fwd.h" @@ -35,13 +36,13 @@ namespace xmrig { class Api; -class Config; class BasePrivate; +class Config; class IBaseListener; class Process; -class Base : public IWatcherListener +class Base : public IWatcherListener, public IApiListener { public: Base(Process *process); @@ -60,6 +61,10 @@ public: protected: void onFileChanged(const String &fileName) override; +# ifdef XMRIG_FEATURE_API + void onRequest(IApiRequest &request) override; +# endif + private: BasePrivate *d_ptr; }; From 2fc54d240a31ac46d8bd9d17e00d96b2080f6088 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 19 Jul 2019 05:03:14 +0700 Subject: [PATCH 044/172] Fixed build. 
--- cmake/flags.cmake | 2 +- src/backend/common/Hashrate.cpp | 8 ++++---- src/backend/cpu/CpuBackend.cpp | 2 +- src/core/Miner.cpp | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 3a0add7a..3f2bd0a0 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -61,7 +61,7 @@ elseif (CMAKE_CXX_COMPILER_ID MATCHES Clang) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall") set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Ofast -funroll-loops -fmerge-all-constants") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fno-exceptions -fno-rtti -Wno-missing-braces") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -fexceptions -fno-rtti -Wno-missing-braces") set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Ofast -funroll-loops -fmerge-all-constants") if (XMRIG_ARMv8) diff --git a/src/backend/common/Hashrate.cpp b/src/backend/common/Hashrate.cpp index a9c63733..99a9a9c5 100644 --- a/src/backend/common/Hashrate.cpp +++ b/src/backend/common/Hashrate.cpp @@ -24,7 +24,7 @@ #include -#include +#include #include #include @@ -37,7 +37,7 @@ inline static const char *format(double h, char *buf, size_t size) { - if (isnormal(h)) { + if (std::isnormal(h)) { snprintf(buf, size, "%03.1f", h); return buf; } @@ -82,7 +82,7 @@ double xmrig::Hashrate::calc(size_t ms) const for (size_t i = 0; i < m_threads; ++i) { data = calc(i, ms); - if (isnormal(data)) { + if (std::isnormal(data)) { result += data; } } @@ -153,7 +153,7 @@ void xmrig::Hashrate::add(size_t threadId, uint64_t count, uint64_t timestamp) void xmrig::Hashrate::updateHighest() { double highest = calc(ShortInterval); - if (isnormal(highest) && highest > m_highest) { + if (std::isnormal(highest) && highest > m_highest) { m_highest = highest; } } diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 165ed42e..ffc21597 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -309,7 +309,7 @@ rapidjson::Value 
xmrig::CpuBackend::toJSON(rapidjson::Document &doc) const hugepages.PushBack(pages[1], allocator); out.AddMember("hugepages", hugepages, allocator); - out.AddMember("memory", d_ptr->algo.isValid() ? (ways * d_ptr->algo.memory()) : 0, allocator); + out.AddMember("memory", static_cast(d_ptr->algo.isValid() ? (ways * d_ptr->algo.memory()) : 0), allocator); if (d_ptr->threads.empty() || !hashrate()) { return out; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index f6acde12..df83c5e9 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -134,7 +134,7 @@ public: cpu.AddMember("brand", StringRef(Cpu::info()->brand()), allocator); cpu.AddMember("aes", Cpu::info()->hasAES(), allocator); cpu.AddMember("x64", Cpu::info()->isX64(), allocator); - cpu.AddMember("sockets", Cpu::info()->sockets(), allocator); + cpu.AddMember("sockets", static_cast(Cpu::info()->sockets()), allocator); reply.AddMember("version", APP_VERSION, allocator); reply.AddMember("kind", APP_KIND, allocator); From 222cebba7109bf8a1423d8d70d90bdcd5c3319b3 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 20 Jul 2019 00:37:15 +0700 Subject: [PATCH 045/172] Fixed command line config and removed --max-cpu-usage and --safe. 
--- src/backend/common/Threads.cpp | 75 +++++--- src/backend/common/Threads.h | 2 +- src/backend/cpu/CpuConfig.cpp | 70 ++++---- src/backend/cpu/CpuConfig.h | 4 +- src/backend/cpu/CpuWorker.cpp | 2 +- src/base/base.cmake | 2 - src/base/kernel/config/BaseConfig.cpp | 15 +- src/base/kernel/config/BaseTransform.cpp | 49 ++---- src/base/kernel/config/BaseTransform.h | 4 + src/base/kernel/interfaces/IConfig.h | 9 - src/base/kernel/interfaces/IConfigTransform.h | 3 +- src/core/config/ConfigTransform.cpp | 166 +++++++++++++++++- src/core/config/ConfigTransform.h | 8 +- src/core/config/Config_platform.h | 35 ---- src/core/config/usage.h | 2 - 15 files changed, 283 insertions(+), 163 deletions(-) diff --git a/src/backend/common/Threads.cpp b/src/backend/common/Threads.cpp index 4cb9d4c6..894c404b 100644 --- a/src/backend/common/Threads.cpp +++ b/src/backend/common/Threads.cpp @@ -28,6 +28,15 @@ #include "rapidjson/document.h" +namespace xmrig { + + +static const char *kAsterisk = "*"; + + +} // namespace xmrig + + template const std::vector &xmrig::Threads::get(const String &profileName) const { @@ -41,34 +50,7 @@ const std::vector &xmrig::Threads::get(const String &profileName) const template -xmrig::String xmrig::Threads::profileName(const Algorithm &algorithm, bool strict) const -{ - if (isDisabled(algorithm)) { - return String(); - } - - const String name = algorithm.shortName(); - if (has(name)) { - return name; - } - - if (m_aliases.count(algorithm) > 0) { - return m_aliases.at(algorithm); - } - - if (!strict && name.contains("/")) { - const String base = name.split('/').at(0); - if (has(base)) { - return base; - } - } - - return String(); -} - - -template -void xmrig::Threads::read(const rapidjson::Value &value) +size_t xmrig::Threads::read(const rapidjson::Value &value) { using namespace rapidjson; @@ -109,6 +91,43 @@ void xmrig::Threads::read(const rapidjson::Value &value) } } } + + return m_profiles.size(); +} + + +template +xmrig::String 
xmrig::Threads::profileName(const Algorithm &algorithm, bool strict) const +{ + if (isDisabled(algorithm)) { + return String(); + } + + const String name = algorithm.shortName(); + if (has(name)) { + return name; + } + + if (m_aliases.count(algorithm) > 0) { + return m_aliases.at(algorithm); + } + + if (strict) { + return String(); + } + + if (name.contains("/")) { + const String base = name.split('/').at(0); + if (has(base)) { + return base; + } + } + + if (has(kAsterisk)) { + return kAsterisk; + } + + return String(); } diff --git a/src/backend/common/Threads.h b/src/backend/common/Threads.h index 70bc02a4..bc9e36fd 100644 --- a/src/backend/common/Threads.h +++ b/src/backend/common/Threads.h @@ -50,8 +50,8 @@ public: inline void move(const char *profile, std::vector &&threads) { m_profiles.insert({ profile, threads }); } const std::vector &get(const String &profileName) const; + size_t read(const rapidjson::Value &value); String profileName(const Algorithm &algorithm, bool strict = false) const; - void read(const rapidjson::Value &value); void toJSON(rapidjson::Value &out, rapidjson::Document &doc) const; private: diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index 457f7ef4..4c86ceea 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -126,48 +126,56 @@ void xmrig::CpuConfig::read(const rapidjson::Value &value) m_hugePages = Json::getBool(value, kHugePages, m_hugePages); setAesMode(Json::getValue(value, kHwAes)); - setPriority(Json::getInt(value, kPriority, -1)); + setPriority(Json::getInt(value, kPriority, -1)); # ifdef XMRIG_FEATURE_ASM m_assembly = Json::getValue(value, kAsm); # endif - m_threads.read(value); + if (!m_threads.read(value)) { + generate(); + } } else if (value.IsBool() && value.IsFalse()) { m_enabled = false; } else { - m_shouldSave = true; - - m_threads.disable(Algorithm::CN_0); - m_threads.move(kCn, Cpu::info()->threads(Algorithm::CN_0)); - -# ifdef XMRIG_ALGO_CN_GPU - 
m_threads.move(kCnGPU, Cpu::info()->threads(Algorithm::CN_GPU)); -# endif - -# ifdef XMRIG_ALGO_CN_LITE - m_threads.disable(Algorithm::CN_LITE_0); - m_threads.move(kCnLite, Cpu::info()->threads(Algorithm::CN_LITE_1)); -# endif - -# ifdef XMRIG_ALGO_CN_HEAVY - m_threads.move(kCnHeavy, Cpu::info()->threads(Algorithm::CN_HEAVY_0)); -# endif - -# ifdef XMRIG_ALGO_CN_PICO - m_threads.move(kCnPico, Cpu::info()->threads(Algorithm::CN_PICO_0)); -# endif - -# ifdef XMRIG_ALGO_RANDOMX - m_threads.move(kRx, Cpu::info()->threads(Algorithm::RX_0)); - m_threads.move(kRxWOW, Cpu::info()->threads(Algorithm::RX_WOW)); -# endif + generate(); } } +void xmrig::CpuConfig::generate() +{ + m_shouldSave = true; + + m_threads.disable(Algorithm::CN_0); + m_threads.move(kCn, Cpu::info()->threads(Algorithm::CN_0)); + +# ifdef XMRIG_ALGO_CN_GPU + m_threads.move(kCnGPU, Cpu::info()->threads(Algorithm::CN_GPU)); +# endif + +# ifdef XMRIG_ALGO_CN_LITE + m_threads.disable(Algorithm::CN_LITE_0); + m_threads.move(kCnLite, Cpu::info()->threads(Algorithm::CN_LITE_1)); +# endif + +# ifdef XMRIG_ALGO_CN_HEAVY + m_threads.move(kCnHeavy, Cpu::info()->threads(Algorithm::CN_HEAVY_0)); +# endif + +# ifdef XMRIG_ALGO_CN_PICO + m_threads.move(kCnPico, Cpu::info()->threads(Algorithm::CN_PICO_0)); +# endif + +# ifdef XMRIG_ALGO_RANDOMX + m_threads.move(kRx, Cpu::info()->threads(Algorithm::RX_0)); + m_threads.move(kRxWOW, Cpu::info()->threads(Algorithm::RX_WOW)); +# endif +} + + void xmrig::CpuConfig::setAesMode(const rapidjson::Value &aesMode) { if (aesMode.IsBool()) { @@ -177,9 +185,3 @@ void xmrig::CpuConfig::setAesMode(const rapidjson::Value &aesMode) m_aes = AES_AUTO; } } - - -void xmrig::CpuConfig::setPriority(int priority) -{ - m_priority = (priority >= -1 && priority <= 5) ? 
priority : -1; -} diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h index 8ff8b77c..5b2f3f86 100644 --- a/src/backend/cpu/CpuConfig.h +++ b/src/backend/cpu/CpuConfig.h @@ -59,8 +59,10 @@ public: inline int priority() const { return m_priority; } private: + void generate(); void setAesMode(const rapidjson::Value &aesMode); - void setPriority(int priority); + + inline void setPriority(int priority) { m_priority = (priority >= -1 && priority <= 5) ? priority : -1; } AesMode m_aes = AES_AUTO; Assembly m_assembly; diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index e35c5155..14ffaa73 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -273,7 +273,7 @@ template void xmrig::CpuWorker::allocateCnCtx() { if (m_ctx[0] == nullptr) { - CnCtx::create(m_ctx, m_memory->scratchpad(), m_memory->size(), N); + CnCtx::create(m_ctx, m_memory->scratchpad(), m_algorithm.memory(), N); } } diff --git a/src/base/base.cmake b/src/base/base.cmake index b25d9743..ef4da131 100644 --- a/src/base/base.cmake +++ b/src/base/base.cmake @@ -146,5 +146,3 @@ else() remove_definitions(/DXMRIG_FEATURE_HTTP) remove_definitions(/DXMRIG_FEATURE_API) endif() - -add_definitions(/DXMRIG_DEPRECATED) diff --git a/src/base/kernel/config/BaseConfig.cpp b/src/base/kernel/config/BaseConfig.cpp index 489849a3..462639e3 100644 --- a/src/base/kernel/config/BaseConfig.cpp +++ b/src/base/kernel/config/BaseConfig.cpp @@ -139,20 +139,7 @@ bool xmrig::BaseConfig::read(const IJsonReader &reader, const char *fileName) m_apiWorkerId = Json::getString(api, "worker-id"); } -# ifdef XMRIG_DEPRECATED - if (api.IsObject() && api.HasMember("port")) { - m_upgrade = true; - m_http.load(api); - m_http.setEnabled(Json::getUint(api, "port") > 0); - m_http.setHost("0.0.0.0"); - } - else { - m_http.load(reader.getObject("http")); - } -# else - m_http.load(chain.getObject("http")); -# endif - + m_http.load(reader.getObject("http")); m_pools.load(reader); return 
m_pools.active() > 0; diff --git a/src/base/kernel/config/BaseTransform.cpp b/src/base/kernel/config/BaseTransform.cpp index 615342b9..554565a5 100644 --- a/src/base/kernel/config/BaseTransform.cpp +++ b/src/base/kernel/config/BaseTransform.cpp @@ -87,15 +87,31 @@ void xmrig::BaseTransform::load(JsonChain &chain, Process *process, IConfigTrans LOG_WARN("%s: unsupported non-option argument '%s'", argv[0], argv[optind]); } + transform.finalize(doc); chain.add(std::move(doc)); } +void xmrig::BaseTransform::finalize(rapidjson::Document &doc) +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + if (m_algorithm.isValid() && doc.HasMember(kPools)) { + auto &pools = doc[kPools]; + for (Value &pool : pools.GetArray()) { + pool.AddMember(StringRef("algo"), m_algorithm.toJSON(), allocator); + } + } +} + + void xmrig::BaseTransform::transform(rapidjson::Document &doc, int key, const char *arg) { switch (key) { case IConfig::AlgorithmKey: /* --algo */ - return set(doc, "algo", arg); + m_algorithm = arg; + break; case IConfig::UserpassKey: /* --userpass */ { @@ -134,13 +150,6 @@ void xmrig::BaseTransform::transform(rapidjson::Document &doc, int key, const ch case IConfig::LogFileKey: /* --log-file */ return set(doc, "log-file", arg); -# ifdef XMRIG_DEPRECATED - case IConfig::ApiAccessTokenKey: /* --api-access-token */ - fputs("option \"--api-access-token\" deprecated, use \"--http-access-token\" instead.\n", stderr); - fflush(stdout); - return set(doc, kHttp, "access-token", arg); -# endif - case IConfig::HttpAccessTokenKey: /* --http-access-token */ return set(doc, kHttp, "access-token", arg); @@ -162,9 +171,6 @@ void xmrig::BaseTransform::transform(rapidjson::Document &doc, int key, const ch case IConfig::HttpPort: /* --http-port */ case IConfig::DonateLevelKey: /* --donate-level */ case IConfig::DaemonPollKey: /* --daemon-poll-interval */ -# ifdef XMRIG_DEPRECATED - case IConfig::ApiPort: /* --api-port */ -# endif return transformUint64(doc, key, 
static_cast(strtol(arg, nullptr, 10))); case IConfig::BackgroundKey: /* --background */ @@ -179,10 +185,6 @@ void xmrig::BaseTransform::transform(rapidjson::Document &doc, int key, const ch case IConfig::ColorKey: /* --no-color */ case IConfig::HttpRestrictedKey: /* --http-no-restricted */ -# ifdef XMRIG_DEPRECATED - case IConfig::ApiRestrictedKey: /* --api-no-restricted */ - case IConfig::ApiIPv6Key: /* --api-ipv6 */ -# endif return transformBoolean(doc, key, false); default: @@ -217,16 +219,6 @@ void xmrig::BaseTransform::transformBoolean(rapidjson::Document &doc, int key, b case IConfig::ColorKey: /* --no-color */ return set(doc, "colors", enable); -# ifdef XMRIG_DEPRECATED - case IConfig::ApiIPv6Key: /* --api-ipv6 */ - break; - - case IConfig::ApiRestrictedKey: /* --api-no-restricted */ - fputs("option \"--api-no-restricted\" deprecated, use \"--http-no-restricted\" instead.\n", stderr); - fflush(stdout); - return set(doc, kHttp, "restricted", enable); -# endif - case IConfig::HttpRestrictedKey: /* --http-no-restricted */ return set(doc, kHttp, "restricted", enable); @@ -257,13 +249,6 @@ void xmrig::BaseTransform::transformUint64(rapidjson::Document &doc, int key, ui case IConfig::ProxyDonateKey: /* --donate-over-proxy */ return set(doc, "donate-over-proxy", arg); -# ifdef XMRIG_DEPRECATED - case IConfig::ApiPort: /* --api-port */ - fputs("option \"--api-port\" deprecated, use \"--http-port\" instead.\n", stderr); - fflush(stdout); - return set(doc, kHttp, "port", arg); -# endif - case IConfig::HttpPort: /* --http-port */ return set(doc, kHttp, "port", arg); diff --git a/src/base/kernel/config/BaseTransform.h b/src/base/kernel/config/BaseTransform.h index 3952e22b..02b28c12 100644 --- a/src/base/kernel/config/BaseTransform.h +++ b/src/base/kernel/config/BaseTransform.h @@ -49,6 +49,7 @@ public: static void load(JsonChain &chain, Process *process, IConfigTransform &transform); protected: + void finalize(rapidjson::Document &doc) override; void 
transform(rapidjson::Document &doc, int key, const char *arg) override; @@ -96,6 +97,9 @@ protected: } } +protected: + Algorithm m_algorithm; + private: void transformBoolean(rapidjson::Document &doc, int key, bool enable); diff --git a/src/base/kernel/interfaces/IConfig.h b/src/base/kernel/interfaces/IConfig.h index c8189ba5..ba20a0ca 100644 --- a/src/base/kernel/interfaces/IConfig.h +++ b/src/base/kernel/interfaces/IConfig.h @@ -73,13 +73,6 @@ public: DaemonKey = 1018, DaemonPollKey = 1019, -# ifdef XMRIG_DEPRECATED - ApiPort = 4000, - ApiAccessTokenKey = 4001, - ApiIPv6Key = 4003, - ApiRestrictedKey = 4004, -# endif - // xmrig common CPUPriorityKey = 1021, NicehashKey = 1006, @@ -90,8 +83,6 @@ public: CPUAffinityKey = 1020, DryRunKey = 5000, HugePagesKey = 1009, - MaxCPUUsageKey = 1004, - SafeKey = 1005, ThreadsKey = 't', // HardwareAESKey = 1011, AssemblyKey = 1015, diff --git a/src/base/kernel/interfaces/IConfigTransform.h b/src/base/kernel/interfaces/IConfigTransform.h index f8854388..8afe8221 100644 --- a/src/base/kernel/interfaces/IConfigTransform.h +++ b/src/base/kernel/interfaces/IConfigTransform.h @@ -42,7 +42,8 @@ class IConfigTransform public: virtual ~IConfigTransform() = default; - virtual void transform(rapidjson::Document &doc, int key, const char *arg) = 0; + virtual void finalize(rapidjson::Document &doc) = 0; + virtual void transform(rapidjson::Document &doc, int key, const char *arg) = 0; }; diff --git a/src/core/config/ConfigTransform.cpp b/src/core/config/ConfigTransform.cpp index 7d313726..38bb42a1 100644 --- a/src/core/config/ConfigTransform.cpp +++ b/src/core/config/ConfigTransform.cpp @@ -23,27 +23,189 @@ */ -#include "core/config/ConfigTransform.h" #include "base/kernel/interfaces/IConfig.h" +#include "core/config/ConfigTransform.h" +#include "crypto/cn/CnHash.h" -xmrig::ConfigTransform::ConfigTransform() +namespace xmrig { + +static const char *kAffinity = "affinity"; +static const char *kAsterisk = "*"; +static const char *kCpu = 
"cpu"; +static const char *kIntensity = "intensity"; + + +static inline uint64_t intensity(uint64_t av) +{ + switch (av) { + case CnHash::AV_SINGLE: + case CnHash::AV_SINGLE_SOFT: + return 1; + + case CnHash::AV_DOUBLE_SOFT: + case CnHash::AV_DOUBLE: + return 2; + + case CnHash::AV_TRIPLE_SOFT: + case CnHash::AV_TRIPLE: + return 3; + + case CnHash::AV_QUAD_SOFT: + case CnHash::AV_QUAD: + return 4; + + case CnHash::AV_PENTA_SOFT: + case CnHash::AV_PENTA: + return 5; + + default: + break; + } + + return 1; +} + + +static inline bool isHwAes(uint64_t av) +{ + return av == CnHash::AV_SINGLE || av == CnHash::AV_DOUBLE || (av > CnHash::AV_DOUBLE_SOFT && av < CnHash::AV_TRIPLE_SOFT); +} + + +static inline int64_t affinity(uint64_t index, int64_t affinity) +{ + if (affinity == -1L) { + return -1L; + } + + size_t idx = 0; + + for (size_t i = 0; i < 64; i++) { + if (!(static_cast(affinity) & (1ULL << i))) { + continue; + } + + if (idx == index) { + return static_cast(i); + } + + idx++; + } + + return -1L; +} + + +} + + +xmrig::ConfigTransform::ConfigTransform() : BaseTransform() +{ +} + + +void xmrig::ConfigTransform::finalize(rapidjson::Document &doc) +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + BaseTransform::finalize(doc); + + if (m_threads) { + if (!doc.HasMember(kCpu)) { + doc.AddMember(StringRef(kCpu), Value(kObjectType), allocator); + } + + Value threads(kArrayType); + + if (m_intensity > 1) { + for (uint64_t i = 0; i < m_threads; ++i) { + Value thread(kObjectType); + thread.AddMember(StringRef(kIntensity), m_intensity, allocator); + thread.AddMember(StringRef(kAffinity), affinity(i, m_affinity), allocator); + + threads.PushBack(thread, doc.GetAllocator()); + } + } + else { + for (uint64_t i = 0; i < m_threads; ++i) { + threads.PushBack(affinity(i, m_affinity), doc.GetAllocator()); + } + } + + doc[kCpu].AddMember(StringRef(kAsterisk), threads, doc.GetAllocator()); + } } void xmrig::ConfigTransform::transform(rapidjson::Document &doc, int 
key, const char *arg) { BaseTransform::transform(doc, key, arg); + + switch (key) { + case IConfig::AVKey: /* --av */ + case IConfig::CPUPriorityKey: /* --cpu-priority */ + case IConfig::ThreadsKey: /* --threads */ + return transformUint64(doc, key, static_cast(strtol(arg, nullptr, 10))); + + case IConfig::HugePagesKey: /* --no-huge-pages */ + return transformBoolean(doc, key, false); + + case IConfig::CPUAffinityKey: /* --cpu-affinity */ + { + const char *p = strstr(arg, "0x"); + return transformUint64(doc, key, p ? strtoull(p, nullptr, 16) : strtoull(arg, nullptr, 10)); + } + +# ifndef XMRIG_NO_ASM + case IConfig::AssemblyKey: /* --asm */ + return set(doc, kCpu, "asm", arg); +# endif + + default: + break; + } } void xmrig::ConfigTransform::transformBoolean(rapidjson::Document &doc, int key, bool enable) { + switch (key) { + case IConfig::HugePagesKey: /* --no-huge-pages */ + return set(doc, kCpu, "huge-pages", enable); + + default: + break; + } } void xmrig::ConfigTransform::transformUint64(rapidjson::Document &doc, int key, uint64_t arg) { + using namespace rapidjson; + + switch (key) { + case IConfig::CPUAffinityKey: /* --cpu-affinity */ + m_affinity = static_cast(arg); + break; + + case IConfig::ThreadsKey: /* --threads */ + m_threads = arg; + break; + + case IConfig::AVKey: /* --av */ + m_intensity = intensity(arg); + set(doc, kCpu, "hw-aes", isHwAes(arg)); + break; + + case IConfig::CPUPriorityKey: /* --cpu-priority */ + return set(doc, kCpu, "priority", arg); + + default: + break; + } } + diff --git a/src/core/config/ConfigTransform.h b/src/core/config/ConfigTransform.h index 2d291d8f..440a7169 100644 --- a/src/core/config/ConfigTransform.h +++ b/src/core/config/ConfigTransform.h @@ -5,7 +5,8 @@ * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , - * Copyright 2016-2018 XMRig , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it 
under the terms of the GNU General Public License as published by @@ -37,11 +38,16 @@ public: ConfigTransform(); protected: + void finalize(rapidjson::Document &doc) override; void transform(rapidjson::Document &doc, int key, const char *arg) override; private: void transformBoolean(rapidjson::Document &doc, int key, bool enable); void transformUint64(rapidjson::Document &doc, int key, uint64_t arg); + + int64_t m_affinity = -1; + uint64_t m_intensity = 1; + uint64_t m_threads = 0; }; diff --git a/src/core/config/Config_platform.h b/src/core/config/Config_platform.h index ca06a703..85ab0b43 100644 --- a/src/core/config/Config_platform.h +++ b/src/core/config/Config_platform.h @@ -62,7 +62,6 @@ static const option options[] = { { "dry-run", 0, nullptr, IConfig::DryRunKey }, { "keepalive", 0, nullptr, IConfig::KeepAliveKey }, { "log-file", 1, nullptr, IConfig::LogFileKey }, - { "max-cpu-usage", 1, nullptr, IConfig::MaxCPUUsageKey }, { "nicehash", 0, nullptr, IConfig::NicehashKey }, { "no-color", 0, nullptr, IConfig::ColorKey }, { "no-huge-pages", 0, nullptr, IConfig::HugePagesKey }, @@ -71,7 +70,6 @@ static const option options[] = { { "print-time", 1, nullptr, IConfig::PrintTimeKey }, { "retries", 1, nullptr, IConfig::RetriesKey }, { "retry-pause", 1, nullptr, IConfig::RetryPauseKey }, - { "safe", 0, nullptr, IConfig::SafeKey }, { "syslog", 0, nullptr, IConfig::SyslogKey }, { "threads", 1, nullptr, IConfig::ThreadsKey }, { "url", 1, nullptr, IConfig::UrlKey }, @@ -84,43 +82,10 @@ static const option options[] = { { "asm", 1, nullptr, IConfig::AssemblyKey }, { "daemon", 0, nullptr, IConfig::DaemonKey }, { "daemon-poll-interval", 1, nullptr, IConfig::DaemonPollKey }, - -# ifdef XMRIG_DEPRECATED - { "api-port", 1, nullptr, IConfig::ApiPort }, - { "api-access-token", 1, nullptr, IConfig::ApiAccessTokenKey }, - { "api-no-restricted", 0, nullptr, IConfig::ApiRestrictedKey }, - { "api-ipv6", 0, nullptr, IConfig::ApiIPv6Key }, -# endif - { nullptr, 0, nullptr, 0 } }; 
-static struct option const config_options[] = { - { "algo", 1, nullptr, IConfig::AlgorithmKey }, - { "av", 1, nullptr, IConfig::AVKey }, - { "background", 0, nullptr, IConfig::BackgroundKey }, - { "colors", 0, nullptr, IConfig::ColorKey }, - { "cpu-affinity", 1, nullptr, IConfig::CPUAffinityKey }, - { "cpu-priority", 1, nullptr, IConfig::CPUPriorityKey }, - { "donate-level", 1, nullptr, IConfig::DonateLevelKey }, - { "donate-over-proxy", 1, nullptr, IConfig::ProxyDonateKey }, - { "dry-run", 0, nullptr, IConfig::DryRunKey }, - { "huge-pages", 0, nullptr, IConfig::HugePagesKey }, - { "log-file", 1, nullptr, IConfig::LogFileKey }, - { "max-cpu-usage", 1, nullptr, IConfig::MaxCPUUsageKey }, - { "print-time", 1, nullptr, IConfig::PrintTimeKey }, - { "retries", 1, nullptr, IConfig::RetriesKey }, - { "retry-pause", 1, nullptr, IConfig::RetryPauseKey }, - { "safe", 0, nullptr, IConfig::SafeKey }, - { "syslog", 0, nullptr, IConfig::SyslogKey }, - { "threads", 1, nullptr, IConfig::ThreadsKey }, - { "user-agent", 1, nullptr, IConfig::UserAgentKey }, - { "asm", 1, nullptr, IConfig::AssemblyKey }, - { nullptr, 0, nullptr, 0 } -}; - - } // namespace xmrig diff --git a/src/core/config/usage.h b/src/core/config/usage.h index 42cbc24a..ec3dd589 100644 --- a/src/core/config/usage.h +++ b/src/core/config/usage.h @@ -87,8 +87,6 @@ Options:\n\ -S, --syslog use system log for output messages\n" # endif "\ - --max-cpu-usage=N maximum CPU usage for automatic threads mode (default: 100)\n\ - --safe safe adjust threads and av settings for current CPU\n\ --asm=ASM ASM optimizations, possible values: auto, none, intel, ryzen, bulldozer.\n\ --print-time=N print hashrate report every N seconds\n" #ifdef XMRIG_FEATURE_HTTP From d2ca254789f9ec8eb3023b5bcc6905e11f243376 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 20 Jul 2019 01:20:50 +0700 Subject: [PATCH 046/172] Disable rx/0 algorithm. 
--- src/backend/common/Threads.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/common/Threads.h b/src/backend/common/Threads.h index bc9e36fd..126245f6 100644 --- a/src/backend/common/Threads.h +++ b/src/backend/common/Threads.h @@ -43,7 +43,7 @@ class Threads { public: inline bool has(const char *profile) const { return m_profiles.count(profile) > 0; } - inline bool isDisabled(const Algorithm &algo) const { return m_disabled.count(algo) > 0; } + inline bool isDisabled(const Algorithm &algo) const { return m_disabled.count(algo) > 0 || algo == Algorithm::RX_0; } inline bool isExist(const Algorithm &algo) const { return isDisabled(algo) || m_aliases.count(algo) > 0 || has(algo.shortName()); } inline const std::vector &get(const Algorithm &algo, bool strict = false) const { return get(profileName(algo, strict)); } inline void disable(const Algorithm &algo) { m_disabled.insert(algo); } From dc2c0552e082fd8787c94a0ca1918591943f1ef7 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 20 Jul 2019 01:43:36 +0700 Subject: [PATCH 047/172] Moved current valid algorithm to first position in algo list. 
--- src/net/Network.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/net/Network.cpp b/src/net/Network.cpp index 5ee00388..547a8638 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -27,7 +27,9 @@ #pragma warning(disable:4244) #endif +#include #include +#include #include #include @@ -154,14 +156,23 @@ void xmrig::Network::onJobResult(const JobResult &result) } -void xmrig::Network::onLogin(IStrategy *, IClient *, rapidjson::Document &doc, rapidjson::Value ¶ms) +void xmrig::Network::onLogin(IStrategy *, IClient *client, rapidjson::Document &doc, rapidjson::Value ¶ms) { using namespace rapidjson; auto &allocator = doc.GetAllocator(); + Algorithms algorithms = m_controller->miner()->algorithms(); + const Algorithm algorithm = client->pool().algorithm(); + if (algorithm.isValid()) { + const size_t index = static_cast(std::distance(algorithms.begin(), std::find(algorithms.begin(), algorithms.end(), algorithm))); + if (index > 0 && index < algorithms.size()) { + std::swap(algorithms[0], algorithms[index]); + } + } + Value algo(kArrayType); - for (const auto &a : m_controller->miner()->algorithms()) { + for (const auto &a : algorithms) { algo.PushBack(StringRef(a.shortName()), allocator); } From 3fb180f04e9108925293a6940f0d052ee59176c6 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 20 Jul 2019 05:24:21 +0700 Subject: [PATCH 048/172] Removed --variant option, use --algo instead. 
--- src/base/kernel/config/BaseTransform.cpp | 15 ++++++++++----- src/base/kernel/interfaces/IConfig.h | 1 - src/core/config/Config_platform.h | 1 - src/core/config/usage.h | 19 ++++++++++++++----- 4 files changed, 24 insertions(+), 12 deletions(-) diff --git a/src/base/kernel/config/BaseTransform.cpp b/src/base/kernel/config/BaseTransform.cpp index 554565a5..8043c6e9 100644 --- a/src/base/kernel/config/BaseTransform.cpp +++ b/src/base/kernel/config/BaseTransform.cpp @@ -44,6 +44,7 @@ namespace xmrig { +static const char *kAlgo = "algo"; static const char *kApi = "api"; static const char *kHttp = "http"; static const char *kPools = "pools"; @@ -100,7 +101,9 @@ void xmrig::BaseTransform::finalize(rapidjson::Document &doc) if (m_algorithm.isValid() && doc.HasMember(kPools)) { auto &pools = doc[kPools]; for (Value &pool : pools.GetArray()) { - pool.AddMember(StringRef("algo"), m_algorithm.toJSON(), allocator); + if (!pool.HasMember(kAlgo)) { + pool.AddMember(StringRef(kAlgo), m_algorithm.toJSON(), allocator); + } } } } @@ -110,7 +113,12 @@ void xmrig::BaseTransform::transform(rapidjson::Document &doc, int key, const ch { switch (key) { case IConfig::AlgorithmKey: /* --algo */ - m_algorithm = arg; + if (!doc.HasMember(kPools)) { + m_algorithm = arg; + } + else { + return add(doc, kPools, kAlgo, arg); + } break; case IConfig::UserpassKey: /* --userpass */ @@ -144,9 +152,6 @@ void xmrig::BaseTransform::transform(rapidjson::Document &doc, int key, const ch case IConfig::FingerprintKey: /* --tls-fingerprint */ return add(doc, kPools, "tls-fingerprint", arg); - case IConfig::VariantKey: /* --variant */ - return add(doc, kPools, "variant", arg); - case IConfig::LogFileKey: /* --log-file */ return set(doc, "log-file", arg); diff --git a/src/base/kernel/interfaces/IConfig.h b/src/base/kernel/interfaces/IConfig.h index ba20a0ca..2697bf01 100644 --- a/src/base/kernel/interfaces/IConfig.h +++ b/src/base/kernel/interfaces/IConfig.h @@ -65,7 +65,6 @@ public: UserAgentKey = 1008, 
UserKey = 'u', UserpassKey = 'O', - VariantKey = 1010, VerboseKey = 1100, TlsKey = 1013, FingerprintKey = 1014, diff --git a/src/core/config/Config_platform.h b/src/core/config/Config_platform.h index 85ab0b43..fdd15c96 100644 --- a/src/core/config/Config_platform.h +++ b/src/core/config/Config_platform.h @@ -65,7 +65,6 @@ static const option options[] = { { "nicehash", 0, nullptr, IConfig::NicehashKey }, { "no-color", 0, nullptr, IConfig::ColorKey }, { "no-huge-pages", 0, nullptr, IConfig::HugePagesKey }, - { "variant", 1, nullptr, IConfig::VariantKey }, { "pass", 1, nullptr, IConfig::PasswordKey }, { "print-time", 1, nullptr, IConfig::PrintTimeKey }, { "retries", 1, nullptr, IConfig::RetriesKey }, diff --git a/src/core/config/usage.h b/src/core/config/usage.h index ec3dd589..2d0d5623 100644 --- a/src/core/config/usage.h +++ b/src/core/config/usage.h @@ -36,18 +36,28 @@ static char const usage[] = "\ Usage: " APP_ID " [OPTIONS]\n\ Options:\n\ -a, --algo=ALGO specify the algorithm to use\n\ - cryptonight\n" + cn/r, cn/2, cn/1, cn/0, cn/double, cn/half, cn/fast,\n\ + cn/rwz, cn/zls, cn/xao, cn/rto" +#ifdef XMRIG_ALGO_CN_GPU +", cn/gpu,\n" +#else +",\n" +#endif #ifdef XMRIG_ALGO_CN_LITE "\ - cryptonight-lite\n" + cn-lite/1,\n" #endif #ifdef XMRIG_ALGO_CN_HEAVY "\ - cryptonight-heavy\n" + cn-heavy/xhv, cn-heavy/tube, cn-heavy/0,\n" #endif #ifdef XMRIG_ALGO_CN_PICO "\ - cryptonight-pico\n" + cn-pico,\n" +#endif +#ifdef XMRIG_ALGO_RANDOMX +"\ + rx/wow, rx/loki\n" #endif "\ -o, --url=URL URL of mining server\n\ @@ -76,7 +86,6 @@ Options:\n\ --cpu-priority set process priority (0 idle, 2 normal to 5 highest)\n\ --no-huge-pages disable huge pages support\n\ --no-color disable colored output\n\ - --variant algorithm PoW variant\n\ --donate-level=N donate level, default 5%% (5 minutes in 100 minutes)\n\ --user-agent set custom user-agent string for pool\n\ -B, --background run the miner in the background\n\ From 71300fa852917a082255af394687873351fccc66 Mon Sep 17 00:00:00 
2001 From: XMRig Date: Sat, 20 Jul 2019 05:35:40 +0700 Subject: [PATCH 049/172] v2.99.0-evo --- src/version.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/version.h b/src/version.h index 7e849c7e..309f8873 100644 --- a/src/version.h +++ b/src/version.h @@ -28,15 +28,15 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.16.1-evo" +#define APP_VERSION "2.99.0-evo" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" #define APP_KIND "cpu" #define APP_VER_MAJOR 2 -#define APP_VER_MINOR 16 -#define APP_VER_PATCH 1 +#define APP_VER_MINOR 99 +#define APP_VER_PATCH 0 #ifdef _MSC_VER # if (_MSC_VER >= 1920) From 3d7598b28dd0fdb25279528452fc7f525b232d7d Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 20 Jul 2019 05:57:58 +0700 Subject: [PATCH 050/172] Updated default config example. --- src/config.json | 28 +++++++++------------------- src/core/config/Config_default.h | 28 +++++++++------------------- 2 files changed, 18 insertions(+), 38 deletions(-) diff --git a/src/config.json b/src/config.json index 53df5486..c5ae0676 100644 --- a/src/config.json +++ b/src/config.json @@ -1,9 +1,14 @@ { - "algo": "cryptonight", "api": { "id": null, "worker-id": null }, + "autosave": true, + "background": false, + "colors": true, + "cpu": null, + "donate-level": 5, + "donate-over-proxy": 1, "http": { "enabled": false, "host": "127.0.0.1", @@ -11,41 +16,26 @@ "access-token": null, "restricted": true }, - "asm": true, - "autosave": true, - "av": 0, - "background": false, - "colors": true, - "cpu-affinity": null, - "cpu-priority": null, - "donate-level": 5, - "donate-over-proxy": 1, - "huge-pages": true, - "hw-aes": null, "log-file": null, - "max-cpu-usage": 100, "pools": [ { + "algo": null, "url": "donate.v2.xmrig.com:3333", "user": "YOUR_WALLET_ADDRESS", "pass": "x", "rig-id": null, "nicehash": false, "keepalive": false, - 
"variant": -1, "enabled": true, "tls": false, "tls-fingerprint": null, - "daemon": false, - "daemon-poll-interval": 1000 + "daemon": false } ], "print-time": 60, "retries": 5, "retry-pause": 5, - "safe": false, - "threads": null, - "user-agent": null, "syslog": false, + "user-agent": null, "watch": true } \ No newline at end of file diff --git a/src/core/config/Config_default.h b/src/core/config/Config_default.h index d6145cf4..2ca0bda5 100644 --- a/src/core/config/Config_default.h +++ b/src/core/config/Config_default.h @@ -33,11 +33,16 @@ namespace xmrig { const static char *default_config = R"===( { - "algo": "cryptonight", "api": { "id": null, "worker-id": null }, + "autosave": true, + "background": false, + "colors": true, + "cpu": null, + "donate-level": 5, + "donate-over-proxy": 1, "http": { "enabled": false, "host": "127.0.0.1", @@ -45,42 +50,27 @@ R"===( "access-token": null, "restricted": true }, - "asm": true, - "autosave": true, - "av": 0, - "background": false, - "colors": true, - "cpu-affinity": null, - "cpu-priority": null, - "donate-level": 5, - "donate-over-proxy": 1, - "huge-pages": true, - "hw-aes": null, "log-file": null, - "max-cpu-usage": 100, "pools": [ { + "algo": null, "url": "donate.v2.xmrig.com:3333", "user": "YOUR_WALLET_ADDRESS", "pass": "x", "rig-id": null, "nicehash": false, "keepalive": false, - "variant": -1, "enabled": true, "tls": false, "tls-fingerprint": null, - "daemon": false, - "daemon-poll-interval": 1000 + "daemon": false } ], "print-time": 60, "retries": 5, "retry-pause": 5, - "safe": false, - "threads": null, - "user-agent": null, "syslog": false, + "user-agent": null, "watch": true } )==="; From e2a5bfa0b40b0aca09f41ddd052c3c3ffdc0950f Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 20 Jul 2019 16:20:11 +0700 Subject: [PATCH 051/172] Better default config. 
--- src/config.json | 10 +++++++++- src/core/config/Config_default.h | 10 +++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/config.json b/src/config.json index c5ae0676..57b2984b 100644 --- a/src/config.json +++ b/src/config.json @@ -6,7 +6,15 @@ "autosave": true, "background": false, "colors": true, - "cpu": null, + "cpu": { + "enabled": true, + "huge-pages": true, + "hw-aes": null, + "priority": null, + "asm": true, + "cn/0": false, + "cn-lite/0": false + }, "donate-level": 5, "donate-over-proxy": 1, "http": { diff --git a/src/core/config/Config_default.h b/src/core/config/Config_default.h index 2ca0bda5..06c29566 100644 --- a/src/core/config/Config_default.h +++ b/src/core/config/Config_default.h @@ -40,7 +40,15 @@ R"===( "autosave": true, "background": false, "colors": true, - "cpu": null, + "cpu": { + "enabled": true, + "huge-pages": true, + "hw-aes": null, + "priority": null, + "asm": true, + "cn/0": false, + "cn-lite/0": false + }, "donate-level": 5, "donate-over-proxy": 1, "http": { From c4388fa74c0e33bb2e0e069f0ceeee42411ba6ab Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 20 Jul 2019 20:14:34 +0700 Subject: [PATCH 052/172] Update ALGORITHMS.md --- doc/ALGORITHMS.md | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/doc/ALGORITHMS.md b/doc/ALGORITHMS.md index 835a1d49..cc112e5c 100644 --- a/doc/ALGORITHMS.md +++ b/doc/ALGORITHMS.md @@ -1,22 +1,14 @@ # Algorithms -XMRig uses a different way to specify algorithms, compared to other miners. - -Algorithm selection splitted to 2 parts: - - * Global base algorithm per miner or proxy instance, `algo` option. Possible values: `cryptonight`, `cryptonight-lite`, `cryptonight-heavy`. - * Algorithm variant specified separately for each pool, `variant` option. 
- * [Full table for supported algorithm and variants.](https://github.com/xmrig/xmrig-proxy/blob/master/doc/STRATUM_EXT.md#14-algorithm-names-and-variants) +Since version 3 mining algorithm should specified for each pool separately (`algo` option), earlier versions was use one global `algo` option and per pool `variant` option (this option was removed in v3). If your pool support [mining algorithm negotiation](https://github.com/xmrig/xmrig-proxy/issues/168) you may not specify this option at all. #### Example ```json { - "algo": "cryptonight", - ... "pools": [ { "url": "...", - "variant": 1, + "algo": "cn/r", ... } ], @@ -24,8 +16,7 @@ Algorithm selection splitted to 2 parts: } ``` -## Mining algorithm negotiation -If your pool support [mining algorithm negotiation](https://github.com/xmrig/xmrig-proxy/issues/168) miner will choice proper variant automaticaly and if you choice wrong base algorithm you will see error message. +#### Pools with mining algorithm negotiation support. -Pools with mining algorithm negotiation support. * [www.hashvault.pro](https://www.hashvault.pro/) + * [moneroocean.stream](https://moneroocean.stream) From 484253bf68ecddba6f82796b163469dbca31b7c3 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 20 Jul 2019 20:57:37 +0700 Subject: [PATCH 053/172] Updated algorithms descriptions. 
--- doc/ALGORITHMS.md | 27 +++++++++++++++++++++++++++ src/crypto/common/Algorithm.h | 2 +- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/doc/ALGORITHMS.md b/doc/ALGORITHMS.md index cc112e5c..9272ae81 100644 --- a/doc/ALGORITHMS.md +++ b/doc/ALGORITHMS.md @@ -20,3 +20,30 @@ Since version 3 mining algorithm should specified for each pool separately (`alg * [www.hashvault.pro](https://www.hashvault.pro/) * [moneroocean.stream](https://moneroocean.stream) + + ## Algorithm names + +| Name | Memory | Notes | +|-----------------|--------|--------------------------------------------------------------------------------------| +| `cn/0` | 2 MB | CryptoNight (original) | +| `cn/1` | 2 MB | CryptoNight variant 1 also known as `Monero7` and `CryptoNightV7`. | +| `cn/2` | 2 MB | CryptoNight variant 2. | +| `cn/r` | 2 MB | CryptoNightR (Monero's variant 4). | +| `cn/wow` | 2 MB | CryptoNightR (Wownero). | +| `cn/fast` | 2 MB | CryptoNight variant 1 with half iterations. | +| `cn/half` | 2 MB | CryptoNight variant 2 with half iterations (Masari/Torque) | +| `cn/xao` | 2 MB | CryptoNight variant 0 (modified, Alloy only) | +| `cn/rto` | 2 MB | CryptoNight variant 1 (modified, Arto only) | +| `cn/rwz` | 2 MB | CryptoNight variant 2 with 3/4 iterations and reversed shuffle operation (Graft). | +| `cn/zls` | 2 MB | CryptoNight variant 2 with 3/4 iterations (Zelerius). | +| `cn/double` | 2 MB | CryptoNight variant 2 with double iterations (X-CASH). | +| `cn/gpu` | 2 MB | CryptoNight-GPU (RYO). | +| `cn-lite/0` | 1 MB | CryptoNight-Lite variant 0. | +| `cn-lite/1` | 1 MB | CryptoNight-Lite variant 1. | +| `cn-heavy/0` | 4 MB | CryptoNight-Heavy . | +| `cn-heavy/xhv` | 4 MB | CryptoNight-Heavy (modified, TUBE only). | +| `cn-heavy/tube` | 4 MB | CryptoNight-Heavy (modified, Haven Protocol only). | +| `cn-pico` | 256 KB | TurtleCoin (TRTL) | +| `rx/0` | 2 MB | RandomX (reference configuration), reserved for future use. | +| `rx/wow` | 1 MB | RandomWOW (Wownero). 
| +| `rx/loki` | 2 MB | RandomXL (Loki). | \ No newline at end of file diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index a1d8ded2..3c44ec56 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -41,7 +41,7 @@ class Algorithm public: enum Id : int { INVALID = -1, - CN_0, // "cn/0" Original CryptoNight + CN_0, // "cn/0" CryptoNight (original) CN_1, // "cn/1" CryptoNight variant 1 also known as Monero7 and CryptoNightV7 CN_2, // "cn/2" CryptoNight variant 2 CN_R, // "cn/r" CryptoNightR (Monero's variant 4) From d1705a7f74621349737cf4a32a5041f545601afc Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 20 Jul 2019 20:59:42 +0700 Subject: [PATCH 054/172] Update ALGORITHMS.md --- doc/ALGORITHMS.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/ALGORITHMS.md b/doc/ALGORITHMS.md index 9272ae81..07f92271 100644 --- a/doc/ALGORITHMS.md +++ b/doc/ALGORITHMS.md @@ -1,6 +1,6 @@ # Algorithms -Since version 3 mining algorithm should specified for each pool separately (`algo` option), earlier versions was use one global `algo` option and per pool `variant` option (this option was removed in v3). If your pool support [mining algorithm negotiation](https://github.com/xmrig/xmrig-proxy/issues/168) you may not specify this option at all. +Since version 3 mining [algorithm](#algorithm-names) should specified for each pool separately (`algo` option), earlier versions was use one global `algo` option and per pool `variant` option (this option was removed in v3). If your pool support [mining algorithm negotiation](https://github.com/xmrig/xmrig-proxy/issues/168) you may not specify this option at all. #### Example ```json @@ -46,4 +46,4 @@ Since version 3 mining algorithm should specified for each pool separately (`alg | `cn-pico` | 256 KB | TurtleCoin (TRTL) | | `rx/0` | 2 MB | RandomX (reference configuration), reserved for future use. | | `rx/wow` | 1 MB | RandomWOW (Wownero). 
| -| `rx/loki` | 2 MB | RandomXL (Loki). | \ No newline at end of file +| `rx/loki` | 2 MB | RandomXL (Loki). | From 83f0f2bcadc7f52e4cce6a70b6c8b2cf541b0564 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 20 Jul 2019 23:16:23 +0700 Subject: [PATCH 055/172] Create CPU.md --- doc/CPU.md | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 doc/CPU.md diff --git a/doc/CPU.md b/doc/CPU.md new file mode 100644 index 00000000..7dd7a00b --- /dev/null +++ b/doc/CPU.md @@ -0,0 +1,56 @@ +# CPU backend + +All CPU related settings contains in one `cpu` object in config file, CPU backend allow specify multiple profiles and allow switch between them without restrictions. Default auto-configuration create reasonable minimum of profiles which cover all supported algorithms. + +### Example + +Example below demonstrate all primary ideas of flexible profiles configuration: + +* `"rx/wow"` Exact match to algorithm `rx/wow`, defined 4 threads without CPU affinity. +* `"cn"` Default failback profile for all `cn/*` algorithms, defined 2 threads with CPU affinity, another failback profiles is `cn-lite`, `cn-heavy` and `rx`. +* `"cn-lite"` Default failback profile for all `cn-lite/*` algorithms, defined 2 double threads with CPU affinity. +* `"custom-profile"` Custom user defined profile. +* `"*"` Failback profile for all unhandled by other profiles algorithms. +* `"cn/r"` Exact match, alias to profile `custom-profile`. +* `"cn/0"` Exact match, disabled algorithm. 
+ +```json +{ + "cpu": { + "enabled": true, + "huge-pages": true, + "hw-aes": null, + "priority": null, + "asm": true, + "rx/wow": [ + -1, + -1, + -1, + -1, + ], + "cn": [ + 0, + 2 + ], + "cn-lite": [ + { + "intensity": 2, + "affinity": 0 + }, + { + "intensity": 2, + "affinity": 2 + } + ], + "custom-profile": [ + 0, + 2, + ], + "*": [ + -1 + ], + "cn/r": "custom-profile", + "cn/0": false + } +} +``` From 7119e4c64cc1ab3dca646aa6eeadf5097790f22d Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 20 Jul 2019 23:23:13 +0700 Subject: [PATCH 056/172] Update CPU.md --- doc/CPU.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/CPU.md b/doc/CPU.md index 7dd7a00b..ba662d2c 100644 --- a/doc/CPU.md +++ b/doc/CPU.md @@ -54,3 +54,7 @@ Example below demonstrate all primary ideas of flexible profiles configuration: } } ``` + +### Intensity +This option was known as `low_power_mode`, possible values is range from 1 to 5, for convinient if value 1 used, possible omit this option and specify CPU thread config by only one number: CPU affinity, instead of object. + From c7ba4f8f2fc24391549dc42058909a87c90be2c7 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 20 Jul 2019 23:33:04 +0700 Subject: [PATCH 057/172] Update CPU.md --- doc/CPU.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/CPU.md b/doc/CPU.md index ba662d2c..82c0a752 100644 --- a/doc/CPU.md +++ b/doc/CPU.md @@ -1,6 +1,6 @@ # CPU backend -All CPU related settings contains in one `cpu` object in config file, CPU backend allow specify multiple profiles and allow switch between them without restrictions. Default auto-configuration create reasonable minimum of profiles which cover all supported algorithms. +All CPU related settings contains in one `cpu` object in config file, CPU backend allow specify multiple profiles and allow switch between them without restrictions by pool request or config change. 
Default auto-configuration create reasonable minimum of profiles which cover all supported algorithms. ### Example From 162c3f3d325eef8027d2cd2f67b799fe491491d4 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 21 Jul 2019 00:08:13 +0700 Subject: [PATCH 058/172] Only intensity=1 allowed for RandomX. --- src/backend/cpu/CpuWorker.cpp | 2 +- src/crypto/common/Algorithm.h | 42 +++++++++++++++++------------------ 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index 14ffaa73..8eb4cdb1 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -94,7 +94,7 @@ bool xmrig::CpuWorker::selfTest() { # ifdef XMRIG_ALGO_RANDOMX if (m_algorithm.family() == Algorithm::RANDOM_X) { - return true; + return N == 1; } # endif diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index 3c44ec56..b30a946b 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -41,37 +41,37 @@ class Algorithm public: enum Id : int { INVALID = -1, - CN_0, // "cn/0" CryptoNight (original) - CN_1, // "cn/1" CryptoNight variant 1 also known as Monero7 and CryptoNightV7 - CN_2, // "cn/2" CryptoNight variant 2 - CN_R, // "cn/r" CryptoNightR (Monero's variant 4) - CN_WOW, // "cn/wow" CryptoNightR (Wownero) - CN_FAST, // "cn/fast" CryptoNight variant 1 with half iterations - CN_HALF, // "cn/half" CryptoNight variant 2 with half iterations (Masari/Stellite) - CN_XAO, // "cn/xao" Modified CryptoNight variant 0 (Alloy only) - CN_RTO, // "cn/rto" Modified CryptoNight variant 1 (Arto only) - CN_RWZ, // "cn/rwz" CryptoNight variant 2 with 3/4 iterations and reversed shuffle operation (Graft) - CN_ZLS, // "cn/zls" CryptoNight variant 2 with 3/4 iterations (Zelerius) - CN_DOUBLE, // "cn/double" CryptoNight variant 2 with double iterations (X-CASH) + CN_0, // "cn/0" CryptoNight (original). + CN_1, // "cn/1" CryptoNight variant 1 also known as Monero7 and CryptoNightV7. 
+ CN_2, // "cn/2" CryptoNight variant 2. + CN_R, // "cn/r" CryptoNightR (Monero's variant 4). + CN_WOW, // "cn/wow" CryptoNightR (Wownero). + CN_FAST, // "cn/fast" CryptoNight variant 1 with half iterations. + CN_HALF, // "cn/half" CryptoNight variant 2 with half iterations (Masari/Torque). + CN_XAO, // "cn/xao" CryptoNight variant 0 (modified, Alloy only). + CN_RTO, // "cn/rto" CryptoNight variant 1 (modified, Arto only). + CN_RWZ, // "cn/rwz" CryptoNight variant 2 with 3/4 iterations and reversed shuffle operation (Graft). + CN_ZLS, // "cn/zls" CryptoNight variant 2 with 3/4 iterations (Zelerius). + CN_DOUBLE, // "cn/double" CryptoNight variant 2 with double iterations (X-CASH). # ifdef XMRIG_ALGO_CN_GPU - CN_GPU, // "cn/gpu" CryptoNight-GPU (Ryo) + CN_GPU, // "cn/gpu" CryptoNight-GPU (Ryo). # endif # ifdef XMRIG_ALGO_CN_LITE - CN_LITE_0, // "cn-lite/0" CryptoNight-Lite (1 MB) variant 0 - CN_LITE_1, // "cn-lite/1" CryptoNight-Lite (1 MB) variant 1 + CN_LITE_0, // "cn-lite/0" CryptoNight-Lite variant 0. + CN_LITE_1, // "cn-lite/1" CryptoNight-Lite variant 1. # endif # ifdef XMRIG_ALGO_CN_HEAVY - CN_HEAVY_0, // "cn-heavy/0" CryptoNight-Heavy (4 MB) - CN_HEAVY_TUBE, // "cn-heavy/tube" Modified CryptoNight-Heavy (TUBE only) - CN_HEAVY_XHV, // "cn-heavy/xhv" Modified CryptoNight-Heavy (Haven Protocol only) + CN_HEAVY_0, // "cn-heavy/0" CryptoNight-Heavy (4 MB). + CN_HEAVY_TUBE, // "cn-heavy/tube" CryptoNight-Heavy (modified, TUBE only). + CN_HEAVY_XHV, // "cn-heavy/xhv" CryptoNight-Heavy (modified, Haven Protocol only). # endif # ifdef XMRIG_ALGO_CN_PICO CN_PICO_0, // "cn-pico" CryptoNight Turtle (TRTL) # endif # ifdef XMRIG_ALGO_RANDOMX - RX_0, // "rx/0" RandomX (reference configuration) - RX_WOW, // "rx/wow" RandomWOW (Wownero) - RX_LOKI, // "rx/loki" RandomXL (Loki) + RX_0, // "rx/0" RandomX (reference configuration). + RX_WOW, // "rx/wow" RandomWOW (Wownero). + RX_LOKI, // "rx/loki" RandomXL (Loki). 
# endif MAX }; From 9660dfc7b3eff98495e0b379f30e62d4ef824504 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 21 Jul 2019 04:35:52 +0700 Subject: [PATCH 059/172] Workaround for unsupported intensity option + warning. --- src/backend/common/Threads.h | 2 +- src/backend/common/Workers.cpp | 82 +++++++++++++++++++++++----------- src/backend/common/Workers.h | 3 +- 3 files changed, 60 insertions(+), 27 deletions(-) diff --git a/src/backend/common/Threads.h b/src/backend/common/Threads.h index 126245f6..bc9e36fd 100644 --- a/src/backend/common/Threads.h +++ b/src/backend/common/Threads.h @@ -43,7 +43,7 @@ class Threads { public: inline bool has(const char *profile) const { return m_profiles.count(profile) > 0; } - inline bool isDisabled(const Algorithm &algo) const { return m_disabled.count(algo) > 0 || algo == Algorithm::RX_0; } + inline bool isDisabled(const Algorithm &algo) const { return m_disabled.count(algo) > 0; } inline bool isExist(const Algorithm &algo) const { return isDisabled(algo) || m_aliases.count(algo) > 0 || has(algo.shortName()); } inline const std::vector &get(const Algorithm &algo, bool strict = false) const { return get(profileName(algo, strict)); } inline void disable(const Algorithm &algo) { m_disabled.insert(algo); } diff --git a/src/backend/common/Workers.cpp b/src/backend/common/Workers.cpp index d70546d3..6a369c1b 100644 --- a/src/backend/common/Workers.cpp +++ b/src/backend/common/Workers.cpp @@ -137,52 +137,84 @@ void xmrig::Workers::tick(uint64_t) template -void xmrig::Workers::onReady(void *) +xmrig::IWorker *xmrig::Workers::create(Thread *) { + return nullptr; +} + + +template +void xmrig::Workers::onReady(void *arg) +{ + Thread *handle = static_cast* >(arg); + + IWorker *worker = create(handle); + if (!worker || !worker->selfTest()) { + LOG_ERR("thread %zu error: \"hash self-test failed\".", worker->id()); + + return; + } + + handle->setWorker(worker); + handle->backend()->start(worker); } namespace xmrig { -template<> -void 
xmrig::Workers::onReady(void *arg) +#if defined (XMRIG_ALGO_RANDOMX) || defined (XMRIG_ALGO_CN_GPU) +static void printIntensityWarning(Thread *handle) { - auto handle = static_cast* >(arg); + LOG_WARN("CPU thread %zu warning: \"intensity %d not supported for %s algorithm\".", handle->index(), handle->config().intensity, handle->config().algorithm.shortName()); +} +#endif - IWorker *worker = nullptr; - switch (handle->config().intensity) { +template<> +xmrig::IWorker *xmrig::Workers::create(Thread *handle) +{ + const int intensity = handle->config().intensity; + +# if defined (XMRIG_ALGO_RANDOMX) || defined (XMRIG_ALGO_CN_GPU) + if (intensity > 1) { +# ifdef XMRIG_ALGO_RANDOMX + if (handle->config().algorithm.family() == Algorithm::RANDOM_X) { + printIntensityWarning(handle); + + return new CpuWorker<1>(handle->index(), handle->config()); + } +# endif + +# ifdef XMRIG_ALGO_CN_GPU + if (handle->config().algorithm == Algorithm::CN_GPU) { + printIntensityWarning(handle); + + return new CpuWorker<1>(handle->index(), handle->config()); + } +# endif + } +# endif + + + switch (intensity) { case 1: - worker = new CpuWorker<1>(handle->index(), handle->config()); - break; + return new CpuWorker<1>(handle->index(), handle->config()); case 2: - worker = new CpuWorker<2>(handle->index(), handle->config()); - break; + return new CpuWorker<2>(handle->index(), handle->config()); case 3: - worker = new CpuWorker<3>(handle->index(), handle->config()); - break; + return new CpuWorker<3>(handle->index(), handle->config()); case 4: - worker = new CpuWorker<4>(handle->index(), handle->config()); - break; + return new CpuWorker<4>(handle->index(), handle->config()); case 5: - worker = new CpuWorker<5>(handle->index(), handle->config()); - break; + return new CpuWorker<5>(handle->index(), handle->config()); } - handle->setWorker(worker); - - if (!worker->selfTest()) { - LOG_ERR("thread %zu error: \"hash self-test failed\".", handle->worker()->id()); - - return; - } - - 
handle->backend()->start(worker); + return nullptr; } diff --git a/src/backend/common/Workers.h b/src/backend/common/Workers.h index 32d9458a..77dd434c 100644 --- a/src/backend/common/Workers.h +++ b/src/backend/common/Workers.h @@ -52,6 +52,7 @@ public: void tick(uint64_t ticks); private: + static IWorker *create(Thread *handle); static void onReady(void *arg); std::vector *> m_workers; @@ -60,7 +61,7 @@ private: template<> -void Workers::onReady(void *arg); +IWorker *Workers::create(Thread *handle); extern template class Workers; From 2d719a28c467ee71e67732fc9f457962f78129cd Mon Sep 17 00:00:00 2001 From: xmrig Date: Sun, 21 Jul 2019 23:57:07 +0700 Subject: [PATCH 060/172] Update CHANGELOG.md --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 25fc9ad2..4f6778ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +# v2.99.0-beta +- [#1050](https://github.com/xmrig/xmrig/pull/1050) Added RandomXL algorithm for [Loki](https://loki.network/), algorithm name used by miner is `randomx/loki` or `rx/loki`. +- Added [flexible](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) multi algorithm configuration. +- Added unlimited switching between incompatible algorithms, all mining options can be changed at runtime. +- Broke backward compatibility with previous configs and command line, `variant` option replaced with `algo`, global option `algo` removed, all CPU related settings moved to `cpu` object. +- Options `av`, `safe` and `max-cpu-usage` removed. +- Algorithm `cn/msr` renamed to `cn/fast`. +- Algorithm `cn/xtl` removed. +- API endpoint `GET /1/threads` replaced with `GET /2/backends`. + # v2.16.0-beta - [#1036](https://github.com/xmrig/xmrig/pull/1036) Added RandomWOW (RandomX with different preferences) algorithm support for [Wownero](http://wownero.org/). - Algorithm name used by miner is `randomx/wow` or `rx/wow`. 
From 8600d8ee96f1ab3863ed319c88bcf53a588b8153 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 22 Jul 2019 00:18:30 +0700 Subject: [PATCH 061/172] v2.99.0-beta --- src/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.h b/src/version.h index 309f8873..7ac08093 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.0-evo" +#define APP_VERSION "2.99.0-beta" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" From 02c03b0465e34b1430c440d095d1299e5709bd91 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 22 Jul 2019 15:02:51 +0700 Subject: [PATCH 062/172] v2.99.1-evo --- src/version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/version.h b/src/version.h index 7ac08093..ef5d82e0 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.0-beta" +#define APP_VERSION "2.99.1-evo" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" @@ -36,7 +36,7 @@ #define APP_VER_MAJOR 2 #define APP_VER_MINOR 99 -#define APP_VER_PATCH 0 +#define APP_VER_PATCH 1 #ifdef _MSC_VER # if (_MSC_VER >= 1920) From b02e5968534468bbc7ecc45b0fe66aa0ed31abbf Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 23 Jul 2019 00:40:24 +0700 Subject: [PATCH 063/172] Strip extra spaces from CPU brand string and improved BasicCpuInfo. 
--- src/Summary.cpp | 2 +- src/backend/cpu/interfaces/ICpuInfo.h | 3 +- src/backend/cpu/platform/AdvancedCpuInfo.cpp | 42 ++++++-- src/backend/cpu/platform/AdvancedCpuInfo.h | 5 +- src/backend/cpu/platform/BasicCpuInfo.cpp | 101 +++++++++++-------- src/backend/cpu/platform/BasicCpuInfo.h | 7 +- src/core/Miner.cpp | 2 +- 7 files changed, 103 insertions(+), 59 deletions(-) diff --git a/src/Summary.cpp b/src/Summary.cpp index 36f59ba3..fec7c65c 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -71,7 +71,7 @@ static void print_cpu(xmrig::Config *) Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%d)") " %sx64 %sAES %sAVX2", "CPU", Cpu::info()->brand(), - Cpu::info()->sockets(), + Cpu::info()->packages(), Cpu::info()->isX64() ? GREEN_BOLD_S : RED_BOLD_S "-", Cpu::info()->hasAES() ? GREEN_BOLD_S : RED_BOLD_S "-", Cpu::info()->hasAVX2() ? GREEN_BOLD_S : RED_BOLD_S "-" diff --git a/src/backend/cpu/interfaces/ICpuInfo.h b/src/backend/cpu/interfaces/ICpuInfo.h index 5848db89..7faa7187 100644 --- a/src/backend/cpu/interfaces/ICpuInfo.h +++ b/src/backend/cpu/interfaces/ICpuInfo.h @@ -48,14 +48,13 @@ public: virtual Assembly::Id assembly() const = 0; virtual bool hasAES() const = 0; virtual bool hasAVX2() const = 0; - virtual bool isSupported() const = 0; virtual const char *brand() const = 0; virtual CpuThreads threads(const Algorithm &algorithm) const = 0; virtual size_t cores() const = 0; virtual size_t L2() const = 0; virtual size_t L3() const = 0; virtual size_t nodes() const = 0; - virtual size_t sockets() const = 0; + virtual size_t packages() const = 0; virtual size_t threads() const = 0; }; diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp index 45b0dd66..cd44cc2e 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -32,37 +32,61 @@ #include "backend/cpu/platform/AdvancedCpuInfo.h" +namespace xmrig { + + +static inline void cpu_brand_string(char 
out[64], const char *in) { + size_t pos = 0; + const size_t size = strlen(in); + + for (size_t i = 0; i < size; ++i) { + if (in[i] == ' ' && ((pos > 0 && out[pos - 1] == ' ') || pos == 0)) { + continue; + } + + out[pos++] = in[i]; + } + + if (pos > 0 && out[pos - 1] == ' ') { + out[pos - 1] = '\0'; + } +} + + +} // namespace xmrig + + xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : m_brand() { struct cpu_raw_data_t raw = {}; - struct cpu_id_t data = {}; + struct cpu_id_t data = {}; cpuid_get_raw_data(&raw); cpu_identify(&raw, &data); - strncpy(m_brand, data.brand_str, sizeof(m_brand)); + cpu_brand_string(m_brand, data.brand_str); - m_threads = static_cast(data.total_logical_cpus); - m_sockets = std::max(threads() / static_cast(data.num_logical_cpus), 1); - m_cores = static_cast(data.num_cores) * m_sockets; - m_L3 = data.l3_cache > 0 ? static_cast(data.l3_cache) * m_sockets : 0; + m_threads = static_cast(data.total_logical_cpus); + m_packages = std::max(threads() / static_cast(data.num_logical_cpus), 1); + m_cores = static_cast(data.num_cores) * m_packages; + m_L3 = data.l3_cache > 0 ? static_cast(data.l3_cache) * m_packages : 0; const size_t l2 = static_cast(data.l2_cache); // Workaround for AMD CPUs https://github.com/anrieff/libcpuid/issues/97 if (data.vendor == VENDOR_AMD && data.ext_family >= 0x15 && data.ext_family < 0x17) { - m_L2 = l2 * (cores() / 2) * m_sockets; + m_L2 = l2 * (cores() / 2) * m_packages; m_L2_exclusive = true; } // Workaround for Intel Pentium Dual-Core, Core Duo, Core 2 Duo, Core 2 Quad and their Xeon homologue // These processors have L2 cache shared by 2 cores. else if (data.vendor == VENDOR_INTEL && data.ext_family == 0x06 && (data.ext_model == 0x0E || data.ext_model == 0x0F || data.ext_model == 0x17)) { size_t l2_count_per_socket = cores() > 1 ? cores() / 2 : 1; - m_L2 = data.l2_cache > 0 ? l2 * l2_count_per_socket * m_sockets : 0; + m_L2 = data.l2_cache > 0 ? l2 * l2_count_per_socket * m_packages : 0; } else{ - m_L2 = data.l2_cache > 0 ? 
l2 * cores() * m_sockets : 0; + m_L2 = data.l2_cache > 0 ? l2 * cores() * m_packages : 0; } if (data.flags[CPU_FEATURE_AES]) { diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.h b/src/backend/cpu/platform/AdvancedCpuInfo.h index 889fba00..e8225844 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.h +++ b/src/backend/cpu/platform/AdvancedCpuInfo.h @@ -43,13 +43,12 @@ protected: inline Assembly::Id assembly() const override { return m_assembly; } inline bool hasAES() const override { return m_aes; } inline bool hasAVX2() const override { return m_avx2; } - inline bool isSupported() const override { return true; } inline const char *brand() const override { return m_brand; } inline size_t cores() const override { return m_cores; } inline size_t L2() const override { return m_L2; } inline size_t L3() const override { return m_L3; } inline size_t nodes() const override { return 0; } - inline size_t sockets() const override { return m_sockets; } + inline size_t packages() const override { return m_packages; } inline size_t threads() const override { return m_threads; } private: @@ -61,7 +60,7 @@ private: size_t m_cores = 0; size_t m_L2 = 0; size_t m_L3 = 0; - size_t m_sockets = 1; + size_t m_packages = 1; size_t m_threads = 0; }; diff --git a/src/backend/cpu/platform/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp index f30466fe..1ceefecc 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -64,70 +64,88 @@ #define EDX_Reg (3) -#ifdef _MSC_VER -static inline void cpuid(int level, int output[4]) { - __cpuid(output, level); +namespace xmrig { + + +static inline void cpuid(uint32_t level, int32_t output[4]) +{ + memset(output, 0, sizeof(int32_t) * 4); + +# ifdef _MSC_VER + __cpuid(output, static_cast(level)); +# else + __cpuid_count(level, 0, output[0], output[1], output[2], output[3]); +# endif } -#else -static inline void cpuid(int level, int output[4]) { - int a, b, c, d; - __cpuid_count(level, 0, 
a, b, c, d); - - output[0] = a; - output[1] = b; - output[2] = c; - output[3] = d; -} -#endif -static inline void cpu_brand_string(char* s) { +static void cpu_brand_string(char out[64 + 6]) { int32_t cpu_info[4] = { 0 }; + char buf[64] = { 0 }; + cpuid(VENDOR_ID, cpu_info); if (cpu_info[EAX_Reg] >= 4) { - for (int i = 0; i < 4; i++) { + for (uint32_t i = 0; i < 4; i++) { cpuid(0x80000002 + i, cpu_info); - memcpy(s, cpu_info, sizeof(cpu_info)); - s += 16; + memcpy(buf + (i * 16), cpu_info, sizeof(cpu_info)); } } + + size_t pos = 0; + const size_t size = strlen(buf); + + for (size_t i = 0; i < size; ++i) { + if (buf[i] == ' ' && ((pos > 0 && out[pos - 1] == ' ') || pos == 0)) { + continue; + } + + out[pos++] = buf[i]; + } + + if (pos > 0 && out[pos - 1] == ' ') { + out[pos - 1] = '\0'; + } +} + + +static bool has_feature(uint32_t level, uint32_t reg, int32_t bit) +{ + int32_t cpu_info[4] = { 0 }; + cpuid(level, cpu_info); + + return (cpu_info[reg] & bit) != 0; +} + + +static inline int32_t get_masked(int32_t val, int32_t h, int32_t l) +{ + val &= (0x7FFFFFFF >> (31 - (h - l))) << l; + return val >> l; } static inline bool has_aes_ni() { - int32_t cpu_info[4] = { 0 }; - cpuid(PROCESSOR_INFO, cpu_info); - - return (cpu_info[ECX_Reg] & bit_AES) != 0; + return has_feature(PROCESSOR_INFO, ECX_Reg, bit_AES); } static inline bool has_avx2() { - int32_t cpu_info[4] = { 0 }; - cpuid(EXTENDED_FEATURES, cpu_info); - - return (cpu_info[EBX_Reg] & bit_AVX2) != 0; + return has_feature(EXTENDED_FEATURES, EBX_Reg, bit_AVX2) && has_feature(PROCESSOR_INFO, ECX_Reg, bit_OSXSAVE); } -static inline bool has_ossave() -{ - int32_t cpu_info[4] = { 0 }; - cpuid(PROCESSOR_INFO, cpu_info); - - return (cpu_info[ECX_Reg] & bit_OSXSAVE) != 0; -} +} // namespace xmrig xmrig::BasicCpuInfo::BasicCpuInfo() : + m_threads(std::thread::hardware_concurrency()), m_assembly(Assembly::NONE), m_aes(has_aes_ni()), m_brand(), - m_avx2(has_avx2() && has_ossave()), - 
m_threads(std::thread::hardware_concurrency()) + m_avx2(has_avx2()) { cpu_brand_string(m_brand); @@ -136,17 +154,20 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : char vendor[13] = { 0 }; int32_t data[4] = { 0 }; - cpuid(0, data); + cpuid(VENDOR_ID, data); memcpy(vendor + 0, &data[1], 4); memcpy(vendor + 4, &data[3], 4); memcpy(vendor + 8, &data[2], 4); - if (memcmp(vendor, "GenuineIntel", 12) == 0) { - m_assembly = Assembly::INTEL; + if (memcmp(vendor, "AuthenticAMD", 12) == 0) { + cpuid(PROCESSOR_INFO, data); + const int32_t family = get_masked(data[EAX_Reg], 12, 8) + get_masked(data[EAX_Reg], 28, 20); + + m_assembly = family >= 23 ? Assembly::RYZEN : Assembly::BULLDOZER; } - else if (memcmp(vendor, "AuthenticAMD", 12) == 0) { - m_assembly = Assembly::RYZEN; + else { + m_assembly = Assembly::INTEL; } } # endif diff --git a/src/backend/cpu/platform/BasicCpuInfo.h b/src/backend/cpu/platform/BasicCpuInfo.h index 12d0e037..b05ed9cc 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.h +++ b/src/backend/cpu/platform/BasicCpuInfo.h @@ -43,21 +43,22 @@ protected: inline Assembly::Id assembly() const override { return m_assembly; } inline bool hasAES() const override { return m_aes; } inline bool hasAVX2() const override { return m_avx2; } - inline bool isSupported() const override { return true; } inline const char *brand() const override { return m_brand; } inline size_t cores() const override { return 0; } inline size_t L2() const override { return 0; } inline size_t L3() const override { return 0; } inline size_t nodes() const override { return 0; } - inline size_t sockets() const override { return 1; } + inline size_t packages() const override { return 1; } inline size_t threads() const override { return m_threads; } +protected: + size_t m_threads; + private: Assembly m_assembly; bool m_aes; char m_brand[64 + 6]; const bool m_avx2; - const size_t m_threads; }; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index df83c5e9..d0bfa78f 100644 --- a/src/core/Miner.cpp +++ 
b/src/core/Miner.cpp @@ -134,7 +134,7 @@ public: cpu.AddMember("brand", StringRef(Cpu::info()->brand()), allocator); cpu.AddMember("aes", Cpu::info()->hasAES(), allocator); cpu.AddMember("x64", Cpu::info()->isX64(), allocator); - cpu.AddMember("sockets", static_cast(Cpu::info()->sockets()), allocator); + cpu.AddMember("sockets", static_cast(Cpu::info()->packages()), allocator); reply.AddMember("version", APP_VERSION, allocator); reply.AddMember("kind", APP_KIND, allocator); From 42460b8805ae7a4a4636075391881dfe0b7394ad Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 23 Jul 2019 01:18:55 +0700 Subject: [PATCH 064/172] Added hwloc stub. --- CMakeLists.txt | 5 +++ cmake/FindHWLOC.cmake | 25 +++++++++++++ src/backend/cpu/Cpu.cpp | 8 +++- src/backend/cpu/cpu.cmake | 33 +++++++++++++++-- src/backend/cpu/platform/HwlocCpuInfo.cpp | 34 +++++++++++++++++ src/backend/cpu/platform/HwlocCpuInfo.h | 45 +++++++++++++++++++++++ 6 files changed, 145 insertions(+), 5 deletions(-) create mode 100644 cmake/FindHWLOC.cmake create mode 100644 src/backend/cpu/platform/HwlocCpuInfo.cpp create mode 100644 src/backend/cpu/platform/HwlocCpuInfo.h diff --git a/CMakeLists.txt b/CMakeLists.txt index e0290778..8ae89502 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,6 +2,7 @@ cmake_minimum_required(VERSION 2.8) project(xmrig) option(WITH_LIBCPUID "Use Libcpuid" ON) +option(WITH_HWLOC "Use hwloc" OFF) option(WITH_CN_LITE "CryptoNight-Lite support" ON) option(WITH_CN_HEAVY "CryptoNight-Heavy support" ON) option(WITH_CN_PICO "CryptoNight-Pico support" ON) @@ -146,6 +147,10 @@ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") find_package(UV REQUIRED) +if (WITH_HWLOC) + find_package(HWLOC REQUIRED) +endif() + if (WITH_RANDOMX) include_directories(src/crypto/randomx) add_definitions(/DXMRIG_ALGO_RANDOMX) diff --git a/cmake/FindHWLOC.cmake b/cmake/FindHWLOC.cmake new file mode 100644 index 00000000..55309d3e --- /dev/null +++ b/cmake/FindHWLOC.cmake @@ -0,0 +1,25 @@ 
+find_path( + HWLOC_INCLUDE_DIR + NAMES hwloc.h + PATHS "${XMRIG_DEPS}" ENV "XMRIG_DEPS" + PATH_SUFFIXES "include" + NO_DEFAULT_PATH +) + +find_path(HWLOC_INCLUDE_DIR NAMES hwloc.h) + +find_library( + HWLOC_LIBRARY + NAMES hwloc.a hwloc libhwloc + PATHS "${XMRIG_DEPS}" ENV "XMRIG_DEPS" + PATH_SUFFIXES "lib" + NO_DEFAULT_PATH +) + +find_library(HWLOC_LIBRARY NAMES hwloc.a hwloc libhwloc) + +set(HWLOC_LIBRARIES ${HWLOC_LIBRARY}) +set(HWLOC_INCLUDE_DIRS ${HWLOC_INCLUDE_DIR}) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(HWLOC DEFAULT_MSG HWLOC_LIBRARY HWLOC_INCLUDE_DIR) diff --git a/src/backend/cpu/Cpu.cpp b/src/backend/cpu/Cpu.cpp index fdcad5a8..603ce634 100644 --- a/src/backend/cpu/Cpu.cpp +++ b/src/backend/cpu/Cpu.cpp @@ -29,7 +29,9 @@ #include "backend/cpu/Cpu.h" -#ifdef XMRIG_FEATURE_LIBCPUID +#if defined(XMRIG_FEATURE_HWLOC) +# include "backend/cpu/platform/HwlocCpuInfo.h" +#elif defined(XMRIG_FEATURE_LIBCPUID) # include "backend/cpu/platform/AdvancedCpuInfo.h" #else # include "backend/cpu/platform/BasicCpuInfo.h" @@ -51,7 +53,9 @@ void xmrig::Cpu::init() { assert(cpuInfo == nullptr); -# ifdef XMRIG_FEATURE_LIBCPUID +# if defined(XMRIG_FEATURE_HWLOC) + cpuInfo = new HwlocCpuInfo(); +# elif defined(XMRIG_FEATURE_LIBCPUID) cpuInfo = new AdvancedCpuInfo(); # else cpuInfo = new BasicCpuInfo(); diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index 871debd3..a8bbd8e8 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -18,16 +18,43 @@ set(SOURCES_BACKEND_CPU ) -if (WITH_LIBCPUID) +if (WITH_HWLOC) + set(WITH_LIBCPUID OFF) + + include_directories(${HWLOC_INCLUDE_DIR}) + set(CPUID_LIB ${HWLOC_LIBRARY}) + + remove_definitions(/DXMRIG_FEATURE_LIBCPUID) + add_definitions(/DXMRIG_FEATURE_HWLOC) + + set(SOURCES_CPUID + src/backend/cpu/platform/BasicCpuInfo.cpp + src/backend/cpu/platform/BasicCpuInfo.h + src/backend/cpu/platform/HwlocCpuInfo.cpp + src/backend/cpu/platform/HwlocCpuInfo.h + ) +elseif 
(WITH_LIBCPUID) + set(WITH_HWLOC OFF) + add_subdirectory(src/3rdparty/libcpuid) include_directories(src/3rdparty/libcpuid) + add_definitions(/DXMRIG_FEATURE_LIBCPUID) + remove_definitions(/DXMRIG_FEATURE_HWLOC) set(CPUID_LIB cpuid) - set(SOURCES_CPUID src/backend/cpu/platform/AdvancedCpuInfo.h src/backend/cpu/platform/AdvancedCpuInfo.cpp src/backend/cpu/Cpu.cpp) + set(SOURCES_CPUID + src/backend/cpu/platform/AdvancedCpuInfo.cpp + src/backend/cpu/platform/AdvancedCpuInfo.h + ) else() remove_definitions(/DXMRIG_FEATURE_LIBCPUID) - set(SOURCES_CPUID src/backend/cpu/platform/BasicCpuInfo.h src/backend/cpu/Cpu.cpp) + remove_definitions(/DXMRIG_FEATURE_HWLOC) + + set(CPUID_LIB "") + set(SOURCES_CPUID + src/backend/cpu/platform/BasicCpuInfo.h + ) if (XMRIG_ARM) set(SOURCES_CPUID ${SOURCES_CPUID} src/backend/cpu/platform/BasicCpuInfo_arm.cpp) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp new file mode 100644 index 00000000..ef518b33 --- /dev/null +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -0,0 +1,34 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include + + +#include "backend/cpu/platform/HwlocCpuInfo.h" + + +xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo() +{ +} diff --git a/src/backend/cpu/platform/HwlocCpuInfo.h b/src/backend/cpu/platform/HwlocCpuInfo.h new file mode 100644 index 00000000..0b09bf88 --- /dev/null +++ b/src/backend/cpu/platform/HwlocCpuInfo.h @@ -0,0 +1,45 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2019 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef XMRIG_HWLOCCPUINFO_H +#define XMRIG_HWLOCCPUINFO_H + + +#include "backend/cpu/platform/BasicCpuInfo.h" + + +namespace xmrig { + + +class HwlocCpuInfo : public BasicCpuInfo +{ +public: + HwlocCpuInfo(); +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_HWLOCCPUINFO_H */ From b27fc6fd5df9dea03ff4b234d2d2b8b040f685b8 Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 23 Jul 2019 07:12:56 +0700 Subject: [PATCH 065/172] hwloc used for CPU information. 
--- src/Summary.cpp | 29 +++++++--- src/backend/cpu/platform/AdvancedCpuInfo.cpp | 4 +- src/backend/cpu/platform/HwlocCpuInfo.cpp | 58 +++++++++++++++++++- src/backend/cpu/platform/HwlocCpuInfo.h | 13 +++++ 4 files changed, 94 insertions(+), 10 deletions(-) diff --git a/src/Summary.cpp b/src/Summary.cpp index fec7c65c..62d22619 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -67,17 +67,30 @@ static void print_memory(xmrig::Config *) { static void print_cpu(xmrig::Config *) { using namespace xmrig; + const ICpuInfo *info = Cpu::info(); - Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%d)") " %sx64 %sAES %sAVX2", + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%zu/%zu)") " %sx64 %sAES %sAVX2", "CPU", - Cpu::info()->brand(), - Cpu::info()->packages(), - Cpu::info()->isX64() ? GREEN_BOLD_S : RED_BOLD_S "-", - Cpu::info()->hasAES() ? GREEN_BOLD_S : RED_BOLD_S "-", - Cpu::info()->hasAVX2() ? GREEN_BOLD_S : RED_BOLD_S "-" + info->brand(), + info->packages(), + info->nodes(), + info->isX64() ? GREEN_BOLD_S : RED_BOLD_S "-", + info->hasAES() ? GREEN_BOLD_S : RED_BOLD_S "-", + info->hasAVX2() ? 
GREEN_BOLD_S : RED_BOLD_S "-" + ); +# if defined(XMRIG_FEATURE_LIBCPUID) || defined (XMRIG_FEATURE_HWLOC) + Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("L2:") WHITE_BOLD("%.1f MB") BLACK_BOLD(" L3:") WHITE_BOLD("%.1f MB") BLACK_BOLD(" cores:") CYAN_BOLD("%zu") BLACK_BOLD(" threads:") CYAN_BOLD("%zu"), + "", + info->L2() / 1048576.0, + info->L3() / 1048576.0, + info->cores(), + info->threads() + ); +# else + Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("threads:") CYAN_BOLD("%zu"), + "", + info->threads() ); -# ifdef XMRIG_FEATURE_LIBCPUID - Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%.1f MB/%.1f MB"), "CPU L2/L3", Cpu::info()->L2() / 1024.0, Cpu::info()->L3() / 1024.0); # endif } diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp index cd44cc2e..b5f2dfd5 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -89,6 +89,9 @@ xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : m_L2 = data.l2_cache > 0 ? 
l2 * cores() * m_packages : 0; } + m_L2 *= 1024; + m_L3 *= 1024; + if (data.flags[CPU_FEATURE_AES]) { m_aes = true; @@ -127,7 +130,6 @@ xmrig::CpuThreads xmrig::AdvancedCpuInfo::threads(const Algorithm &algorithm) co } if (cache) { - cache *= 1024; const size_t memory = algorithm.memory(); assert(memory > 0); diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index ef518b33..b12a3010 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -29,6 +29,62 @@ #include "backend/cpu/platform/HwlocCpuInfo.h" -xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo() +namespace xmrig { + + +inline bool isCacheObject(hwloc_obj_t obj) { +# if HWLOC_API_VERSION >= 0x20000 + return hwloc_obj_type_is_cache(obj->type); +# else + return obj->type == HWLOC_OBJ_CACHE; +# endif +} + + +template +inline void findCache(hwloc_obj_t obj, func lambda) +{ + for (size_t i = 0; i < obj->arity; i++) { + if (isCacheObject(obj->children[i])) { + if (obj->children[i]->attr->cache.depth < 2) { + continue; + } + + lambda(obj->children[i]); + } + + findCache(obj->children[i], lambda); + } +} + + +inline size_t countByType(hwloc_topology_t topology, hwloc_obj_type_t type) +{ + const int count = hwloc_get_nbobjs_by_type(topology, type); + + return count > 0 ? 
static_cast(count) : 0; +} + + +} // namespace xmrig + + +xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), + m_cache() +{ + m_threads = 0; + + hwloc_topology_t topology; + hwloc_topology_init(&topology); + hwloc_topology_load(topology); + + findCache(hwloc_get_root_obj(topology), [this](hwloc_obj_t found) { this->m_cache[found->attr->cache.depth] += found->attr->cache.size; }); + + m_threads = countByType(topology, HWLOC_OBJ_PU); + m_cores = countByType(topology, HWLOC_OBJ_CORE); + m_nodes = countByType(topology, HWLOC_OBJ_NUMANODE); + m_packages = countByType(topology, HWLOC_OBJ_PACKAGE); + + hwloc_topology_destroy(topology); } diff --git a/src/backend/cpu/platform/HwlocCpuInfo.h b/src/backend/cpu/platform/HwlocCpuInfo.h index 0b09bf88..8d626a8a 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.h +++ b/src/backend/cpu/platform/HwlocCpuInfo.h @@ -36,6 +36,19 @@ class HwlocCpuInfo : public BasicCpuInfo { public: HwlocCpuInfo(); + +protected: + inline size_t cores() const override { return m_cores; } + inline size_t L2() const override { return m_cache[2]; } + inline size_t L3() const override { return m_cache[3]; } + inline size_t nodes() const override { return m_nodes; } + inline size_t packages() const override { return m_packages; } + +private: + size_t m_cache[5]; + size_t m_cores = 0; + size_t m_nodes = 0; + size_t m_packages = 0; }; From 4263c6c381ff8712dc8e1609bcc28aac588fce59 Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 23 Jul 2019 07:45:00 +0700 Subject: [PATCH 066/172] Added hwloc version display. 
--- src/Summary.cpp | 47 +++++++++++++++------------ src/Summary.h | 19 +++++++---- src/base/kernel/Entry.cpp | 5 +++ src/base/kernel/config/BaseConfig.cpp | 21 +++++++----- 4 files changed, 56 insertions(+), 36 deletions(-) diff --git a/src/Summary.cpp b/src/Summary.cpp index 62d22619..7b22ab34 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -39,6 +39,9 @@ #include "version.h" +namespace xmrig { + + #ifdef XMRIG_FEATURE_ASM static const char *coloredAsmNames[] = { RED_BOLD("none"), @@ -49,24 +52,23 @@ static const char *coloredAsmNames[] = { }; -inline static const char *asmName(xmrig::Assembly::Id assembly) +inline static const char *asmName(Assembly::Id assembly) { return coloredAsmNames[assembly]; } #endif -static void print_memory(xmrig::Config *) { +static void print_memory(Config *) { # ifdef _WIN32 - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") "%s", - "HUGE PAGES", xmrig::VirtualMemory::isHugepagesAvailable() ? GREEN_BOLD("available") : RED_BOLD("unavailable")); + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") "%s", + "HUGE PAGES", VirtualMemory::isHugepagesAvailable() ? GREEN_BOLD("available") : RED_BOLD("unavailable")); # endif } -static void print_cpu(xmrig::Config *) +static void print_cpu(Config *) { - using namespace xmrig; const ICpuInfo *info = Cpu::info(); Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%zu/%zu)") " %sx64 %sAES %sAVX2", @@ -95,41 +97,44 @@ static void print_cpu(xmrig::Config *) } -static void print_threads(xmrig::Config *config) +static void print_threads(Config *config) { - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%s%d%%"), - "DONATE", - config->pools().donateLevel() == 0 ? RED_BOLD_S : "", - config->pools().donateLevel() - ); + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") WHITE_BOLD("%s%d%%"), + "DONATE", + config->pools().donateLevel() == 0 ? 
RED_BOLD_S : "", + config->pools().donateLevel() + ); # ifdef XMRIG_FEATURE_ASM - if (config->cpu().assembly() == xmrig::Assembly::AUTO) { - const xmrig::Assembly assembly = xmrig::Cpu::info()->assembly(); + if (config->cpu().assembly() == Assembly::AUTO) { + const Assembly assembly = Cpu::info()->assembly(); - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13sauto:%s"), "ASSEMBLY", asmName(assembly)); + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13sauto:%s"), "ASSEMBLY", asmName(assembly)); } else { - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s"), "ASSEMBLY", asmName(config->cpu().assembly())); + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s"), "ASSEMBLY", asmName(config->cpu().assembly())); } # endif } -static void print_commands(xmrig::Config *) +static void print_commands(Config *) { - if (xmrig::Log::colors) { - xmrig::Log::print(GREEN_BOLD(" * ") WHITE_BOLD("COMMANDS ") MAGENTA_BOLD("h") WHITE_BOLD("ashrate, ") + if (Log::colors) { + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("COMMANDS ") MAGENTA_BOLD("h") WHITE_BOLD("ashrate, ") MAGENTA_BOLD("p") WHITE_BOLD("ause, ") MAGENTA_BOLD("r") WHITE_BOLD("esume")); } else { - xmrig::Log::print(" * COMMANDS 'h' hashrate, 'p' pause, 'r' resume"); + Log::print(" * COMMANDS 'h' hashrate, 'p' pause, 'r' resume"); } } -void Summary::print(xmrig::Controller *controller) +} // namespace xmrig + + +void xmrig::Summary::print(Controller *controller) { controller->config()->printVersions(); print_memory(controller->config()); diff --git a/src/Summary.h b/src/Summary.h index f07dba35..4317d13e 100644 --- a/src/Summary.h +++ b/src/Summary.h @@ -5,7 +5,8 @@ * Copyright 2014-2016 Wolf9466 * Copyright 2016 Jay D Dee * Copyright 2017-2018 XMR-Stak , - * Copyright 2016-2018 XMRig , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,20 +22,24 @@ * along with 
this program. If not, see . */ -#ifndef __SUMMARY_H__ -#define __SUMMARY_H__ +#ifndef XMRIG_SUMMARY_H +#define XMRIG_SUMMARY_H namespace xmrig { - class Controller; -} + + +class Controller; class Summary { public: - static void print(xmrig::Controller *controller); + static void print(Controller *controller); }; -#endif /* __SUMMARY_H__ */ +} // namespace xmrig + + +#endif /* XMRIG_SUMMARY_H */ diff --git a/src/base/kernel/Entry.cpp b/src/base/kernel/Entry.cpp index 1834cb82..1e12b24d 100644 --- a/src/base/kernel/Entry.cpp +++ b/src/base/kernel/Entry.cpp @@ -31,6 +31,9 @@ # include #endif +#ifdef XMRIG_FEATURE_HWLOC +# include +#endif #include "base/kernel/Entry.h" #include "base/kernel/Process.h" @@ -75,6 +78,8 @@ static int showVersion() } # endif + printf("hwloc/%s\n", HWLOC_VERSION); + return 0; } diff --git a/src/base/kernel/config/BaseConfig.cpp b/src/base/kernel/config/BaseConfig.cpp index 462639e3..2fcbad70 100644 --- a/src/base/kernel/config/BaseConfig.cpp +++ b/src/base/kernel/config/BaseConfig.cpp @@ -34,6 +34,9 @@ # include #endif +#ifdef XMRIG_FEATURE_HWLOC +# include +#endif #ifdef XMRIG_AMD_PROJECT # if defined(__APPLE__) @@ -95,22 +98,24 @@ void xmrig::BaseConfig::printVersions() # elif defined(XMRIG_NVIDIA_PROJECT) const int cudaVersion = cuda_get_runtime_version(); int length = snprintf(buf, sizeof buf, "CUDA/%d.%d ", cudaVersion / 1000, cudaVersion % 100); -# else - memset(buf, 0, 16); +# endif -# if defined(XMRIG_FEATURE_HTTP) || defined(XMRIG_FEATURE_TLS) - int length = 0; -# endif -# endif + std::string libs; # if defined(XMRIG_FEATURE_TLS) && defined(OPENSSL_VERSION_TEXT) { constexpr const char *v = OPENSSL_VERSION_TEXT + 8; - length += snprintf(buf + length, (sizeof buf) - length, "OpenSSL/%.*s ", static_cast(strchr(v, ' ') - v), v); + snprintf(buf, sizeof buf, "OpenSSL/%.*s ", static_cast(strchr(v, ' ') - v), v); + libs += buf; } # endif - Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13slibuv/%s %s"), "LIBS", uv_version_string(), buf); +# 
ifdef XMRIG_FEATURE_HWLOC + snprintf(buf, sizeof buf, "hwloc/%s ", HWLOC_VERSION); + libs += buf; +# endif + + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13slibuv/%s %s"), "LIBS", uv_version_string(), libs.c_str()); } From 91f732794b92868dd48505e715e88450ec90df58 Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 23 Jul 2019 08:06:04 +0700 Subject: [PATCH 067/172] HWLOC_VERSION not always defined. --- src/base/kernel/Entry.cpp | 8 ++++++++ src/base/kernel/config/BaseConfig.cpp | 10 +++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/base/kernel/Entry.cpp b/src/base/kernel/Entry.cpp index 1e12b24d..da225e40 100644 --- a/src/base/kernel/Entry.cpp +++ b/src/base/kernel/Entry.cpp @@ -78,7 +78,15 @@ static int showVersion() } # endif +# if defined(XMRIG_FEATURE_HWLOC) +# if defined(HWLOC_VERSION) printf("hwloc/%s\n", HWLOC_VERSION); +# elif HWLOC_API_VERSION >= 0x20000 + printf("hwloc/2\n"); +# else + printf("hwloc/1\n"); +# endif +# endif return 0; } diff --git a/src/base/kernel/config/BaseConfig.cpp b/src/base/kernel/config/BaseConfig.cpp index 2fcbad70..c89b032a 100644 --- a/src/base/kernel/config/BaseConfig.cpp +++ b/src/base/kernel/config/BaseConfig.cpp @@ -110,8 +110,16 @@ void xmrig::BaseConfig::printVersions() } # endif -# ifdef XMRIG_FEATURE_HWLOC + +# if defined(XMRIG_FEATURE_HWLOC) +# if defined(HWLOC_VERSION) snprintf(buf, sizeof buf, "hwloc/%s ", HWLOC_VERSION); +# elif HWLOC_API_VERSION >= 0x20000 + snprintf(buf, sizeof buf, "hwloc/2 "); +# else + snprintf(buf, sizeof buf, "hwloc/1 "); +# endif + libs += buf; # endif From 73558a0eaa9c7730b338a4920367341fcd4cc4aa Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 23 Jul 2019 08:36:09 +0700 Subject: [PATCH 068/172] Added Cpu::toJSON --- src/backend/cpu/Cpu.cpp | 39 +++++++++++++++++++++++++++++++++++++++ src/backend/cpu/Cpu.h | 2 ++ src/core/Miner.cpp | 8 +------- 3 files changed, 42 insertions(+), 7 deletions(-) diff --git a/src/backend/cpu/Cpu.cpp b/src/backend/cpu/Cpu.cpp index 
603ce634..d32b2d8e 100644 --- a/src/backend/cpu/Cpu.cpp +++ b/src/backend/cpu/Cpu.cpp @@ -27,6 +27,7 @@ #include "backend/cpu/Cpu.h" +#include "rapidjson/document.h" #if defined(XMRIG_FEATURE_HWLOC) @@ -41,6 +42,18 @@ static xmrig::ICpuInfo *cpuInfo = nullptr; +const char *xmrig::Cpu::backend() +{ +# if defined(XMRIG_FEATURE_HWLOC) + return "hwloc"; +# elif defined(XMRIG_FEATURE_LIBCPUID) + return "libcpuid"; +# else + return "basic"; +# endif +} + + xmrig::ICpuInfo *xmrig::Cpu::info() { assert(cpuInfo != nullptr); @@ -49,6 +62,32 @@ xmrig::ICpuInfo *xmrig::Cpu::info() } +rapidjson::Value xmrig::Cpu::toJSON(rapidjson::Document &doc) +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + ICpuInfo *i = info(); + Value cpu(kObjectType); + Assembly assembly(i->assembly()); + + cpu.AddMember("brand", StringRef(i->brand()), allocator); + cpu.AddMember("aes", i->hasAES(), allocator); + cpu.AddMember("avx2", i->hasAVX2(), allocator); + cpu.AddMember("x64", i->isX64(), allocator); + cpu.AddMember("assembly", StringRef(assembly.toString()), allocator); + cpu.AddMember("l2", static_cast(i->L2()), allocator); + cpu.AddMember("l3", static_cast(i->L3()), allocator); + cpu.AddMember("cores", static_cast(i->cores()), allocator); + cpu.AddMember("threads", static_cast(i->threads()), allocator); + cpu.AddMember("packages", static_cast(i->packages()), allocator); + cpu.AddMember("nodes", static_cast(i->nodes()), allocator); + cpu.AddMember("backend", StringRef(backend()), allocator); + + return cpu; +} + + void xmrig::Cpu::init() { assert(cpuInfo == nullptr); diff --git a/src/backend/cpu/Cpu.h b/src/backend/cpu/Cpu.h index 23cf37e6..4638b568 100644 --- a/src/backend/cpu/Cpu.h +++ b/src/backend/cpu/Cpu.h @@ -35,7 +35,9 @@ namespace xmrig { class Cpu { public: + static const char *backend(); static ICpuInfo *info(); + static rapidjson::Value toJSON(rapidjson::Document &doc); static void init(); static void release(); diff --git a/src/core/Miner.cpp 
b/src/core/Miner.cpp index d0bfa78f..526c4fbf 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -130,16 +130,10 @@ public: using namespace rapidjson; auto &allocator = doc.GetAllocator(); - Value cpu(kObjectType); - cpu.AddMember("brand", StringRef(Cpu::info()->brand()), allocator); - cpu.AddMember("aes", Cpu::info()->hasAES(), allocator); - cpu.AddMember("x64", Cpu::info()->isX64(), allocator); - cpu.AddMember("sockets", static_cast(Cpu::info()->packages()), allocator); - reply.AddMember("version", APP_VERSION, allocator); reply.AddMember("kind", APP_KIND, allocator); reply.AddMember("ua", StringRef(Platform::userAgent()), allocator); - reply.AddMember("cpu", cpu, allocator); + reply.AddMember("cpu", Cpu::toJSON(doc), allocator); if (version == 1) { reply.AddMember("hugepages", false, allocator); From a6a0995d547f050a9984644e0a70f545b71f1d39 Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 23 Jul 2019 14:19:41 +0700 Subject: [PATCH 069/172] Better method to get hwloc version. --- src/backend/cpu/Cpu.cpp | 14 +------------- src/backend/cpu/Cpu.h | 1 - src/backend/cpu/interfaces/ICpuInfo.h | 1 + src/backend/cpu/platform/AdvancedCpuInfo.cpp | 4 +++- src/backend/cpu/platform/AdvancedCpuInfo.h | 4 +++- src/backend/cpu/platform/BasicCpuInfo.cpp | 6 ++++++ src/backend/cpu/platform/BasicCpuInfo.h | 1 + src/backend/cpu/platform/BasicCpuInfo_arm.cpp | 8 +++++++- src/backend/cpu/platform/HwlocCpuInfo.cpp | 10 +++++++++- src/backend/cpu/platform/HwlocCpuInfo.h | 2 ++ src/base/kernel/config/BaseConfig.cpp | 13 ++----------- 11 files changed, 35 insertions(+), 29 deletions(-) diff --git a/src/backend/cpu/Cpu.cpp b/src/backend/cpu/Cpu.cpp index d32b2d8e..886ad15c 100644 --- a/src/backend/cpu/Cpu.cpp +++ b/src/backend/cpu/Cpu.cpp @@ -42,18 +42,6 @@ static xmrig::ICpuInfo *cpuInfo = nullptr; -const char *xmrig::Cpu::backend() -{ -# if defined(XMRIG_FEATURE_HWLOC) - return "hwloc"; -# elif defined(XMRIG_FEATURE_LIBCPUID) - return "libcpuid"; -# else - return "basic"; -# 
endif -} - - xmrig::ICpuInfo *xmrig::Cpu::info() { assert(cpuInfo != nullptr); @@ -82,7 +70,7 @@ rapidjson::Value xmrig::Cpu::toJSON(rapidjson::Document &doc) cpu.AddMember("threads", static_cast(i->threads()), allocator); cpu.AddMember("packages", static_cast(i->packages()), allocator); cpu.AddMember("nodes", static_cast(i->nodes()), allocator); - cpu.AddMember("backend", StringRef(backend()), allocator); + cpu.AddMember("backend", StringRef(i->backend()), allocator); return cpu; } diff --git a/src/backend/cpu/Cpu.h b/src/backend/cpu/Cpu.h index 4638b568..bece97d3 100644 --- a/src/backend/cpu/Cpu.h +++ b/src/backend/cpu/Cpu.h @@ -35,7 +35,6 @@ namespace xmrig { class Cpu { public: - static const char *backend(); static ICpuInfo *info(); static rapidjson::Value toJSON(rapidjson::Document &doc); static void init(); diff --git a/src/backend/cpu/interfaces/ICpuInfo.h b/src/backend/cpu/interfaces/ICpuInfo.h index 7faa7187..daaa39c3 100644 --- a/src/backend/cpu/interfaces/ICpuInfo.h +++ b/src/backend/cpu/interfaces/ICpuInfo.h @@ -48,6 +48,7 @@ public: virtual Assembly::Id assembly() const = 0; virtual bool hasAES() const = 0; virtual bool hasAVX2() const = 0; + virtual const char *backend() const = 0; virtual const char *brand() const = 0; virtual CpuThreads threads(const Algorithm &algorithm) const = 0; virtual size_t cores() const = 0; diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp index b5f2dfd5..f8871d3c 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -24,11 +24,12 @@ #include #include -#include #include +#include #include +#include "3rdparty/libcpuid/libcpuid.h" #include "backend/cpu/platform/AdvancedCpuInfo.h" @@ -66,6 +67,7 @@ xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : cpu_identify(&raw, &data); cpu_brand_string(m_brand, data.brand_str); + snprintf(m_backend, sizeof m_backend, "libcpuid/%s", cpuid_lib_version()); m_threads = 
static_cast(data.total_logical_cpus); m_packages = std::max(threads() / static_cast(data.num_logical_cpus), 1); diff --git a/src/backend/cpu/platform/AdvancedCpuInfo.h b/src/backend/cpu/platform/AdvancedCpuInfo.h index e8225844..51b84c9f 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.h +++ b/src/backend/cpu/platform/AdvancedCpuInfo.h @@ -43,6 +43,7 @@ protected: inline Assembly::Id assembly() const override { return m_assembly; } inline bool hasAES() const override { return m_aes; } inline bool hasAVX2() const override { return m_avx2; } + inline const char *backend() const override { return m_backend; } inline const char *brand() const override { return m_brand; } inline size_t cores() const override { return m_cores; } inline size_t L2() const override { return m_L2; } @@ -56,7 +57,8 @@ private: bool m_aes = false; bool m_avx2 = false; bool m_L2_exclusive = false; - char m_brand[64]; + char m_backend[32]; + char m_brand[64 + 5]; size_t m_cores = 0; size_t m_L2 = 0; size_t m_L3 = 0; diff --git a/src/backend/cpu/platform/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp index 1ceefecc..4bbc8ba0 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -174,6 +174,12 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : } +const char *xmrig::BasicCpuInfo::backend() const +{ + return "basic"; +} + + xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &algorithm) const { if (threads() == 1) { diff --git a/src/backend/cpu/platform/BasicCpuInfo.h b/src/backend/cpu/platform/BasicCpuInfo.h index b05ed9cc..8c548559 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.h +++ b/src/backend/cpu/platform/BasicCpuInfo.h @@ -38,6 +38,7 @@ public: BasicCpuInfo(); protected: + const char *backend() const override; CpuThreads threads(const Algorithm &algorithm) const override; inline Assembly::Id assembly() const override { return m_assembly; } diff --git a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp 
b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp index 3d733535..1e6c3cb7 100644 --- a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp @@ -57,7 +57,13 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : } -xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &algorithm) const +const char *xmrig::BasicCpuInfo::backend() const +{ + return "basic_arm"; +} + + +xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &) const { return CpuThreads(threads()); } diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index b12a3010..156f3ae4 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -71,6 +71,7 @@ inline size_t countByType(hwloc_topology_t topology, hwloc_obj_type_t type) xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), + m_backend(), m_cache() { m_threads = 0; @@ -79,7 +80,14 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), hwloc_topology_init(&topology); hwloc_topology_load(topology); - findCache(hwloc_get_root_obj(topology), [this](hwloc_obj_t found) { this->m_cache[found->attr->cache.depth] += found->attr->cache.size; }); + hwloc_obj_t root = hwloc_get_root_obj(topology); + snprintf(m_backend, sizeof m_backend, "hwloc/%s", hwloc_obj_get_info_by_name(root, "hwlocVersion")); + + findCache(root, [this](hwloc_obj_t found) { + const unsigned depth = found->attr->cache.depth; + + this->m_cache[depth] += found->attr->cache.size; + }); m_threads = countByType(topology, HWLOC_OBJ_PU); m_cores = countByType(topology, HWLOC_OBJ_CORE); diff --git a/src/backend/cpu/platform/HwlocCpuInfo.h b/src/backend/cpu/platform/HwlocCpuInfo.h index 8d626a8a..a6768066 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.h +++ b/src/backend/cpu/platform/HwlocCpuInfo.h @@ -38,6 +38,7 @@ public: HwlocCpuInfo(); protected: + inline const char *backend() const override { return m_backend; } inline size_t cores() const override { 
return m_cores; } inline size_t L2() const override { return m_cache[2]; } inline size_t L3() const override { return m_cache[3]; } @@ -45,6 +46,7 @@ protected: inline size_t packages() const override { return m_packages; } private: + char m_backend[20]; size_t m_cache[5]; size_t m_cores = 0; size_t m_nodes = 0; diff --git a/src/base/kernel/config/BaseConfig.cpp b/src/base/kernel/config/BaseConfig.cpp index c89b032a..4600a1f4 100644 --- a/src/base/kernel/config/BaseConfig.cpp +++ b/src/base/kernel/config/BaseConfig.cpp @@ -35,7 +35,7 @@ #endif #ifdef XMRIG_FEATURE_HWLOC -# include +# include "backend/cpu/Cpu.h" #endif #ifdef XMRIG_AMD_PROJECT @@ -110,17 +110,8 @@ void xmrig::BaseConfig::printVersions() } # endif - # if defined(XMRIG_FEATURE_HWLOC) -# if defined(HWLOC_VERSION) - snprintf(buf, sizeof buf, "hwloc/%s ", HWLOC_VERSION); -# elif HWLOC_API_VERSION >= 0x20000 - snprintf(buf, sizeof buf, "hwloc/2 "); -# else - snprintf(buf, sizeof buf, "hwloc/1 "); -# endif - - libs += buf; + libs += Cpu::info()->backend(); # endif Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13slibuv/%s %s"), "LIBS", uv_version_string(), libs.c_str()); From 02d7c2f977c1fead92d561585b1e1a07708d0fc0 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 24 Jul 2019 03:24:01 +0700 Subject: [PATCH 070/172] Implemented autoconfig via hwloc. 
--- src/backend/cpu/platform/HwlocCpuInfo.cpp | 153 ++++++++++++++++++++-- src/backend/cpu/platform/HwlocCpuInfo.h | 7 + 2 files changed, 150 insertions(+), 10 deletions(-) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 156f3ae4..cdfbdc59 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -32,7 +32,7 @@ namespace xmrig { -inline bool isCacheObject(hwloc_obj_t obj) +static inline bool isCacheObject(hwloc_obj_t obj) { # if HWLOC_API_VERSION >= 0x20000 return hwloc_obj_type_is_cache(obj->type); @@ -43,23 +43,38 @@ inline bool isCacheObject(hwloc_obj_t obj) template -inline void findCache(hwloc_obj_t obj, func lambda) +static inline void findCache(hwloc_obj_t obj, unsigned min, unsigned max, func lambda) { for (size_t i = 0; i < obj->arity; i++) { if (isCacheObject(obj->children[i])) { - if (obj->children[i]->attr->cache.depth < 2) { + const unsigned depth = obj->children[i]->attr->cache.depth; + if (depth < min || depth > max) { continue; } lambda(obj->children[i]); } - findCache(obj->children[i], lambda); + findCache(obj->children[i], min, max, lambda); } } -inline size_t countByType(hwloc_topology_t topology, hwloc_obj_type_t type) +template +static inline void findByType(hwloc_obj_t obj, hwloc_obj_type_t type, func lambda) +{ + for (size_t i = 0; i < obj->arity; i++) { + if (obj->children[i]->type == type) { + lambda(obj->children[i]); + } + else { + findByType(obj->children[i], type, lambda); + } + } +} + + +static inline size_t countByType(hwloc_topology_t topology, hwloc_obj_type_t type) { const int count = hwloc_get_nbobjs_by_type(topology, type); @@ -67,6 +82,22 @@ inline size_t countByType(hwloc_topology_t topology, hwloc_obj_type_t type) } +static inline size_t countByType(hwloc_obj_t obj, hwloc_obj_type_t type) +{ + size_t count = 0; + findByType(obj, type, [&count](hwloc_obj_t) { count++; }); + + return count; +} + + +static inline bool 
isCacheExclusive(hwloc_obj_t obj) +{ + const char *value = hwloc_obj_get_info_by_name(obj, "Inclusive"); + return value == nullptr || value[0] != '1'; +} + + } // namespace xmrig @@ -83,11 +114,7 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), hwloc_obj_t root = hwloc_get_root_obj(topology); snprintf(m_backend, sizeof m_backend, "hwloc/%s", hwloc_obj_get_info_by_name(root, "hwlocVersion")); - findCache(root, [this](hwloc_obj_t found) { - const unsigned depth = found->attr->cache.depth; - - this->m_cache[depth] += found->attr->cache.size; - }); + findCache(root, 2, 3, [this](hwloc_obj_t found) { this->m_cache[found->attr->cache.depth] += found->attr->cache.size; }); m_threads = countByType(topology, HWLOC_OBJ_PU); m_cores = countByType(topology, HWLOC_OBJ_CORE); @@ -96,3 +123,109 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), hwloc_topology_destroy(topology); } + + +xmrig::CpuThreads xmrig::HwlocCpuInfo::threads(const Algorithm &algorithm) const +{ + if (L2() == 0 && L3() == 0) { + return BasicCpuInfo::threads(algorithm); + } + + hwloc_topology_t topology; + hwloc_topology_init(&topology); + hwloc_topology_load(topology); + + const unsigned depth = L3() > 0 ? 
3 : 2; + + CpuThreads threads; + threads.reserve(m_threads); + + std::vector caches; + caches.reserve(16); + + findCache(hwloc_get_root_obj(topology), depth, depth, [&caches](hwloc_obj_t found) { caches.emplace_back(found); }); + + for (hwloc_obj_t cache : caches) { + processTopLevelCache(cache, algorithm, threads); + } + + hwloc_topology_destroy(topology); + + return threads; +} + + +void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorithm &algorithm, CpuThreads &threads) const +{ + size_t PUs = countByType(cache, HWLOC_OBJ_PU); + if (PUs == 0) { + return; + } + + size_t size = cache->attr->cache.size; + const size_t scratchpad = algorithm.memory(); + + if (cache->attr->cache.depth == 3 && isCacheExclusive(cache)) { + for (size_t i = 0; i < cache->arity; ++i) { + hwloc_obj_t l2 = cache->children[i]; + if (isCacheObject(l2) && l2->attr != nullptr && l2->attr->cache.size >= scratchpad) { + size += scratchpad; + } + } + } + + std::vector cores; + cores.reserve(m_cores); + findByType(cache, HWLOC_OBJ_CORE, [&cores](hwloc_obj_t found) { cores.emplace_back(found); }); + + size_t cacheHashes = (size + (scratchpad / 2)) / scratchpad; + +# ifdef XMRIG_ALGO_CN_GPU + if (algorithm == Algorithm::CN_GPU) { + cacheHashes = PUs; + } +# endif + + if (cacheHashes >= PUs) { + for (hwloc_obj_t core : cores) { + if (core->arity == 0) { + continue; + } + + for (unsigned i = 0; i < core->arity; ++i) { + if (core->children[i]->type == HWLOC_OBJ_PU) { + threads.push_back(CpuThread(1, core->children[i]->os_index)); + } + } + } + + return; + } + + size_t pu_id = 0; + while (cacheHashes > 0 && PUs > 0) { + bool allocated_pu = false; + + for (hwloc_obj_t core : cores) { + if (core->arity <= pu_id || core->children[pu_id]->type != HWLOC_OBJ_PU) { + continue; + } + + cacheHashes--; + PUs--; + + allocated_pu = true; + threads.push_back(CpuThread(1, core->children[pu_id]->os_index)); + + if (cacheHashes == 0) { + break; + } + } + + if (!allocated_pu) { + break; + } + + 
pu_id++; + } +} diff --git a/src/backend/cpu/platform/HwlocCpuInfo.h b/src/backend/cpu/platform/HwlocCpuInfo.h index a6768066..a6cd6b55 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.h +++ b/src/backend/cpu/platform/HwlocCpuInfo.h @@ -29,6 +29,9 @@ #include "backend/cpu/platform/BasicCpuInfo.h" +typedef struct hwloc_obj *hwloc_obj_t; + + namespace xmrig { @@ -38,6 +41,8 @@ public: HwlocCpuInfo(); protected: + CpuThreads threads(const Algorithm &algorithm) const override; + inline const char *backend() const override { return m_backend; } inline size_t cores() const override { return m_cores; } inline size_t L2() const override { return m_cache[2]; } @@ -46,6 +51,8 @@ protected: inline size_t packages() const override { return m_packages; } private: + void processTopLevelCache(hwloc_obj_t obj, const Algorithm &algorithm, CpuThreads &threads) const; + char m_backend[20]; size_t m_cache[5]; size_t m_cores = 0; From fc58795e8fcbe2ea735bd450156139137a781646 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 24 Jul 2019 06:11:54 +0700 Subject: [PATCH 071/172] Fixed cmake. 
--- CMakeLists.txt | 2 +- src/backend/cpu/cpu.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8ae89502..ca1292e4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -258,4 +258,4 @@ if (WITH_DEBUG_LOG) endif() add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${SOURCES_CPUID} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${HTTP_SOURCES} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES} ${CN_GPU_SOURCES}) -target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB}) +target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB} ${HWLOC_LIBRARY}) diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index a8bbd8e8..f0b1a486 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -22,11 +22,11 @@ if (WITH_HWLOC) set(WITH_LIBCPUID OFF) include_directories(${HWLOC_INCLUDE_DIR}) - set(CPUID_LIB ${HWLOC_LIBRARY}) remove_definitions(/DXMRIG_FEATURE_LIBCPUID) add_definitions(/DXMRIG_FEATURE_HWLOC) + set(CPUID_LIB "") set(SOURCES_CPUID src/backend/cpu/platform/BasicCpuInfo.cpp src/backend/cpu/platform/BasicCpuInfo.h From 107f378f7cd3909cb282d584581d81c7f526d684 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 24 Jul 2019 06:35:42 +0700 Subject: [PATCH 072/172] Fixed cmake again. 
--- CMakeLists.txt | 10 ++++------ src/backend/cpu/cpu.cmake | 2 ++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ca1292e4..fd2608b3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,6 +16,10 @@ option(BUILD_STATIC "Build static binary" OFF) option(ARM_TARGET "Force use specific ARM target 8 or 7" 0) option(WITH_EMBEDDED_CONFIG "Enable internal embedded JSON config" OFF) + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") + + include (CheckIncludeFile) include (cmake/cpu.cmake) include (src/base/base.cmake) @@ -143,14 +147,8 @@ endif() add_definitions(/D__STDC_FORMAT_MACROS) add_definitions(/DUNICODE) -set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") - find_package(UV REQUIRED) -if (WITH_HWLOC) - find_package(HWLOC REQUIRED) -endif() - if (WITH_RANDOMX) include_directories(src/crypto/randomx) add_definitions(/DXMRIG_ALGO_RANDOMX) diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index f0b1a486..dab30310 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -19,6 +19,8 @@ set(SOURCES_BACKEND_CPU if (WITH_HWLOC) + find_package(HWLOC REQUIRED) + set(WITH_LIBCPUID OFF) include_directories(${HWLOC_INCLUDE_DIR}) From 39948484bde8721ea48d5b2bd4b17f367d09fcdd Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 25 Jul 2019 06:42:19 +0700 Subject: [PATCH 073/172] #1072 Fixed RandomX seed re-initialization. 
--- src/backend/cpu/CpuWorker.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index 8eb4cdb1..b391654d 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -81,8 +81,9 @@ xmrig::CpuWorker::~CpuWorker() template void xmrig::CpuWorker::allocateRandomX_VM() { + RxDataset *dataset = Rx::dataset(m_job.currentJob().seedHash(), m_job.currentJob().algorithm()); + if (!m_vm) { - RxDataset *dataset = Rx::dataset(m_job.currentJob().seedHash(), m_job.currentJob().algorithm()); m_vm = new RxVm(dataset, m_memory->scratchpad(), !m_hwAES); } } From 7200d754c226a29e5489fd298071a5285ef6943f Mon Sep 17 00:00:00 2001 From: xmrig Date: Thu, 25 Jul 2019 06:44:36 +0700 Subject: [PATCH 074/172] Update CHANGELOG.md --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f6778ac..11cc23d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,6 @@ +# v2.99.1-beta +- [#1072](https://github.com/xmrig/xmrig/issues/1072) Fixed RandomX `seed_hash` re-initialization. + # v2.99.0-beta - [#1050](https://github.com/xmrig/xmrig/pull/1050) Added RandomXL algorithm for [Loki](https://loki.network/), algorithm name used by miner is `randomx/loki` or `rx/loki`. - Added [flexible](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) multi algorithm configuration. 
From a007fae8f17a759f7339b0c1bb226aada436fbf3 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 25 Jul 2019 07:04:50 +0700 Subject: [PATCH 075/172] v2.99.1-beta --- src/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.h b/src/version.h index ef5d82e0..9f66400d 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.1-evo" +#define APP_VERSION "2.99.1-beta" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" From f8108cf6bc29a2bcaa26ae6a281cc4c66a1621ea Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 25 Jul 2019 08:23:32 +0700 Subject: [PATCH 076/172] v2.99.2-evo --- src/version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/version.h b/src/version.h index 9f66400d..d1dfc781 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.1-beta" +#define APP_VERSION "2.99.2-evo" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" @@ -36,7 +36,7 @@ #define APP_VER_MAJOR 2 #define APP_VER_MINOR 99 -#define APP_VER_PATCH 1 +#define APP_VER_PATCH 2 #ifdef _MSC_VER # if (_MSC_VER >= 1920) From 2876702ea26d29454728877e76e9ce585bb69113 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 25 Jul 2019 11:24:27 +0700 Subject: [PATCH 077/172] Added cmake option HWLOC_DEBUG. 
--- CMakeLists.txt | 4 +- src/backend/cpu/cpu.cmake | 4 ++ src/backend/cpu/platform/BasicCpuInfo.cpp | 2 +- src/backend/cpu/platform/BasicCpuInfo.h | 2 +- src/backend/cpu/platform/HwlocCpuInfo.cpp | 57 ++++++++++++++++------- src/backend/cpu/platform/HwlocCpuInfo.h | 3 ++ 6 files changed, 53 insertions(+), 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index fd2608b3..5ef60842 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,9 +12,11 @@ option(WITH_HTTP "HTTP protocol support (client/server)" ON) option(WITH_DEBUG_LOG "Enable debug log output" OFF) option(WITH_TLS "Enable OpenSSL support" ON) option(WITH_ASM "Enable ASM PoW implementations" ON) +option(WITH_EMBEDDED_CONFIG "Enable internal embedded JSON config" OFF) + option(BUILD_STATIC "Build static binary" OFF) option(ARM_TARGET "Force use specific ARM target 8 or 7" 0) -option(WITH_EMBEDDED_CONFIG "Enable internal embedded JSON config" OFF) +option(HWLOC_DEBUG "Enable hwloc debug helpers and log" OFF) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index dab30310..b685d7e4 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -28,6 +28,10 @@ if (WITH_HWLOC) remove_definitions(/DXMRIG_FEATURE_LIBCPUID) add_definitions(/DXMRIG_FEATURE_HWLOC) + if (HWLOC_DEBUG) + add_definitions(/DXMRIG_HWLOC_DEBUG) + endif() + set(CPUID_LIB "") set(SOURCES_CPUID src/backend/cpu/platform/BasicCpuInfo.cpp diff --git a/src/backend/cpu/platform/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp index 4bbc8ba0..2b63edba 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -141,10 +141,10 @@ static inline bool has_avx2() xmrig::BasicCpuInfo::BasicCpuInfo() : + m_brand(), m_threads(std::thread::hardware_concurrency()), m_assembly(Assembly::NONE), m_aes(has_aes_ni()), - m_brand(), m_avx2(has_avx2()) { cpu_brand_string(m_brand); diff --git 
a/src/backend/cpu/platform/BasicCpuInfo.h b/src/backend/cpu/platform/BasicCpuInfo.h index 8c548559..6cf25714 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.h +++ b/src/backend/cpu/platform/BasicCpuInfo.h @@ -53,12 +53,12 @@ protected: inline size_t threads() const override { return m_threads; } protected: + char m_brand[64 + 6]; size_t m_threads; private: Assembly m_assembly; bool m_aes; - char m_brand[64 + 6]; const bool m_avx2; }; diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index cdfbdc59..13ff1ee6 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -23,6 +23,12 @@ */ +#ifdef XMRIG_HWLOC_DEBUG +# include +#endif + + +#include #include @@ -107,21 +113,46 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), { m_threads = 0; - hwloc_topology_t topology; - hwloc_topology_init(&topology); - hwloc_topology_load(topology); + hwloc_topology_init(&m_topology); + hwloc_topology_load(m_topology); - hwloc_obj_t root = hwloc_get_root_obj(topology); +# ifdef XMRIG_HWLOC_DEBUG +# if defined(UV_VERSION_HEX) && UV_VERSION_HEX >= 0x010c00 + { + char env[520] = { 0 }; + size_t size = sizeof(env); + + if (uv_os_getenv("HWLOC_XMLFILE", env, &size) == 0) { + printf("use HWLOC XML file: \"%s\"\n", env); + } + } +# endif + + std::vector packages; + findByType(hwloc_get_root_obj(m_topology), HWLOC_OBJ_PACKAGE, [&packages](hwloc_obj_t found) { packages.emplace_back(found); }); + if (packages.size()) { + const char *value = hwloc_obj_get_info_by_name(packages[0], "CPUModel"); + if (value) { + strncpy(m_brand, value, 64); + } + } +# endif + + hwloc_obj_t root = hwloc_get_root_obj(m_topology); snprintf(m_backend, sizeof m_backend, "hwloc/%s", hwloc_obj_get_info_by_name(root, "hwlocVersion")); findCache(root, 2, 3, [this](hwloc_obj_t found) { this->m_cache[found->attr->cache.depth] += found->attr->cache.size; }); - m_threads = countByType(topology, HWLOC_OBJ_PU); - m_cores = 
countByType(topology, HWLOC_OBJ_CORE); - m_nodes = countByType(topology, HWLOC_OBJ_NUMANODE); - m_packages = countByType(topology, HWLOC_OBJ_PACKAGE); + m_threads = countByType(m_topology, HWLOC_OBJ_PU); + m_cores = countByType(m_topology, HWLOC_OBJ_CORE); + m_nodes = std::max(countByType(m_topology, HWLOC_OBJ_NUMANODE), 1); + m_packages = countByType(m_topology, HWLOC_OBJ_PACKAGE); +} - hwloc_topology_destroy(topology); + +xmrig::HwlocCpuInfo::~HwlocCpuInfo() +{ + hwloc_topology_destroy(m_topology); } @@ -131,10 +162,6 @@ xmrig::CpuThreads xmrig::HwlocCpuInfo::threads(const Algorithm &algorithm) const return BasicCpuInfo::threads(algorithm); } - hwloc_topology_t topology; - hwloc_topology_init(&topology); - hwloc_topology_load(topology); - const unsigned depth = L3() > 0 ? 3 : 2; CpuThreads threads; @@ -143,14 +170,12 @@ xmrig::CpuThreads xmrig::HwlocCpuInfo::threads(const Algorithm &algorithm) const std::vector caches; caches.reserve(16); - findCache(hwloc_get_root_obj(topology), depth, depth, [&caches](hwloc_obj_t found) { caches.emplace_back(found); }); + findCache(hwloc_get_root_obj(m_topology), depth, depth, [&caches](hwloc_obj_t found) { caches.emplace_back(found); }); for (hwloc_obj_t cache : caches) { processTopLevelCache(cache, algorithm, threads); } - hwloc_topology_destroy(topology); - return threads; } diff --git a/src/backend/cpu/platform/HwlocCpuInfo.h b/src/backend/cpu/platform/HwlocCpuInfo.h index a6cd6b55..4ba0f2fb 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.h +++ b/src/backend/cpu/platform/HwlocCpuInfo.h @@ -30,6 +30,7 @@ typedef struct hwloc_obj *hwloc_obj_t; +typedef struct hwloc_topology *hwloc_topology_t; namespace xmrig { @@ -39,6 +40,7 @@ class HwlocCpuInfo : public BasicCpuInfo { public: HwlocCpuInfo(); + ~HwlocCpuInfo() override; protected: CpuThreads threads(const Algorithm &algorithm) const override; @@ -54,6 +56,7 @@ private: void processTopLevelCache(hwloc_obj_t obj, const Algorithm &algorithm, CpuThreads &threads) const; char 
m_backend[20]; + hwloc_topology_t m_topology; size_t m_cache[5]; size_t m_cores = 0; size_t m_nodes = 0; From 4a32494060990f3e30fef5f5c445f866cba635c1 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 25 Jul 2019 12:20:59 +0700 Subject: [PATCH 078/172] Added option "init-threads". --- src/backend/cpu/CpuConfig.cpp | 16 ++++++++++++++-- src/backend/cpu/CpuConfig.h | 1 + src/crypto/rx/Rx.cpp | 20 +++++++++++++++----- src/crypto/rx/Rx.h | 1 + 4 files changed, 31 insertions(+), 7 deletions(-) diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index 4c86ceea..039c65f4 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -29,12 +29,18 @@ #include "rapidjson/document.h" +#ifdef XMRIG_ALGO_RANDOMX +# include "crypto/rx/Rx.h" +#endif + + namespace xmrig { static const char *kCn = "cn"; static const char *kEnabled = "enabled"; static const char *kHugePages = "huge-pages"; static const char *kHwAes = "hw-aes"; +static const char *kInitThreads = "init-threads"; static const char *kPriority = "priority"; #ifdef XMRIG_FEATURE_ASM @@ -89,6 +95,7 @@ rapidjson::Value xmrig::CpuConfig::toJSON(rapidjson::Document &doc) const obj.AddMember(StringRef(kHugePages), m_hugePages, allocator); obj.AddMember(StringRef(kHwAes), m_aes == AES_AUTO ? Value(kNullType) : Value(m_aes == AES_HW), allocator); obj.AddMember(StringRef(kPriority), priority() != -1 ? 
Value(priority()) : Value(kNullType), allocator); + obj.AddMember(StringRef(kInitThreads), m_initThreads, allocator); # ifdef XMRIG_FEATURE_ASM obj.AddMember(StringRef(kAsm), m_assembly.toJSON(), allocator); @@ -122,8 +129,9 @@ std::vector xmrig::CpuConfig::get(const Miner *miner, cons void xmrig::CpuConfig::read(const rapidjson::Value &value) { if (value.IsObject()) { - m_enabled = Json::getBool(value, kEnabled, m_enabled); - m_hugePages = Json::getBool(value, kHugePages, m_hugePages); + m_enabled = Json::getBool(value, kEnabled, m_enabled); + m_hugePages = Json::getBool(value, kHugePages, m_hugePages); + m_initThreads = Json::getInt(value, kInitThreads, m_initThreads); setAesMode(Json::getValue(value, kHwAes)); setPriority(Json::getInt(value, kPriority, -1)); @@ -132,6 +140,10 @@ void xmrig::CpuConfig::read(const rapidjson::Value &value) m_assembly = Json::getValue(value, kAsm); # endif +# ifdef XMRIG_ALGO_RANDOMX + Rx::setInitThreads(m_initThreads); +# endif + if (!m_threads.read(value)) { generate(); } diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h index 5b2f3f86..0c44972f 100644 --- a/src/backend/cpu/CpuConfig.h +++ b/src/backend/cpu/CpuConfig.h @@ -69,6 +69,7 @@ private: bool m_enabled = true; bool m_hugePages = true; bool m_shouldSave = false; + int m_initThreads = -1; int m_priority = -1; Threads m_threads; }; diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 4125d81f..6f208596 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -61,8 +61,8 @@ public: inline void unlock() { uv_mutex_unlock(&mutex); } - RxDataset *dataset = nullptr; - uint32_t initThreads = std::thread::hardware_concurrency(); + int initThreads = -1; + RxDataset *dataset = nullptr; uv_mutex_t mutex; }; @@ -121,13 +121,15 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor } if (!d_ptr->dataset->isReady(seed, algorithm)) { - const uint64_t ts = Chrono::steadyMSecs(); + const uint64_t ts = Chrono::steadyMSecs(); + 
const uint32_t threads = d_ptr->initThreads < 1 ? static_cast(Cpu::info()->threads()) + : static_cast(d_ptr->initThreads); if (d_ptr->dataset->get() != nullptr) { LOG_INFO("%s" MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."), tag, algorithm.shortName(), - d_ptr->initThreads, + threads, Buffer::toHex(seed, 8).data() ); } @@ -139,7 +141,7 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor ); } - d_ptr->dataset->init(seed, algorithm, d_ptr->initThreads); + d_ptr->dataset->init(seed, algorithm, threads); LOG_INFO("%s" GREEN(" init done") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, Chrono::steadyMSecs() - ts); } @@ -151,6 +153,14 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor } +void xmrig::Rx::setInitThreads(int count) +{ + d_ptr->lock(); + d_ptr->initThreads = count; + d_ptr->unlock(); +} + + void xmrig::Rx::stop() { delete d_ptr; diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index 63bb2e14..1758c08e 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -44,6 +44,7 @@ class Rx public: static RxDataset *dataset(); static RxDataset *dataset(const uint8_t *seed, const Algorithm &algorithm, bool hugePages = true); + static void setInitThreads(int count); static void stop(); }; From d4772cbd5dcc4c56515d6f6e1224c222d47952cc Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 25 Jul 2019 19:11:07 +0700 Subject: [PATCH 079/172] Fixed dataset initialization speed on Linux if thread affinity was used. 
--- src/backend/common/Worker.cpp | 1 + src/backend/common/Worker.h | 1 + src/backend/cpu/CpuBackend.cpp | 2 +- src/backend/cpu/CpuConfig.cpp | 9 --- src/backend/cpu/CpuConfig.h | 1 + src/backend/cpu/CpuWorker.cpp | 10 +++- src/core/Miner.cpp | 7 +++ src/crypto/rx/Rx.cpp | 104 +++++++++++++++++++++------------ src/crypto/rx/Rx.h | 11 +++- 9 files changed, 95 insertions(+), 51 deletions(-) diff --git a/src/backend/common/Worker.cpp b/src/backend/common/Worker.cpp index 98da61d4..5676f2c4 100644 --- a/src/backend/common/Worker.cpp +++ b/src/backend/common/Worker.cpp @@ -30,6 +30,7 @@ xmrig::Worker::Worker(size_t id, int64_t affinity, int priority) : + m_affinity(affinity), m_id(id), m_hashCount(0), m_timestamp(0), diff --git a/src/backend/common/Worker.h b/src/backend/common/Worker.h index faebf128..a601ab05 100644 --- a/src/backend/common/Worker.h +++ b/src/backend/common/Worker.h @@ -50,6 +50,7 @@ public: protected: void storeStats(); + const int64_t m_affinity; const size_t m_id; std::atomic m_hashCount; std::atomic m_timestamp; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index ffc21597..2fc34b59 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -295,7 +295,7 @@ rapidjson::Value xmrig::CpuBackend::toJSON(rapidjson::Document &doc) const # ifdef XMRIG_ALGO_RANDOMX if (d_ptr->algo.family() == Algorithm::RANDOM_X) { - RxDataset *dataset = Rx::dataset(); + RxDataset *dataset = Rx::dataset(-1); // FIXME if (dataset) { const auto rxPages = dataset->hugePages(); pages[0] += rxPages.first; diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index 039c65f4..ba66a45f 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -29,11 +29,6 @@ #include "rapidjson/document.h" -#ifdef XMRIG_ALGO_RANDOMX -# include "crypto/rx/Rx.h" -#endif - - namespace xmrig { static const char *kCn = "cn"; @@ -140,10 +135,6 @@ void xmrig::CpuConfig::read(const rapidjson::Value &value) 
m_assembly = Json::getValue(value, kAsm); # endif -# ifdef XMRIG_ALGO_RANDOMX - Rx::setInitThreads(m_initThreads); -# endif - if (!m_threads.read(value)) { generate(); } diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h index 0c44972f..fc06bab9 100644 --- a/src/backend/cpu/CpuConfig.h +++ b/src/backend/cpu/CpuConfig.h @@ -56,6 +56,7 @@ public: inline bool isShouldSave() const { return m_shouldSave; } inline const Assembly &assembly() const { return m_assembly; } inline const Threads &threads() const { return m_threads; } + inline int initThreads() const { return m_initThreads; } inline int priority() const { return m_priority; } private: diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index b391654d..28642cd7 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -81,7 +81,15 @@ xmrig::CpuWorker::~CpuWorker() template void xmrig::CpuWorker::allocateRandomX_VM() { - RxDataset *dataset = Rx::dataset(m_job.currentJob().seedHash(), m_job.currentJob().algorithm()); + while (!Rx::isReady(m_job.currentJob(), m_affinity)) { + std::this_thread::sleep_for(std::chrono::milliseconds(200)); + + if (Nonce::sequence(Nonce::CPU) == 0) { + break; + } + } + + RxDataset *dataset = Rx::dataset(m_affinity); if (!m_vm) { m_vm = new RxVm(dataset, m_memory->scratchpad(), !m_hwAES); diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 526c4fbf..42f4247e 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -24,6 +24,7 @@ #include +#include #include @@ -38,6 +39,7 @@ #include "core/Controller.h" #include "core/Miner.h" #include "crypto/common/Nonce.h" +#include "crypto/rx/Rx.h" #include "rapidjson/document.h" #include "version.h" @@ -362,6 +364,11 @@ void xmrig::Miner::setJob(const Job &job, bool donate) d_ptr->userJobId = job.id(); } +# ifdef XMRIG_ALGO_RANDOMX + const CpuConfig &cpu = d_ptr->controller->config()->cpu(); + Rx::init(job, cpu.initThreads(), cpu.isHugePages()); +# endif + 
uv_rwlock_wrunlock(&d_ptr->rwlock); d_ptr->handleJobChange(reset); diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 6f208596..88c9fbb2 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -31,6 +31,7 @@ #include "backend/cpu/Cpu.h" #include "base/io/log/Log.h" +#include "base/net/stratum/Job.h" #include "base/tools/Buffer.h" #include "base/tools/Chrono.h" #include "crypto/rx/Rx.h" @@ -52,7 +53,10 @@ public: inline ~RxPrivate() { - delete dataset; + for (RxDataset *dataset : datasets) { + delete dataset; + } + uv_mutex_destroy(&mutex); } @@ -61,35 +65,82 @@ public: inline void unlock() { uv_mutex_unlock(&mutex); } - int initThreads = -1; - RxDataset *dataset = nullptr; + std::vector datasets; uv_mutex_t mutex; }; static RxPrivate *d_ptr = new RxPrivate(); -static const char *tag = BLUE_BG(" rx "); +static const char *tag = BLUE_BG(WHITE_BOLD_S " rx "); } // namespace xmrig -xmrig::RxDataset *xmrig::Rx::dataset() +bool xmrig::Rx::isReady(const Job &job, int64_t) { d_ptr->lock(); - RxDataset *dataset = d_ptr->dataset; + const bool rc = isReady(job.seedHash(), job.algorithm()); + d_ptr->unlock(); + + return rc; +} + + + +xmrig::RxDataset *xmrig::Rx::dataset(int64_t) +{ + d_ptr->lock(); + RxDataset *dataset = d_ptr->datasets[0]; d_ptr->unlock(); return dataset; } -xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algorithm, bool hugePages) +void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages) +{ + d_ptr->lock(); + if (d_ptr->datasets.empty()) { + d_ptr->datasets.push_back(nullptr); + } + + if (isReady(job.seedHash(), job.algorithm())) { + d_ptr->unlock(); + + return; + } + + const uint32_t threads = initThreads < 1 ? 
static_cast(Cpu::info()->threads()) + : static_cast(initThreads); + + std::thread thread(initDataset, 0, job.seedHash(), job.algorithm(), threads, hugePages); + thread.detach(); + + d_ptr->unlock(); +} + + +void xmrig::Rx::stop() +{ + delete d_ptr; + + d_ptr = nullptr; +} + + +bool xmrig::Rx::isReady(const uint8_t *seed, const Algorithm &algorithm) +{ + return !d_ptr->datasets.empty() && d_ptr->datasets[0] != nullptr && d_ptr->datasets[0]->isReady(seed, algorithm); +} + + +void xmrig::Rx::initDataset(size_t index, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads, bool hugePages) { d_ptr->lock(); - if (!d_ptr->dataset) { + if (!d_ptr->datasets[index]) { const uint64_t ts = Chrono::steadyMSecs(); LOG_INFO("%s" MAGENTA_BOLD(" allocate") CYAN_BOLD(" %zu MiB") BLACK_BOLD(" (%zu+%zu) for RandomX dataset & cache"), @@ -99,10 +150,10 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor RxCache::size() / 1024 / 1024 ); - d_ptr->dataset = new RxDataset(hugePages); + d_ptr->datasets[index] = new RxDataset(hugePages); - if (d_ptr->dataset->get() != nullptr) { - const auto hugePages = d_ptr->dataset->hugePages(); + if (d_ptr->datasets[index]->get() != nullptr) { + const auto hugePages = d_ptr->datasets[index]->hugePages(); const double percent = hugePages.first == 0 ? 0.0 : static_cast(hugePages.first) / hugePages.second * 100.0; LOG_INFO("%s" GREEN(" allocate done") " huge pages %s%u/%u %1.0f%%" CLEAR " %sJIT" BLACK_BOLD(" (%" PRIu64 " ms)"), @@ -111,7 +162,7 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor hugePages.first, hugePages.second, percent, - d_ptr->dataset->cache()->isJIT() ? GREEN_BOLD_S "+" : RED_BOLD_S "-", + d_ptr->datasets[index]->cache()->isJIT() ? 
GREEN_BOLD_S "+" : RED_BOLD_S "-", Chrono::steadyMSecs() - ts ); } @@ -120,12 +171,10 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor } } - if (!d_ptr->dataset->isReady(seed, algorithm)) { - const uint64_t ts = Chrono::steadyMSecs(); - const uint32_t threads = d_ptr->initThreads < 1 ? static_cast(Cpu::info()->threads()) - : static_cast(d_ptr->initThreads); + if (!d_ptr->datasets[index]->isReady(seed, algorithm)) { + const uint64_t ts = Chrono::steadyMSecs(); - if (d_ptr->dataset->get() != nullptr) { + if (d_ptr->datasets[index]->get() != nullptr) { LOG_INFO("%s" MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."), tag, algorithm.shortName(), @@ -141,29 +190,10 @@ xmrig::RxDataset *xmrig::Rx::dataset(const uint8_t *seed, const Algorithm &algor ); } - d_ptr->dataset->init(seed, algorithm, threads); + d_ptr->datasets[index]->init(seed, algorithm, threads); LOG_INFO("%s" GREEN(" init done") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, Chrono::steadyMSecs() - ts); } - RxDataset *dataset = d_ptr->dataset; - d_ptr->unlock(); - - return dataset; -} - - -void xmrig::Rx::setInitThreads(int count) -{ - d_ptr->lock(); - d_ptr->initThreads = count; d_ptr->unlock(); } - - -void xmrig::Rx::stop() -{ - delete d_ptr; - - d_ptr = nullptr; -} diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index 1758c08e..456dfe4e 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -37,15 +37,20 @@ namespace xmrig class Algorithm; class RxDataset; +class Job; class Rx { public: - static RxDataset *dataset(); - static RxDataset *dataset(const uint8_t *seed, const Algorithm &algorithm, bool hugePages = true); - static void setInitThreads(int count); + static bool isReady(const Job &job, int64_t affinity); + static RxDataset *dataset(int64_t affinity); + static void init(const Job &job, int initThreads, bool hugePages); static void stop(); + +private: + static bool isReady(const uint8_t 
*seed, const Algorithm &algorithm); + static void initDataset(size_t index, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads, bool hugePages); }; From a5e8b31d55b798fb91b49d05e8416fe2baa519bc Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 25 Jul 2019 19:27:12 +0700 Subject: [PATCH 080/172] Fixed crash. --- src/crypto/rx/Rx.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 88c9fbb2..d6d49c65 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -101,6 +101,10 @@ xmrig::RxDataset *xmrig::Rx::dataset(int64_t) void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages) { + if (job.algorithm().family() != Algorithm::RANDOM_X) { + return; + } + d_ptr->lock(); if (d_ptr->datasets.empty()) { d_ptr->datasets.push_back(nullptr); From 35b3377d4569ee5e652d122322a00daea1f39f77 Mon Sep 17 00:00:00 2001 From: SChernykh Date: Thu, 25 Jul 2019 16:38:33 +0200 Subject: [PATCH 081/172] Correct buffer size for fillAes4Rx4 More bytes than needed were filled. 
--- src/crypto/randomx/virtual_machine.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crypto/randomx/virtual_machine.cpp b/src/crypto/randomx/virtual_machine.cpp index 6560dc95..c216de69 100644 --- a/src/crypto/randomx/virtual_machine.cpp +++ b/src/crypto/randomx/virtual_machine.cpp @@ -121,7 +121,7 @@ namespace randomx { template void VmBase::generateProgram(void* seed) { - fillAes4Rx4(seed, sizeof(program), &program); + fillAes4Rx4(seed, 128 + RandomX_CurrentConfig.ProgramSize * 8, &program); } template class VmBase; From ffa6bda1062cfd1be7fc5075fc2912b0979edd2b Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 26 Jul 2019 15:29:06 +0700 Subject: [PATCH 082/172] Added VirtualMemory::bindToNUMANode --- CMakeLists.txt | 1 + src/Summary.cpp | 16 ++++-- src/backend/common/Worker.cpp | 3 ++ src/backend/cpu/platform/HwlocCpuInfo.cpp | 7 +++ src/backend/cpu/platform/HwlocCpuInfo.h | 9 ++++ src/crypto/common/VirtualMemory.cpp | 66 +++++++++++++++++++++++ src/crypto/common/VirtualMemory.h | 1 + 7 files changed, 98 insertions(+), 5 deletions(-) create mode 100644 src/crypto/common/VirtualMemory.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 5ef60842..42dcdb8d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -107,6 +107,7 @@ set(SOURCES_CRYPTO src/crypto/common/Algorithm.cpp src/crypto/common/keccak.cpp src/crypto/common/Nonce.cpp + src/crypto/common/VirtualMemory.cpp ) if (WIN32) diff --git a/src/Summary.cpp b/src/Summary.cpp index 7b22ab34..ab6b7b1e 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -62,7 +62,7 @@ inline static const char *asmName(Assembly::Id assembly) static void print_memory(Config *) { # ifdef _WIN32 Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s") "%s", - "HUGE PAGES", VirtualMemory::isHugepagesAvailable() ? GREEN_BOLD("available") : RED_BOLD("unavailable")); + "HUGE PAGES", VirtualMemory::isHugepagesAvailable() ? 
GREEN_BOLD("permission granted") : RED_BOLD("unavailable")); # endif } @@ -71,22 +71,28 @@ static void print_cpu(Config *) { const ICpuInfo *info = Cpu::info(); - Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%zu/%zu)") " %sx64 %sAES %sAVX2", + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%zu)") " %sx64 %sAES %sAVX2", "CPU", info->brand(), info->packages(), - info->nodes(), info->isX64() ? GREEN_BOLD_S : RED_BOLD_S "-", info->hasAES() ? GREEN_BOLD_S : RED_BOLD_S "-", info->hasAVX2() ? GREEN_BOLD_S : RED_BOLD_S "-" ); # if defined(XMRIG_FEATURE_LIBCPUID) || defined (XMRIG_FEATURE_HWLOC) - Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("L2:") WHITE_BOLD("%.1f MB") BLACK_BOLD(" L3:") WHITE_BOLD("%.1f MB") BLACK_BOLD(" cores:") CYAN_BOLD("%zu") BLACK_BOLD(" threads:") CYAN_BOLD("%zu"), - "", + Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("L2:") WHITE_BOLD("%.1f MB") BLACK_BOLD(" L3:") WHITE_BOLD("%.1f MB") + CYAN_BOLD(" %zu") "C" BLACK_BOLD("/") CYAN_BOLD("%zu") "T" +# ifdef XMRIG_FEATURE_HWLOC + BLACK_BOLD(" NUMA:") CYAN_BOLD("%zu") +# endif + , "", info->L2() / 1048576.0, info->L3() / 1048576.0, info->cores(), info->threads() +# ifdef XMRIG_FEATURE_HWLOC + , info->nodes() +# endif ); # else Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("threads:") CYAN_BOLD("%zu"), diff --git a/src/backend/common/Worker.cpp b/src/backend/common/Worker.cpp index 5676f2c4..92438b39 100644 --- a/src/backend/common/Worker.cpp +++ b/src/backend/common/Worker.cpp @@ -27,6 +27,7 @@ #include "backend/common/Worker.h" #include "base/kernel/Platform.h" #include "base/tools/Chrono.h" +#include "crypto/common/VirtualMemory.h" xmrig::Worker::Worker(size_t id, int64_t affinity, int priority) : @@ -36,7 +37,9 @@ xmrig::Worker::Worker(size_t id, int64_t affinity, int priority) : m_timestamp(0), m_count(0) { + VirtualMemory::bindToNUMANode(affinity); Platform::trySetThreadAffinity(affinity); + Platform::setThreadPriority(priority); } diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp 
b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 13ff1ee6..253d5108 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -38,6 +38,9 @@ namespace xmrig { +uint32_t HwlocCpuInfo::m_features = 0; + + static inline bool isCacheObject(hwloc_obj_t obj) { # if HWLOC_API_VERSION >= 0x20000 @@ -147,6 +150,10 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), m_cores = countByType(m_topology, HWLOC_OBJ_CORE); m_nodes = std::max(countByType(m_topology, HWLOC_OBJ_NUMANODE), 1); m_packages = countByType(m_topology, HWLOC_OBJ_PACKAGE); + + if (nodes() > 1 && hwloc_topology_get_support(m_topology)->membind->set_thisthread_membind) { + m_features |= SET_THISTHREAD_MEMBIND; + } } diff --git a/src/backend/cpu/platform/HwlocCpuInfo.h b/src/backend/cpu/platform/HwlocCpuInfo.h index 4ba0f2fb..a5678fa9 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.h +++ b/src/backend/cpu/platform/HwlocCpuInfo.h @@ -39,9 +39,16 @@ namespace xmrig { class HwlocCpuInfo : public BasicCpuInfo { public: + enum Feature : uint32_t { + SET_THISTHREAD_MEMBIND = 1 + }; + + HwlocCpuInfo(); ~HwlocCpuInfo() override; + static inline bool has(Feature feature) { return m_features & feature; } + protected: CpuThreads threads(const Algorithm &algorithm) const override; @@ -55,6 +62,8 @@ protected: private: void processTopLevelCache(hwloc_obj_t obj, const Algorithm &algorithm, CpuThreads &threads) const; + static uint32_t m_features; + char m_backend[20]; hwloc_topology_t m_topology; size_t m_cache[5]; diff --git a/src/crypto/common/VirtualMemory.cpp b/src/crypto/common/VirtualMemory.cpp new file mode 100644 index 00000000..285bdf80 --- /dev/null +++ b/src/crypto/common/VirtualMemory.cpp @@ -0,0 +1,66 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018 Lee Clagett + * Copyright 2018-2019 
SChernykh + * Copyright 2018-2019 tevador + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#ifdef XMRIG_FEATURE_HWLOC +# include +# include "backend/cpu/platform/HwlocCpuInfo.h" +#endif + + +#include "base/io/log/Log.h" +#include "crypto/common/VirtualMemory.h" + + +void xmrig::VirtualMemory::bindToNUMANode(int64_t affinity) +{ +# ifdef XMRIG_FEATURE_HWLOC + if (affinity < 0 || !HwlocCpuInfo::has(HwlocCpuInfo::SET_THISTHREAD_MEMBIND)) { + return; + } + + hwloc_topology_t topology; + hwloc_topology_init(&topology); + hwloc_topology_load(topology); + + const int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_PU); + const unsigned puId = static_cast(affinity); + + for (unsigned i = 0; i < hwloc_get_nbobjs_by_depth(topology, depth); i++) { + hwloc_obj_t pu = hwloc_get_obj_by_depth(topology, depth, i); + + if (pu->os_index == puId) { + if (hwloc_set_membind_nodeset(topology, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD) < 0) { + LOG_WARN("CPU #%02u warning: \"can't bind memory\"", puId); + } + + break; + } + } + + hwloc_topology_destroy(topology); +# endif +} diff --git a/src/crypto/common/VirtualMemory.h b/src/crypto/common/VirtualMemory.h index 44f77a23..b6ea680a 100644 --- a/src/crypto/common/VirtualMemory.h +++ b/src/crypto/common/VirtualMemory.h @@ -54,6 +54,7 @@ public: static void *allocateExecutableMemory(size_t size); static void 
*allocateLargePagesMemory(size_t size); + static void bindToNUMANode(int64_t affinity); static void flushInstructionCache(void *p, size_t size); static void freeLargePagesMemory(void *p, size_t size); static void init(bool hugePages); From 35d9c755e076f87203f114ceb2b581246986869d Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 26 Jul 2019 16:22:44 +0700 Subject: [PATCH 083/172] Special case for Intel CPUs with 1 MB L2 cache per core. --- src/backend/cpu/platform/HwlocCpuInfo.cpp | 45 ++++++++++++++++------- src/crypto/rx/Rx.cpp | 2 +- 2 files changed, 33 insertions(+), 14 deletions(-) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 253d5108..4e48f65e 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -189,28 +189,47 @@ xmrig::CpuThreads xmrig::HwlocCpuInfo::threads(const Algorithm &algorithm) const void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorithm &algorithm, CpuThreads &threads) const { + constexpr size_t oneMiB = 1024u * 1024u; + size_t PUs = countByType(cache, HWLOC_OBJ_PU); if (PUs == 0) { return; } - size_t size = cache->attr->cache.size; - const size_t scratchpad = algorithm.memory(); - - if (cache->attr->cache.depth == 3 && isCacheExclusive(cache)) { - for (size_t i = 0; i < cache->arity; ++i) { - hwloc_obj_t l2 = cache->children[i]; - if (isCacheObject(l2) && l2->attr != nullptr && l2->attr->cache.size >= scratchpad) { - size += scratchpad; - } - } - } - std::vector cores; cores.reserve(m_cores); findByType(cache, HWLOC_OBJ_CORE, [&cores](hwloc_obj_t found) { cores.emplace_back(found); }); - size_t cacheHashes = (size + (scratchpad / 2)) / scratchpad; + size_t L3 = cache->attr->cache.size; + size_t L2 = 0; + int L2_associativity = 0; + size_t extra = 0; + const size_t scratchpad = algorithm.memory(); + + if (cache->attr->cache.depth == 3 && isCacheExclusive(cache)) { + for (size_t i = 0; i < cache->arity; ++i) { + 
hwloc_obj_t l2 = cache->children[i]; + if (!isCacheObject(l2) || l2->attr == nullptr) { + continue; + } + + L2 += l2->attr->cache.size; + L2_associativity = l2->attr->cache.associativity; + + if (l2->attr->cache.size >= scratchpad) { + extra += scratchpad; + } + } + } + + if (scratchpad == 2 * oneMiB) { + if (L2 && (cores.size() * oneMiB) == L2 && L2_associativity == 16 && L3 >= L2) { + L3 = L2; + extra = L2; + } + } + + size_t cacheHashes = ((L3 + extra) + (scratchpad / 2)) / scratchpad; # ifdef XMRIG_ALGO_CN_GPU if (algorithm == Algorithm::CN_GPU) { diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index d6d49c65..fe4202e6 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -147,7 +147,7 @@ void xmrig::Rx::initDataset(size_t index, const uint8_t *seed, const Algorithm & if (!d_ptr->datasets[index]) { const uint64_t ts = Chrono::steadyMSecs(); - LOG_INFO("%s" MAGENTA_BOLD(" allocate") CYAN_BOLD(" %zu MiB") BLACK_BOLD(" (%zu+%zu) for RandomX dataset & cache"), + LOG_INFO("%s" MAGENTA_BOLD(" allocate") CYAN_BOLD(" %zu MB") BLACK_BOLD(" (%zu+%zu) for RandomX dataset & cache"), tag, (RxDataset::size() + RxCache::size()) / 1024 / 1024, RxDataset::size() / 1024 / 1024, From e53ae0c15e2b79aef7942ff4fbbc3712766bd0a3 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 26 Jul 2019 17:03:10 +0700 Subject: [PATCH 084/172] Simplified VirtualMemory::bindToNUMANode. 
--- src/crypto/common/VirtualMemory.cpp | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/src/crypto/common/VirtualMemory.cpp b/src/crypto/common/VirtualMemory.cpp index 285bdf80..db27087c 100644 --- a/src/crypto/common/VirtualMemory.cpp +++ b/src/crypto/common/VirtualMemory.cpp @@ -46,19 +46,11 @@ void xmrig::VirtualMemory::bindToNUMANode(int64_t affinity) hwloc_topology_init(&topology); hwloc_topology_load(topology); - const int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_PU); const unsigned puId = static_cast(affinity); - for (unsigned i = 0; i < hwloc_get_nbobjs_by_depth(topology, depth); i++) { - hwloc_obj_t pu = hwloc_get_obj_by_depth(topology, depth, i); - - if (pu->os_index == puId) { - if (hwloc_set_membind_nodeset(topology, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD) < 0) { - LOG_WARN("CPU #%02u warning: \"can't bind memory\"", puId); - } - - break; - } + hwloc_obj_t pu = hwloc_get_pu_obj_by_os_index(topology, puId); + if (pu == nullptr || hwloc_set_membind_nodeset(topology, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD) < 0) { + LOG_WARN("CPU #%02u warning: \"can't bind memory\"", puId); } hwloc_topology_destroy(topology); From 828fc065b0852f9f00543881597b68f0ee5c8718 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 27 Jul 2019 19:41:59 +0700 Subject: [PATCH 085/172] Added support for allocate RandomX dataset on each NUMA node. 
--- src/backend/common/Worker.cpp | 4 +- src/backend/common/Worker.h | 1 + src/backend/cpu/CpuBackend.cpp | 2 +- src/backend/cpu/CpuWorker.cpp | 12 +- src/backend/cpu/platform/HwlocCpuInfo.cpp | 14 +- src/backend/cpu/platform/HwlocCpuInfo.h | 4 +- src/core/Miner.cpp | 2 +- src/crypto/common/VirtualMemory.cpp | 19 ++- src/crypto/common/VirtualMemory.h | 2 +- src/crypto/rx/Rx.cpp | 178 +++++++++++++++------- src/crypto/rx/Rx.h | 10 +- 11 files changed, 178 insertions(+), 70 deletions(-) diff --git a/src/backend/common/Worker.cpp b/src/backend/common/Worker.cpp index 92438b39..91ef0c7a 100644 --- a/src/backend/common/Worker.cpp +++ b/src/backend/common/Worker.cpp @@ -37,9 +37,9 @@ xmrig::Worker::Worker(size_t id, int64_t affinity, int priority) : m_timestamp(0), m_count(0) { - VirtualMemory::bindToNUMANode(affinity); - Platform::trySetThreadAffinity(affinity); + m_node = VirtualMemory::bindToNUMANode(affinity); + Platform::trySetThreadAffinity(affinity); Platform::setThreadPriority(priority); } diff --git a/src/backend/common/Worker.h b/src/backend/common/Worker.h index a601ab05..5f5df925 100644 --- a/src/backend/common/Worker.h +++ b/src/backend/common/Worker.h @@ -54,6 +54,7 @@ protected: const size_t m_id; std::atomic m_hashCount; std::atomic m_timestamp; + uint32_t m_node = 0; uint64_t m_count; }; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 2fc34b59..539ab1f1 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -295,7 +295,7 @@ rapidjson::Value xmrig::CpuBackend::toJSON(rapidjson::Document &doc) const # ifdef XMRIG_ALGO_RANDOMX if (d_ptr->algo.family() == Algorithm::RANDOM_X) { - RxDataset *dataset = Rx::dataset(-1); // FIXME + RxDataset *dataset = Rx::dataset(0); // FIXME if (dataset) { const auto rxPages = dataset->hugePages(); pages[0] += rxPages.first; diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index 28642cd7..cd804199 100644 --- 
a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -24,6 +24,7 @@ */ +#include #include @@ -81,15 +82,20 @@ xmrig::CpuWorker::~CpuWorker() template void xmrig::CpuWorker::allocateRandomX_VM() { - while (!Rx::isReady(m_job.currentJob(), m_affinity)) { + while (!Rx::isReady(m_job.currentJob(), m_node)) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); if (Nonce::sequence(Nonce::CPU) == 0) { - break; + return; } } - RxDataset *dataset = Rx::dataset(m_affinity); + RxDataset *dataset = Rx::dataset(m_node); + assert(dataset != nullptr); + + if (!dataset) { + return; + } if (!m_vm) { m_vm = new RxVm(dataset, m_memory->scratchpad(), !m_hwAES); diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 4e48f65e..eee59a3a 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -38,6 +38,7 @@ namespace xmrig { +std::vector HwlocCpuInfo::m_nodeIndexes; uint32_t HwlocCpuInfo::m_features = 0; @@ -151,8 +152,17 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), m_nodes = std::max(countByType(m_topology, HWLOC_OBJ_NUMANODE), 1); m_packages = countByType(m_topology, HWLOC_OBJ_PACKAGE); - if (nodes() > 1 && hwloc_topology_get_support(m_topology)->membind->set_thisthread_membind) { - m_features |= SET_THISTHREAD_MEMBIND; + if (m_nodes > 1) { + if (hwloc_topology_get_support(m_topology)->membind->set_thisthread_membind) { + m_features |= SET_THISTHREAD_MEMBIND; + } + + m_nodeIndexes.reserve(m_nodes); + hwloc_obj_t node = nullptr; + + while ((node = hwloc_get_next_obj_by_type(m_topology, HWLOC_OBJ_NUMANODE, node)) != nullptr) { + m_nodeIndexes.emplace_back(node->os_index); + } } } diff --git a/src/backend/cpu/platform/HwlocCpuInfo.h b/src/backend/cpu/platform/HwlocCpuInfo.h index a5678fa9..340864f5 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.h +++ b/src/backend/cpu/platform/HwlocCpuInfo.h @@ -47,7 +47,8 @@ public: HwlocCpuInfo(); ~HwlocCpuInfo() 
override; - static inline bool has(Feature feature) { return m_features & feature; } + static inline bool has(Feature feature) { return m_features & feature; } + static inline const std::vector &nodeIndexes() { return m_nodeIndexes; } protected: CpuThreads threads(const Algorithm &algorithm) const override; @@ -62,6 +63,7 @@ protected: private: void processTopLevelCache(hwloc_obj_t obj, const Algorithm &algorithm, CpuThreads &threads) const; + static std::vector m_nodeIndexes; static uint32_t m_features; char m_backend[20]; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 42f4247e..8d0596c2 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -366,7 +366,7 @@ void xmrig::Miner::setJob(const Job &job, bool donate) # ifdef XMRIG_ALGO_RANDOMX const CpuConfig &cpu = d_ptr->controller->config()->cpu(); - Rx::init(job, cpu.initThreads(), cpu.isHugePages()); + Rx::init(job, cpu.initThreads(), cpu.isHugePages(), true); # endif uv_rwlock_wrunlock(&d_ptr->rwlock); diff --git a/src/crypto/common/VirtualMemory.cpp b/src/crypto/common/VirtualMemory.cpp index db27087c..edacf0df 100644 --- a/src/crypto/common/VirtualMemory.cpp +++ b/src/crypto/common/VirtualMemory.cpp @@ -35,11 +35,11 @@ #include "crypto/common/VirtualMemory.h" -void xmrig::VirtualMemory::bindToNUMANode(int64_t affinity) +uint32_t xmrig::VirtualMemory::bindToNUMANode(int64_t affinity) { # ifdef XMRIG_FEATURE_HWLOC if (affinity < 0 || !HwlocCpuInfo::has(HwlocCpuInfo::SET_THISTHREAD_MEMBIND)) { - return; + return 0; } hwloc_topology_t topology; @@ -53,6 +53,21 @@ void xmrig::VirtualMemory::bindToNUMANode(int64_t affinity) LOG_WARN("CPU #%02u warning: \"can't bind memory\"", puId); } + hwloc_obj_t node = nullptr; + uint32_t nodeId = 0; + + while ((node = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NUMANODE, node)) != nullptr) { + if (hwloc_bitmap_intersects(node->cpuset, pu->cpuset)) { + nodeId = node->os_index; + + break; + } + } + hwloc_topology_destroy(topology); + + return nodeId; +# else + 
return 0; # endif } diff --git a/src/crypto/common/VirtualMemory.h b/src/crypto/common/VirtualMemory.h index b6ea680a..ac2f75dd 100644 --- a/src/crypto/common/VirtualMemory.h +++ b/src/crypto/common/VirtualMemory.h @@ -52,9 +52,9 @@ public: return std::pair(isHugePages() ? (align(size()) / 2097152) : 0, align(size()) / 2097152); } + static uint32_t bindToNUMANode(int64_t affinity); static void *allocateExecutableMemory(size_t size); static void *allocateLargePagesMemory(size_t size); - static void bindToNUMANode(int64_t affinity); static void flushInstructionCache(void *p, size_t size); static void freeLargePagesMemory(void *p, size_t size); static void init(bool hugePages); diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index fe4202e6..94d1f30b 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -25,12 +25,20 @@ */ +#include #include #include +#ifdef XMRIG_FEATURE_HWLOC +# include +# include "backend/cpu/platform/HwlocCpuInfo.h" +#endif + + #include "backend/cpu/Cpu.h" #include "base/io/log/Log.h" +#include "base/kernel/Platform.h" #include "base/net/stratum/Job.h" #include "base/tools/Buffer.h" #include "base/tools/Chrono.h" @@ -42,6 +50,9 @@ namespace xmrig { +static const char *tag = BLUE_BG(WHITE_BOLD_S " rx ") " "; + + class RxPrivate { public: @@ -53,10 +64,12 @@ public: inline ~RxPrivate() { - for (RxDataset *dataset : datasets) { - delete dataset; + for (auto const &item : datasets) { + delete item.second; } + datasets.clear(); + uv_mutex_destroy(&mutex); } @@ -65,23 +78,79 @@ public: inline void unlock() { uv_mutex_unlock(&mutex); } - std::vector datasets; + static void allocate(RxPrivate *self, uint32_t nodeId) + { + const uint64_t ts = Chrono::steadyMSecs(); + +# ifdef XMRIG_FEATURE_HWLOC + if (self->numa) { + hwloc_topology_t topology; + hwloc_topology_init(&topology); + hwloc_topology_load(topology); + + hwloc_obj_t node = hwloc_get_numanode_obj_by_os_index(topology, nodeId); + if (node) { + if 
(HwlocCpuInfo::has(HwlocCpuInfo::SET_THISTHREAD_MEMBIND)) { + hwloc_set_membind_nodeset(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD); + } + + Platform::setThreadAffinity(static_cast(hwloc_bitmap_first(node->cpuset))); + } + + hwloc_topology_destroy(topology); + } +# endif + + LOG_INFO("%s" CYAN_BOLD("#%u") MAGENTA_BOLD(" allocate") CYAN_BOLD(" %zu MB") BLACK_BOLD(" (%zu+%zu) for RandomX dataset & cache"), + tag, + nodeId, + (RxDataset::size() + RxCache::size()) / 1024 / 1024, + RxDataset::size() / 1024 / 1024, + RxCache::size() / 1024 / 1024 + ); + + RxDataset *dataset = new RxDataset(self->hugePages); + self->datasets[nodeId] = dataset; + + if (dataset->get() != nullptr) { + const auto hugePages = dataset->hugePages(); + const double percent = hugePages.first == 0 ? 0.0 : static_cast(hugePages.first) / hugePages.second * 100.0; + + LOG_INFO("%s" CYAN_BOLD("#%u") GREEN(" allocate done") " huge pages %s%u/%u %1.0f%%" CLEAR " %sJIT" BLACK_BOLD(" (%" PRIu64 " ms)"), + tag, + nodeId, + (hugePages.first == hugePages.second ? GREEN_BOLD_S : (hugePages.first == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), + hugePages.first, + hugePages.second, + percent, + dataset->cache()->isJIT() ? GREEN_BOLD_S "+" : RED_BOLD_S "-", + Chrono::steadyMSecs() - ts + ); + } + else { + LOG_WARN(CLEAR "%s" CYAN_BOLD("#%u") YELLOW_BOLD_S " failed to allocate RandomX dataset, switching to slow mode", tag, nodeId); + } + } + + + bool hugePages = true; + bool numa = true; + std::map datasets; uv_mutex_t mutex; }; static RxPrivate *d_ptr = new RxPrivate(); -static const char *tag = BLUE_BG(WHITE_BOLD_S " rx "); } // namespace xmrig -bool xmrig::Rx::isReady(const Job &job, int64_t) +bool xmrig::Rx::isReady(const Job &job, uint32_t nodeId) { d_ptr->lock(); - const bool rc = isReady(job.seedHash(), job.algorithm()); + const bool rc = isReady(job.seedHash(), job.algorithm(), d_ptr->numa ? 
nodeId : 0); d_ptr->unlock(); return rc; @@ -89,38 +158,56 @@ bool xmrig::Rx::isReady(const Job &job, int64_t) -xmrig::RxDataset *xmrig::Rx::dataset(int64_t) +xmrig::RxDataset *xmrig::Rx::dataset(uint32_t nodeId) { d_ptr->lock(); - RxDataset *dataset = d_ptr->datasets[0]; + RxDataset *dataset = d_ptr->datasets[d_ptr->numa ? nodeId : 0]; d_ptr->unlock(); return dataset; } -void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages) +void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages, bool numa) { if (job.algorithm().family() != Algorithm::RANDOM_X) { return; } d_ptr->lock(); - if (d_ptr->datasets.empty()) { - d_ptr->datasets.push_back(nullptr); + + size_t ready = 0; + + for (auto const &item : d_ptr->datasets) { + if (isReady(job.seedHash(), job.algorithm(), item.first)) { + ready++; + } } - if (isReady(job.seedHash(), job.algorithm())) { + if (!d_ptr->datasets.empty() && ready == d_ptr->datasets.size()) { d_ptr->unlock(); return; } - const uint32_t threads = initThreads < 1 ? static_cast(Cpu::info()->threads()) - : static_cast(initThreads); + d_ptr->hugePages = hugePages; + d_ptr->numa = numa && Cpu::info()->nodes() > 1; + const uint32_t threads = initThreads < 1 ? 
static_cast(Cpu::info()->threads()) + : static_cast(initThreads); - std::thread thread(initDataset, 0, job.seedHash(), job.algorithm(), threads, hugePages); - thread.detach(); +# ifdef XMRIG_FEATURE_HWLOC + if (d_ptr->numa) { + for (uint32_t nodeId : HwlocCpuInfo::nodeIndexes()) { + std::thread thread(initDataset, nodeId, job.seedHash(), job.algorithm(), threads); + thread.detach(); + } + } + else +# endif + { + std::thread thread(initDataset, 0, job.seedHash(), job.algorithm(), threads); + thread.detach(); + } d_ptr->unlock(); } @@ -134,69 +221,56 @@ void xmrig::Rx::stop() } -bool xmrig::Rx::isReady(const uint8_t *seed, const Algorithm &algorithm) +bool xmrig::Rx::isReady(const uint8_t *seed, const Algorithm &algorithm, uint32_t nodeId) { - return !d_ptr->datasets.empty() && d_ptr->datasets[0] != nullptr && d_ptr->datasets[0]->isReady(seed, algorithm); + return !d_ptr->datasets.empty() && d_ptr->datasets[nodeId] != nullptr && d_ptr->datasets[nodeId]->isReady(seed, algorithm); } -void xmrig::Rx::initDataset(size_t index, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads, bool hugePages) +void xmrig::Rx::initDataset(uint32_t nodeId, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads) { d_ptr->lock(); - if (!d_ptr->datasets[index]) { - const uint64_t ts = Chrono::steadyMSecs(); + RxDataset *dataset = d_ptr->datasets[nodeId]; - LOG_INFO("%s" MAGENTA_BOLD(" allocate") CYAN_BOLD(" %zu MB") BLACK_BOLD(" (%zu+%zu) for RandomX dataset & cache"), - tag, - (RxDataset::size() + RxCache::size()) / 1024 / 1024, - RxDataset::size() / 1024 / 1024, - RxCache::size() / 1024 / 1024 - ); - - d_ptr->datasets[index] = new RxDataset(hugePages); - - if (d_ptr->datasets[index]->get() != nullptr) { - const auto hugePages = d_ptr->datasets[index]->hugePages(); - const double percent = hugePages.first == 0 ? 
0.0 : static_cast(hugePages.first) / hugePages.second * 100.0; - - LOG_INFO("%s" GREEN(" allocate done") " huge pages %s%u/%u %1.0f%%" CLEAR " %sJIT" BLACK_BOLD(" (%" PRIu64 " ms)"), - tag, - (hugePages.first == hugePages.second ? GREEN_BOLD_S : (hugePages.first == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), - hugePages.first, - hugePages.second, - percent, - d_ptr->datasets[index]->cache()->isJIT() ? GREEN_BOLD_S "+" : RED_BOLD_S "-", - Chrono::steadyMSecs() - ts - ); - } - else { - LOG_WARN(CLEAR "%s" YELLOW_BOLD_S " failed to allocate RandomX dataset, switching to slow mode", tag); + if (!dataset) { +# ifdef XMRIG_FEATURE_HWLOC + if (d_ptr->numa) { + std::thread thread(RxPrivate::allocate, d_ptr, nodeId); + thread.join(); + } else +# endif + { + RxPrivate::allocate(d_ptr, nodeId); } + + dataset = d_ptr->datasets[nodeId]; } - if (!d_ptr->datasets[index]->isReady(seed, algorithm)) { + if (!dataset->isReady(seed, algorithm)) { const uint64_t ts = Chrono::steadyMSecs(); - if (d_ptr->datasets[index]->get() != nullptr) { - LOG_INFO("%s" MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."), + if (dataset->get() != nullptr) { + LOG_INFO("%s" CYAN_BOLD("#%u") MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."), tag, + nodeId, algorithm.shortName(), threads, Buffer::toHex(seed, 8).data() ); } else { - LOG_INFO("%s" MAGENTA_BOLD(" init cache") " algo " WHITE_BOLD("%s") BLACK_BOLD(" seed %s..."), + LOG_INFO("%s" CYAN_BOLD("#%u") MAGENTA_BOLD(" init cache") " algo " WHITE_BOLD("%s") BLACK_BOLD(" seed %s..."), tag, + nodeId, algorithm.shortName(), Buffer::toHex(seed, 8).data() ); } - d_ptr->datasets[index]->init(seed, algorithm, threads); + dataset->init(seed, algorithm, threads); - LOG_INFO("%s" GREEN(" init done") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, Chrono::steadyMSecs() - ts); + LOG_INFO("%s" CYAN_BOLD("#%u") GREEN(" init done") 
BLACK_BOLD(" (%" PRIu64 " ms)"), tag, nodeId, Chrono::steadyMSecs() - ts); } d_ptr->unlock(); diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index 456dfe4e..815c8690 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -43,14 +43,14 @@ class Job; class Rx { public: - static bool isReady(const Job &job, int64_t affinity); - static RxDataset *dataset(int64_t affinity); - static void init(const Job &job, int initThreads, bool hugePages); + static bool isReady(const Job &job, uint32_t nodeId); + static RxDataset *dataset(uint32_t nodeId); + static void init(const Job &job, int initThreads, bool hugePages, bool numa); static void stop(); private: - static bool isReady(const uint8_t *seed, const Algorithm &algorithm); - static void initDataset(size_t index, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads, bool hugePages); + static bool isReady(const uint8_t *seed, const Algorithm &algorithm, uint32_t nodeId); + static void initDataset(uint32_t nodeId, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads); }; From 9df9275120cd4b6133fa8c9882bfce2881d40973 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 27 Jul 2019 21:31:11 +0700 Subject: [PATCH 086/172] Return correct hugePages count if NUMA mode used. 
--- src/backend/cpu/CpuBackend.cpp | 9 +++------ src/crypto/rx/Rx.cpp | 23 +++++++++++++++++++++-- src/crypto/rx/Rx.h | 2 ++ src/crypto/rx/RxDataset.cpp | 2 +- 4 files changed, 27 insertions(+), 9 deletions(-) diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 539ab1f1..3ab06b46 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -295,12 +295,9 @@ rapidjson::Value xmrig::CpuBackend::toJSON(rapidjson::Document &doc) const # ifdef XMRIG_ALGO_RANDOMX if (d_ptr->algo.family() == Algorithm::RANDOM_X) { - RxDataset *dataset = Rx::dataset(0); // FIXME - if (dataset) { - const auto rxPages = dataset->hugePages(); - pages[0] += rxPages.first; - pages[1] += rxPages.second; - } + const auto rxPages = Rx::hugePages(); + pages[0] += rxPages.first; + pages[1] += rxPages.second; } # endif diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 94d1f30b..cc89fe34 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -146,7 +146,6 @@ static RxPrivate *d_ptr = new RxPrivate(); } // namespace xmrig - bool xmrig::Rx::isReady(const Job &job, uint32_t nodeId) { d_ptr->lock(); @@ -157,7 +156,6 @@ bool xmrig::Rx::isReady(const Job &job, uint32_t nodeId) } - xmrig::RxDataset *xmrig::Rx::dataset(uint32_t nodeId) { d_ptr->lock(); @@ -168,6 +166,27 @@ xmrig::RxDataset *xmrig::Rx::dataset(uint32_t nodeId) } +std::pair xmrig::Rx::hugePages() +{ + std::pair pages(0, 0); + d_ptr->lock(); + + for (auto const &item : d_ptr->datasets) { + if (!item.second) { + continue; + } + + const auto p = item.second->hugePages(); + pages.first += p.first; + pages.second += p.second; + } + + d_ptr->unlock(); + + return pages; +} + + void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages, bool numa) { if (job.algorithm().family() != Algorithm::RANDOM_X) { diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index 815c8690..43e4be1c 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -29,6 +29,7 @@ #include 
+#include namespace xmrig @@ -45,6 +46,7 @@ class Rx public: static bool isReady(const Job &job, uint32_t nodeId); static RxDataset *dataset(uint32_t nodeId); + static std::pair hugePages(); static void init(const Job &job, int initThreads, bool hugePages, bool numa); static void stop(); diff --git a/src/crypto/rx/RxDataset.cpp b/src/crypto/rx/RxDataset.cpp index 603cf578..617b9200 100644 --- a/src/crypto/rx/RxDataset.cpp +++ b/src/crypto/rx/RxDataset.cpp @@ -112,7 +112,7 @@ bool xmrig::RxDataset::isReady(const void *seed, const Algorithm &algorithm) con std::pair xmrig::RxDataset::hugePages() const { - constexpr size_t twoMiB = 2u * 1024u * 1024u; + constexpr size_t twoMiB = 2u * 1024u * 1024u; constexpr const size_t total = (VirtualMemory::align(size(), twoMiB) + VirtualMemory::align(RxCache::size(), twoMiB)) / twoMiB; size_t count = 0; From d10527036e4181eb1718d7b0295e507824fc48ca Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 07:46:59 +0700 Subject: [PATCH 087/172] Added "randomx" object to config. 
--- CMakeLists.txt | 2 ++ src/backend/cpu/CpuConfig.cpp | 3 -- src/backend/cpu/CpuConfig.h | 2 -- src/core/Miner.cpp | 7 ++-- src/core/config/Config.cpp | 26 +++++++++++++-- src/core/config/Config.h | 13 ++++++++ src/crypto/rx/RxConfig.cpp | 63 +++++++++++++++++++++++++++++++++++ src/crypto/rx/RxConfig.h | 53 +++++++++++++++++++++++++++++ 8 files changed, 159 insertions(+), 10 deletions(-) create mode 100644 src/crypto/rx/RxConfig.cpp create mode 100644 src/crypto/rx/RxConfig.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 42dcdb8d..f9dd6fd5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -182,6 +182,8 @@ if (WITH_RANDOMX) src/crypto/rx/RxAlgo.h src/crypto/rx/RxCache.cpp src/crypto/rx/RxCache.h + src/crypto/rx/RxConfig.cpp + src/crypto/rx/RxConfig.h src/crypto/rx/RxDataset.cpp src/crypto/rx/RxDataset.h src/crypto/rx/RxVm.cpp diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index ba66a45f..582649ef 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -35,7 +35,6 @@ static const char *kCn = "cn"; static const char *kEnabled = "enabled"; static const char *kHugePages = "huge-pages"; static const char *kHwAes = "hw-aes"; -static const char *kInitThreads = "init-threads"; static const char *kPriority = "priority"; #ifdef XMRIG_FEATURE_ASM @@ -90,7 +89,6 @@ rapidjson::Value xmrig::CpuConfig::toJSON(rapidjson::Document &doc) const obj.AddMember(StringRef(kHugePages), m_hugePages, allocator); obj.AddMember(StringRef(kHwAes), m_aes == AES_AUTO ? Value(kNullType) : Value(m_aes == AES_HW), allocator); obj.AddMember(StringRef(kPriority), priority() != -1 ? 
Value(priority()) : Value(kNullType), allocator); - obj.AddMember(StringRef(kInitThreads), m_initThreads, allocator); # ifdef XMRIG_FEATURE_ASM obj.AddMember(StringRef(kAsm), m_assembly.toJSON(), allocator); @@ -126,7 +124,6 @@ void xmrig::CpuConfig::read(const rapidjson::Value &value) if (value.IsObject()) { m_enabled = Json::getBool(value, kEnabled, m_enabled); m_hugePages = Json::getBool(value, kHugePages, m_hugePages); - m_initThreads = Json::getInt(value, kInitThreads, m_initThreads); setAesMode(Json::getValue(value, kHwAes)); setPriority(Json::getInt(value, kPriority, -1)); diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h index fc06bab9..5b2f3f86 100644 --- a/src/backend/cpu/CpuConfig.h +++ b/src/backend/cpu/CpuConfig.h @@ -56,7 +56,6 @@ public: inline bool isShouldSave() const { return m_shouldSave; } inline const Assembly &assembly() const { return m_assembly; } inline const Threads &threads() const { return m_threads; } - inline int initThreads() const { return m_initThreads; } inline int priority() const { return m_priority; } private: @@ -70,7 +69,6 @@ private: bool m_enabled = true; bool m_hugePages = true; bool m_shouldSave = false; - int m_initThreads = -1; int m_priority = -1; Threads m_threads; }; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 8d0596c2..9d977d12 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -365,8 +365,11 @@ void xmrig::Miner::setJob(const Job &job, bool donate) } # ifdef XMRIG_ALGO_RANDOMX - const CpuConfig &cpu = d_ptr->controller->config()->cpu(); - Rx::init(job, cpu.initThreads(), cpu.isHugePages(), true); + Rx::init(job, + d_ptr->controller->config()->rx().threads(), + d_ptr->controller->config()->cpu().isHugePages(), + d_ptr->controller->config()->rx().isNUMA() + ); # endif uv_rwlock_wrunlock(&d_ptr->rwlock); diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index d6336b67..28c15234 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ 
-38,6 +38,14 @@ #include "rapidjson/prettywriter.h" +namespace xmrig { + +static const char *kCPU = "cpu"; +static const char *kRandomX = "randomx"; + +} + + xmrig::Config::Config() : BaseConfig() { } @@ -49,7 +57,14 @@ bool xmrig::Config::read(const IJsonReader &reader, const char *fileName) return false; } - m_cpu.read(reader.getValue("cpu")); + m_cpu.read(reader.getValue(kCPU)); + +# ifdef XMRIG_ALGO_RANDOMX + if (!m_rx.read(reader.getValue(kRandomX))) { + printf("upgrade\n"); + m_upgrade = true; + } +# endif return true; } @@ -68,13 +83,18 @@ void xmrig::Config::getJSON(rapidjson::Document &doc) const api.AddMember("worker-id", m_apiWorkerId.toJSON(), allocator); doc.AddMember("api", api, allocator); + doc.AddMember("http", m_http.toJSON(doc), allocator); doc.AddMember("autosave", isAutoSave(), allocator); doc.AddMember("background", isBackground(), allocator); doc.AddMember("colors", Log::colors, allocator); - doc.AddMember("cpu", m_cpu.toJSON(doc), allocator); + +# ifdef XMRIG_ALGO_RANDOMX + doc.AddMember(StringRef(kRandomX), m_rx.toJSON(doc), allocator); +# endif + + doc.AddMember(StringRef(kCPU), m_cpu.toJSON(doc), allocator); doc.AddMember("donate-level", m_pools.donateLevel(), allocator); doc.AddMember("donate-over-proxy", m_pools.proxyDonate(), allocator); - doc.AddMember("http", m_http.toJSON(doc), allocator); doc.AddMember("log-file", m_logFile.toJSON(), allocator); doc.AddMember("pools", m_pools.toJSON(doc), allocator); doc.AddMember("print-time", printTime(), allocator); diff --git a/src/core/config/Config.h b/src/core/config/Config.h index e6b5c735..0014cb05 100644 --- a/src/core/config/Config.h +++ b/src/core/config/Config.h @@ -34,6 +34,11 @@ #include "rapidjson/fwd.h" +#ifdef XMRIG_ALGO_RANDOMX +# include "crypto/rx/RxConfig.h" +#endif + + namespace xmrig { @@ -51,9 +56,17 @@ public: inline bool isShouldSave() const { return (m_shouldSave || m_upgrade || m_cpu.isShouldSave()) && isAutoSave(); } inline const CpuConfig &cpu() const { return m_cpu; 
} +# ifdef XMRIG_ALGO_RANDOMX + inline const RxConfig &rx() const { return m_rx; } +# endif + private: bool m_shouldSave = false; CpuConfig m_cpu; + +# ifdef XMRIG_ALGO_RANDOMX + RxConfig m_rx; +# endif }; diff --git a/src/crypto/rx/RxConfig.cpp b/src/crypto/rx/RxConfig.cpp new file mode 100644 index 00000000..dc543fb4 --- /dev/null +++ b/src/crypto/rx/RxConfig.cpp @@ -0,0 +1,63 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include "base/io/json/Json.h" +#include "crypto/rx/RxConfig.h" +#include "rapidjson/document.h" + + +namespace xmrig { + +static const char *kInit = "init"; +static const char *kNUMA = "numa"; + +} + + +rapidjson::Value xmrig::RxConfig::toJSON(rapidjson::Document &doc) const +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + Value obj(kObjectType); + + obj.AddMember(StringRef(kInit), m_threads, allocator); + obj.AddMember(StringRef(kNUMA), m_numa, allocator); + + return obj; +} + + +bool xmrig::RxConfig::read(const rapidjson::Value &value) +{ + if (value.IsObject()) { + m_numa = Json::getBool(value, kNUMA, m_numa); + m_threads = Json::getInt(value, kInit, m_threads); + + return true; + } + + return false; +} diff --git a/src/crypto/rx/RxConfig.h b/src/crypto/rx/RxConfig.h new file mode 100644 index 00000000..e06c764c --- /dev/null +++ b/src/crypto/rx/RxConfig.h @@ -0,0 +1,53 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_RXCONFIG_H +#define XMRIG_RXCONFIG_H + + +#include "rapidjson/fwd.h" + + +namespace xmrig { + + +class RxConfig +{ +public: + bool read(const rapidjson::Value &value); + rapidjson::Value toJSON(rapidjson::Document &doc) const; + + inline bool isNUMA() const { return m_numa; } + inline int threads() const { return m_threads; } + +private: + bool m_numa = true; + int m_threads = -1; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_RXCONFIG_H */ From a39e0e05e98a07724b3ea829dd6e0bcba56ae5ee Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 08:06:50 +0700 Subject: [PATCH 088/172] Cleanup. --- src/core/config/Config.cpp | 1 - src/crypto/randomx/instructions_portable.cpp | 12 ++++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 28c15234..34221e35 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -61,7 +61,6 @@ bool xmrig::Config::read(const IJsonReader &reader, const char *fileName) # ifdef XMRIG_ALGO_RANDOMX if (!m_rx.read(reader.getValue(kRandomX))) { - printf("upgrade\n"); m_upgrade = true; } # endif diff --git a/src/crypto/randomx/instructions_portable.cpp b/src/crypto/randomx/instructions_portable.cpp index 8c466ebe..8f0592dd 100644 --- a/src/crypto/randomx/instructions_portable.cpp +++ b/src/crypto/randomx/instructions_portable.cpp @@ -82,12 +82,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define HAVE_SETROUNDMODE_IMPL #endif -#ifndef HAVE_SETROUNDMODE_IMPL - static void setRoundMode_(uint32_t mode) { - fesetround(mode); - } -#endif - #ifndef HAVE_ROTR64 uint64_t rotr64(uint64_t a, unsigned int b) { return (a >> b) | (a << (-b & 63)); @@ -133,6 +127,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifdef RANDOMX_DEFAULT_FENV +# ifndef HAVE_SETROUNDMODE_IMPL + static void setRoundMode_(uint32_t mode) { + fesetround(mode); + } +# endif + void rx_reset_float_state() { setRoundMode_(FE_TONEAREST); rx_set_double_precision(); //set precision to 53 bits if needed by the platform From 2b29b81b898e7d547f8b80d0bd38aa8632d09dfd Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 09:24:53 +0700 Subject: [PATCH 089/172] Use internal hwloc for MSVC. --- CMakeLists.txt | 2 +- src/3rdparty/hwloc/AUTHORS | 44 + src/3rdparty/hwloc/CMakeLists.txt | 38 + src/3rdparty/hwloc/COPYING | 39 + src/3rdparty/hwloc/NEWS | 1599 ++++++ src/3rdparty/hwloc/README | 85 + src/3rdparty/hwloc/VERSION | 47 + src/3rdparty/hwloc/include/hwloc.h | 2270 +++++++++ .../hwloc/include/hwloc/autogen/config.h | 59 + src/3rdparty/hwloc/include/hwloc/bitmap.h | 467 ++ src/3rdparty/hwloc/include/hwloc/cuda.h | 220 + src/3rdparty/hwloc/include/hwloc/cudart.h | 177 + src/3rdparty/hwloc/include/hwloc/deprecated.h | 206 + src/3rdparty/hwloc/include/hwloc/diff.h | 289 ++ src/3rdparty/hwloc/include/hwloc/distances.h | 271 + src/3rdparty/hwloc/include/hwloc/export.h | 278 + src/3rdparty/hwloc/include/hwloc/gl.h | 135 + .../hwloc/include/hwloc/glibc-sched.h | 125 + src/3rdparty/hwloc/include/hwloc/helper.h | 1160 +++++ src/3rdparty/hwloc/include/hwloc/inlines.h | 146 + src/3rdparty/hwloc/include/hwloc/intel-mic.h | 134 + .../hwloc/include/hwloc/linux-libnuma.h | 273 + src/3rdparty/hwloc/include/hwloc/linux.h | 79 + src/3rdparty/hwloc/include/hwloc/nvml.h | 181 + src/3rdparty/hwloc/include/hwloc/opencl.h | 206 + .../hwloc/include/hwloc/openfabrics-verbs.h | 150 + src/3rdparty/hwloc/include/hwloc/plugins.h | 542 ++ src/3rdparty/hwloc/include/hwloc/rename.h | 765 +++ src/3rdparty/hwloc/include/hwloc/shmem.h | 137 + .../hwloc/include/private/autogen/config.h | 672 +++ .../hwloc/include/private/components.h | 43 + .../hwloc/include/private/cpuid-x86.h | 86 + src/3rdparty/hwloc/include/private/debug.h | 83 + 
.../include/private/internal-components.h | 41 + src/3rdparty/hwloc/include/private/misc.h | 583 +++ src/3rdparty/hwloc/include/private/netloc.h | 578 +++ src/3rdparty/hwloc/include/private/private.h | 417 ++ .../hwloc/include/private/solaris-chiptype.h | 43 + src/3rdparty/hwloc/include/private/xml.h | 108 + src/3rdparty/hwloc/src/base64.c | 309 ++ src/3rdparty/hwloc/src/bind.c | 922 ++++ src/3rdparty/hwloc/src/bitmap.c | 1676 ++++++ src/3rdparty/hwloc/src/components.c | 785 +++ src/3rdparty/hwloc/src/diff.c | 492 ++ src/3rdparty/hwloc/src/distances.c | 920 ++++ src/3rdparty/hwloc/src/misc.c | 166 + src/3rdparty/hwloc/src/pci-common.c | 941 ++++ src/3rdparty/hwloc/src/shmem.c | 287 ++ src/3rdparty/hwloc/src/static-components.h | 15 + src/3rdparty/hwloc/src/topology-noos.c | 65 + src/3rdparty/hwloc/src/topology-synthetic.c | 1521 ++++++ src/3rdparty/hwloc/src/topology-windows.c | 1189 +++++ src/3rdparty/hwloc/src/topology-x86.c | 1583 ++++++ .../hwloc/src/topology-xml-nolibxml.c | 919 ++++ src/3rdparty/hwloc/src/topology-xml.c | 2886 +++++++++++ src/3rdparty/hwloc/src/topology.c | 4484 +++++++++++++++++ src/3rdparty/hwloc/src/traversal.c | 616 +++ src/backend/cpu/cpu.cmake | 13 +- 58 files changed, 32562 insertions(+), 5 deletions(-) create mode 100644 src/3rdparty/hwloc/AUTHORS create mode 100644 src/3rdparty/hwloc/CMakeLists.txt create mode 100644 src/3rdparty/hwloc/COPYING create mode 100644 src/3rdparty/hwloc/NEWS create mode 100644 src/3rdparty/hwloc/README create mode 100644 src/3rdparty/hwloc/VERSION create mode 100644 src/3rdparty/hwloc/include/hwloc.h create mode 100644 src/3rdparty/hwloc/include/hwloc/autogen/config.h create mode 100644 src/3rdparty/hwloc/include/hwloc/bitmap.h create mode 100644 src/3rdparty/hwloc/include/hwloc/cuda.h create mode 100644 src/3rdparty/hwloc/include/hwloc/cudart.h create mode 100644 src/3rdparty/hwloc/include/hwloc/deprecated.h create mode 100644 src/3rdparty/hwloc/include/hwloc/diff.h create mode 100644 
src/3rdparty/hwloc/include/hwloc/distances.h create mode 100644 src/3rdparty/hwloc/include/hwloc/export.h create mode 100644 src/3rdparty/hwloc/include/hwloc/gl.h create mode 100644 src/3rdparty/hwloc/include/hwloc/glibc-sched.h create mode 100644 src/3rdparty/hwloc/include/hwloc/helper.h create mode 100644 src/3rdparty/hwloc/include/hwloc/inlines.h create mode 100644 src/3rdparty/hwloc/include/hwloc/intel-mic.h create mode 100644 src/3rdparty/hwloc/include/hwloc/linux-libnuma.h create mode 100644 src/3rdparty/hwloc/include/hwloc/linux.h create mode 100644 src/3rdparty/hwloc/include/hwloc/nvml.h create mode 100644 src/3rdparty/hwloc/include/hwloc/opencl.h create mode 100644 src/3rdparty/hwloc/include/hwloc/openfabrics-verbs.h create mode 100644 src/3rdparty/hwloc/include/hwloc/plugins.h create mode 100644 src/3rdparty/hwloc/include/hwloc/rename.h create mode 100644 src/3rdparty/hwloc/include/hwloc/shmem.h create mode 100644 src/3rdparty/hwloc/include/private/autogen/config.h create mode 100644 src/3rdparty/hwloc/include/private/components.h create mode 100644 src/3rdparty/hwloc/include/private/cpuid-x86.h create mode 100644 src/3rdparty/hwloc/include/private/debug.h create mode 100644 src/3rdparty/hwloc/include/private/internal-components.h create mode 100644 src/3rdparty/hwloc/include/private/misc.h create mode 100644 src/3rdparty/hwloc/include/private/netloc.h create mode 100644 src/3rdparty/hwloc/include/private/private.h create mode 100644 src/3rdparty/hwloc/include/private/solaris-chiptype.h create mode 100644 src/3rdparty/hwloc/include/private/xml.h create mode 100644 src/3rdparty/hwloc/src/base64.c create mode 100644 src/3rdparty/hwloc/src/bind.c create mode 100644 src/3rdparty/hwloc/src/bitmap.c create mode 100644 src/3rdparty/hwloc/src/components.c create mode 100644 src/3rdparty/hwloc/src/diff.c create mode 100644 src/3rdparty/hwloc/src/distances.c create mode 100644 src/3rdparty/hwloc/src/misc.c create mode 100644 src/3rdparty/hwloc/src/pci-common.c 
create mode 100644 src/3rdparty/hwloc/src/shmem.c create mode 100644 src/3rdparty/hwloc/src/static-components.h create mode 100644 src/3rdparty/hwloc/src/topology-noos.c create mode 100644 src/3rdparty/hwloc/src/topology-synthetic.c create mode 100644 src/3rdparty/hwloc/src/topology-windows.c create mode 100644 src/3rdparty/hwloc/src/topology-x86.c create mode 100644 src/3rdparty/hwloc/src/topology-xml-nolibxml.c create mode 100644 src/3rdparty/hwloc/src/topology-xml.c create mode 100644 src/3rdparty/hwloc/src/topology.c create mode 100644 src/3rdparty/hwloc/src/traversal.c diff --git a/CMakeLists.txt b/CMakeLists.txt index f9dd6fd5..a1779f53 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -261,4 +261,4 @@ if (WITH_DEBUG_LOG) endif() add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${SOURCES_CPUID} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${HTTP_SOURCES} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES} ${CN_GPU_SOURCES}) -target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB} ${HWLOC_LIBRARY}) +target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB}) diff --git a/src/3rdparty/hwloc/AUTHORS b/src/3rdparty/hwloc/AUTHORS new file mode 100644 index 00000000..7187a723 --- /dev/null +++ b/src/3rdparty/hwloc/AUTHORS @@ -0,0 +1,44 @@ +hwloc Authors +============= + +The following cumulative list contains the names of most individuals +who have committed code to the hwloc repository +(either directly or through a third party). + +Name Affiliation(s) +--------------------------- -------------------- +Grzegorz Andrejczuk Intel +Cédric Augonnet University of Bordeaux +Guillaume Beauchamp Inria +Ahmad Boissetri Binzagr Inria +Cyril Bordage Inria +Nicholas Buroker UWL +Christopher M. 
Cantalupo Intel +Jérôme Clet-Ortega University of Bordeaux +Ludovic Courtès Inria +Clément Foyer Inria +Nathalie Furmento CNRS +Bryon Gloden +Brice Goglin Inria +Gilles Gouaillardet RIST +Joshua Hursey UWL +Alexey Kardashevskiy IBM +Rob Latham ANL +Douglas MacFarland UWL +Marc Marí BSC +Jonathan L Peyton Intel +Piotr Luc Intel +Antoine Rougier intern from University of Bordeaux +Jeff Squyres Cisco +Samuel Thibault University of Bordeaux +Jean-Yves VET DDN +Benjamin Worpitz +Jeff Zhao Zhaoxin + +Affiliaion abbreviations: +------------------------- +ANL = Argonne National Lab +BSC = Barcelona Supercomputing Center +Cisco = Cisco Systems, Inc. +CNRS = Centre national de la recherche scientifique (France) +UWL = University of Wisconsin-La Crosse diff --git a/src/3rdparty/hwloc/CMakeLists.txt b/src/3rdparty/hwloc/CMakeLists.txt new file mode 100644 index 00000000..431c11eb --- /dev/null +++ b/src/3rdparty/hwloc/CMakeLists.txt @@ -0,0 +1,38 @@ +cmake_minimum_required (VERSION 2.8) +project (hwloc C) + +include_directories(include) +include_directories(src) + +add_definitions(/D_CRT_SECURE_NO_WARNINGS) +set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /MT") + +set(HEADERS + include/hwloc.h + src/static-components.h + ) + +set(SOURCES + src/base64.c + src/bind.c + src/bitmap.c + src/components.c + src/diff.c + src/distances.c + src/misc.c + src/pci-common.c + src/shmem.c + src/topology.c + src/topology-noos.c + src/topology-synthetic.c + src/topology-windows.c + src/topology-x86.c + src/topology-xml.c + src/topology-xml-nolibxml.c + src/traversal.c + ) + +add_library(hwloc STATIC + ${HEADERS} + ${SOURCES} + ) diff --git a/src/3rdparty/hwloc/COPYING b/src/3rdparty/hwloc/COPYING new file mode 100644 index 00000000..e77516e1 --- /dev/null +++ b/src/3rdparty/hwloc/COPYING @@ -0,0 +1,39 @@ +Copyright © 2004-2006 The Trustees of Indiana University and Indiana University Research and Technology Corporation. All rights reserved. 
+Copyright © 2004-2005 The University of Tennessee and The University of Tennessee Research Foundation. All rights reserved. +Copyright © 2004-2005 High Performance Computing Center Stuttgart, University of Stuttgart. All rights reserved. +Copyright © 2004-2005 The Regents of the University of California. All rights reserved. +Copyright © 2009 CNRS +Copyright © 2009-2016 Inria. All rights reserved. +Copyright © 2009-2015 Université Bordeaux +Copyright © 2009-2015 Cisco Systems, Inc. All rights reserved. +Copyright © 2009-2012 Oracle and/or its affiliates. All rights reserved. +Copyright © 2010 IBM +Copyright © 2010 Jirka Hladky +Copyright © 2012 Aleksej Saushev, The NetBSD Foundation +Copyright © 2012 Blue Brain Project, EPFL. All rights reserved. +Copyright © 2013-2014 University of Wisconsin-La Crosse. All rights reserved. +Copyright © 2015 Research Organization for Information Science and Technology (RIST). All rights reserved. +Copyright © 2015-2016 Intel, Inc. All rights reserved. +See COPYING in top-level directory. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/3rdparty/hwloc/NEWS b/src/3rdparty/hwloc/NEWS new file mode 100644 index 00000000..664c8d55 --- /dev/null +++ b/src/3rdparty/hwloc/NEWS @@ -0,0 +1,1599 @@ +Copyright © 2009 CNRS +Copyright © 2009-2019 Inria. All rights reserved. +Copyright © 2009-2013 Université Bordeaux +Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + +$COPYRIGHT$ + +Additional copyrights may follow + +$HEADER$ + +=========================================================================== + +This file contains the main features as well as overviews of specific +bug fixes (and other actions) for each version of hwloc since version +0.9 (as initially released as "libtopology", then re-branded to "hwloc" +in v0.9.1). + + +Version 2.0.4 (also included in 1.11.13 when appropriate) +------------- +* Add support for Linux 5.3 new sysfs cpu topology files with Die information. +* Add support for Intel v2 Extended Topology Enumeration in the x86 backend. +* Tiles, Modules and Dies are exposed as Groups for now. + + HWLOC_DONT_MERGE_DIE_GROUPS=1 may be set in the environment to prevent + Die groups from being automatically merged with identical parent or children. +* Ignore NUMA node information from AMD topoext in the x86 backend, + unless HWLOC_X86_TOPOEXT_NUMANODES=1 is set in the environment. +* Group objects have a new "dont_merge" attribute to prevent them from + being automatically merged with identical parent or children. 
+ + +Version 2.0.3 (also included in 1.11.12 when appropriate) +------------- +* Fix build on Cygwin, thanks to Marco Atzeri for the patches. +* Fix a corner case of hwloc_topology_restrict() where children would + become out-of-order. +* Fix the return length of export_xmlbuffer() functions to always + include the ending \0. +* Fix lstopo --children-order argument parsing. + + +Version 2.0.2 (also included in 1.11.11 when appropriate) +------------- +* Add support for Hygon Dhyana processors in the x86 backend, + thanks to Pu Wen for the patch. +* Fix symbol renaming to also rename internal components, + thanks to Evan Ramos for the patch. +* Fix build on HP-UX, thanks to Richard Lloyd for reporting the issues. +* Detect PCI link speed without being root on Linux >= 4.13. +* Add HWLOC_VERSION* macros to the public headers, + thanks to Gilles Gouaillardet for the suggestion. + + +Version 2.0.1 (also included in 1.11.10 when relevant) +------------- +* Bump the library soname to 15:0:0 to avoid conflicts with hwloc 1.11.x + releases. The hwloc 2.0.0 soname was buggy (12:0:0), applications will + have to be recompiled. +* Serialize pciaccess discovery to fix concurrent topology loads in + multiple threads. +* Fix hwloc-dump-hwdata to only process SMBIOS information that correspond + to the KNL and KNM configuration. +* Add a heuristic for guessing KNL/KNM memory and cluster modes when + hwloc-dump-hwdata could not run as root earlier. +* Add --no-text lstopo option to remove text from some boxes in the + graphical output. Mostly useful for removing Group labels. +* Some minor fixes to memory binding. + + +Version 2.0.0 +------------- +*** The ABI of the library has changed. *** + For instance some hwloc_obj fields were reordered, added or removed, see below. + + HWLOC_API_VERSION and hwloc_get_api_version() now give 0x00020000. + + See "How do I handle ABI breaks and API upgrades ?" in the FAQ + and "Upgrading to hwloc 2.0 API" in the documentation. 
+* Major API changes + + Memory, I/O and Misc objects are now stored in dedicated children lists, + not in the usual children list that is now only used for CPU-side objects. + - hwloc_get_next_child() may still be used to iterate over these 4 lists + of children at once. + - hwloc_obj_type_is_normal(), _memory() and _io() may be used to check + the kind of a given object type. + + Topologies always have at least one NUMA object. On non-NUMA machines, + a single NUMA object is added to describe the entire machine memory. + The NUMA level cannot be ignored anymore. + + The NUMA level is special since NUMA nodes are not in the main hierarchy + of objects anymore. Its depth is a fake negative depth that should not be + compared with normal levels. + - If all memory objects are attached to parents at the same depth, + it may be retrieved with hwloc_get_memory_parents_depth(). + + The HWLOC_OBJ_CACHE type is replaced with 8 types HWLOC_OBJ_L[1-5]CACHE + and HWLOC_OBJ_L[1-3]ICACHE that remove the need to disambiguate levels + when looking for caches with _by_type() functions. + - New hwloc_obj_type_is_{,d,i}cache() functions may be used to check whether + a given type is a cache. + + Reworked ignoring/filtering API + - Replace hwloc_topology_ignore*() functions with hwloc_topology_set_type_filter() + and hwloc_topology_set_all_types_filter(). + . Contrary to hwloc_topology_ignore_{type,all}_keep_structure() which + removed individual objects, HWLOC_TYPE_FILTER_KEEP_STRUCTURE only removes + entire levels (so that topology do not become too asymmetric). + - Remove HWLOC_TOPOLOGY_FLAG_ICACHES in favor of hwloc_topology_set_icache_types_filter() + with HWLOC_TYPE_FILTER_KEEP_ALL. + - Remove HWLOC_TOPOLOGY_FLAG_IO_DEVICES, _IO_BRIDGES and _WHOLE_IO in favor of + hwloc_topology_set_io_types_filter() with HWLOC_TYPE_FILTER_KEEP_ALL or + HWLOC_TYPE_FILTER_KEEP_IMPORTANT. + + The distance API has been completely reworked. It is now described + in hwloc/distances.h. 
+ + Return values + - Most functions in hwloc/bitmap.h now return an int that may be negative + in case of failure to realloc/extend the internal storage of a bitmap. + - hwloc_obj_add_info() also returns an int in case allocations fail. +* Minor API changes + + Object attributes + - obj->memory is removed. + . local_memory and page_types attributes are now in obj->attr->numanode + . total_memory moves obj->total_memory. + - Objects do not have allowed_cpuset and allowed_nodeset anymore. + They are only available for the entire topology using + hwloc_topology_get_allowed_cpuset() and hwloc_topology_get_allowed_nodeset(). + - Objects now have a "subtype" field that supersedes former "Type" and + "CoProcType" info attributes. + + Object and level depths are now signed ints. + + Object string printing and parsing + - hwloc_type_sscanf() deprecates the old hwloc_obj_type_sscanf(). + - hwloc_type_sscanf_as_depth() is added to convert a type name into + a level depth. + - hwloc_obj_cpuset_snprintf() is deprecated in favor of hwloc_bitmap_snprintf(). + + Misc objects + - Replace hwloc_topology_insert_misc_object_by_cpuset() with + hwloc_topology_insert_group_object() to precisely specify the location + of an additional hierarchy level in the topology. + - Misc objects have their own level and depth to iterate over all of them. + - Misc objects may now only be inserted as a leaf object with + hwloc_topology_insert_misc_object() which deprecates + hwloc_topology_insert_misc_object_by_parent(). + + hwloc_topology_restrict() doesn't remove objects that contain memory + by default anymore. + - The list of existing restrict flags was modified. + + The discovery support array now contains some NUMA specific bits. + + XML export functions take an additional flags argument, + for instance for exporting XMLs that are compatible with hwloc 1.x. + + Functions diff_load_xml*(), diff_export_xml*() and diff_destroy() in + hwloc/diff.h do not need a topology as first parameter anymore. 
+ + hwloc_parse_cpumap_file () superseded by hwloc_linux_read_path_as_cpumask() + in hwloc/linux.h. + + HWLOC_MEMBIND_DEFAULT and HWLOC_MEMBIND_FIRSTTOUCH were clarified. +* New APIs and Features + + Add hwloc/shmem.h for sharing topologies between processes running on + the same machine (for reducing the memory footprint). + + Add the experimental netloc subproject. It is disabled by default + and can be enabled with --enable-netloc. + It currently brings command-line tools to gather and visualize the + topology of InfiniBand fabrics, and an API to convert such topologies + into Scotch architectures for process mapping. + See the documentation for details. +* Removed APIs and features + + Remove the online_cpuset from struct hwloc_obj. Offline PUs get unknown + topologies on Linux nowadays, and wrong topology on Solaris. Other OS + do not support them. And one cannot do much about them anyway. Just keep + them in complete_cpuset. + + Remove the now-unused "System" object type HWLOC_OBJ_SYSTEM, + defined to MACHINE for backward compatibility. + + The almost-unused "os_level" attribute has been removed from the + hwloc_obj structure. + + Remove the custom interface for assembling the topologies of different + nodes as well as the hwloc-assembler tools. + + hwloc_topology_set_fsroot() is removed, the environment variable + HWLOC_FSROOT may be used for the same remote testing/debugging purpose. + + Remove the deprecated hwloc_obj_snprintf(), hwloc_obj_type_of_string(), + hwloc_distribute[v](). + * Remove Myrinet Express interoperability (hwloc/myriexpress.h). + + Remove Kerrighed support from the Linux backend. + + Remove Tru64 (OSF/1) support. + - Remove HWLOC_MEMBIND_REPLICATE which wasn't available anywhere else. +* Backend improvements + + Linux + - OS devices do not have to be attached through PCI anymore, + for instance enabling the discovery of NVDIMM block devices. + - Remove the dependency on libnuma. + - Add a SectorSize attribute to block OS devices. 
+ + Mac OS X + - Fix detection of cores and hyperthreads. + - Add CPUVendor, Model, ... attributes. + + Windows + - Add get_area_memlocation(). +* Tools + + lstopo and hwloc-info have a new --filter option matching the new filtering API. + + lstopo can be given --children-order=plain to force a basic displaying + of memory and normal children together below their parent. + + hwloc-distances was removed and replaced with lstopo --distances. +* Misc + + Exports + - Exporting to synthetic now ignores I/O and Misc objects. + + PCI discovery + - Separate OS device discovery from PCI discovery. Only the latter is disabled + with --disable-pci at configure time. Both may be disabled with --disable-io. + - The `linuxpci' component is now renamed into `linuxio'. + - The old `libpci' component name from hwloc 1.6 is not supported anymore, + only the `pci' name from hwloc 1.7 is now recognized. + - The HWLOC_PCI___LOCALCPUS environment variables are superseded + with a single HWLOC_PCI_LOCALITY where bus ranges may be specified. + - Do not set PCI devices and bridges name automatically. Vendor and device + names are already in info attributes. + + Components and discovery + - Add HWLOC_SYNTHETIC environment variable to enforce a synthetic topology + as if hwloc_topology_set_synthetic() had been called. + - HWLOC_COMPONENTS doesn't support xml or synthetic component attributes + anymore, they should be passed in HWLOC_XMLFILE or HWLOC_SYNTHETIC instead. + - HWLOC_COMPONENTS takes precedence over other environment variables + for selecting components. + + hwloc now requires a C99 compliant compiler. + + +Version 1.11.9 +-------------- +* Add support for Zhaoxin ZX-C and ZX-D processors in the x86 backend, + thanks to Jeff Zhao for the patch. +* Fix AMD Epyc 24-core L3 cache locality in the x86 backend. +* Don't crash in the x86 backend when the CPUID vendor string is unknown. +* Fix the missing pu discovery support bit on some OS. 
+* Fix the management of the lstopoStyle info attribute for custom colors. +* Add verbose warnings when failing to load hwloc v2.0+ XMLs. + + +Version 1.11.8 +-------------- +* Multiple Solaris improvements, thanks to Maureen Chew for the help: + + Detect caches on Sparc. + + Properly detect allowed/disallowed PUs and NUMA nodes with processor sets. + + Add hwloc_get_last_cpu_location() support for the current thread. +* Add support for CUDA compute capability 7.0 and fix support for 6.[12]. +* Tools improvements + + Fix search for objects by physical index in command-line tools. + + Add missing "cpubind:get_thisthread_last_cpu_location" in the output + of hwloc-info --support. + + Add --pid and --name to specify target processes in hwloc-ps. + + Display thread names in lstopo and hwloc-ps on Linux. +* Doc improvements + + Add a FAQ entry about building on Windows. + + Install missing sub-manpage for hwloc_obj_add_info() and + hwloc_obj_get_info_by_name(). + + +Version 1.11.7 +-------------- +* Fix hwloc-bind --membind for CPU-less NUMA nodes (again). + Thanks to Gilles Gouaillardet for reporting the issue. +* Fix a memory leak on IBM S/390 platforms running Linux. +* Fix a memory leak when forcing the x86 backend first on amd64/topoext + platforms running Linux. +* Command-line tools now support "hbm" instead "numanode" for filtering + only high-bandwidth memory nodes when selecting locations. + + hwloc-bind also support --hbm and --no-hbm for filtering only or + no HBM nodes. + Thanks to Nicolas Denoyelle for the suggestion. +* Add --children and --descendants to hwloc-info for listing object + children or object descendants of a specific type. +* Add --no-index, --index, --no-attrs, --attrs to disable/enable display + of index numbers or attributes in the graphical lstopo output. +* Try to gather hwloc-dump-hwdata output from all possible locations + in hwloc-gather-topology. 
+* Updates to the documentation of locations in hwloc(7) and + command-line tools manpages. + + +Version 1.11.6 +-------------- +* Make the Linux discovery about twice faster, especially on the CPU side, + by trying to avoid sysfs file accesses as much as possible. +* Add support for AMD Family 17h processors (Zen) SMT cores in the Linux + and x86 backends. +* Add the HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES flag (and the + HWLOC_THISSYSTEM_ALLOWED_RESOURCES environment variable) for reading the + set of allowed resources from the local operating system even if the + topology was loaded from XML or synthetic. +* Fix hwloc_bitmap_set/clr_range() for infinite ranges that do not + overlap currently defined ranges in the bitmap. +* Don't reset the lstopo zoom scale when moving the X11 window. +* lstopo now has --flags for manually setting topology flags. +* hwloc_get_depth_type() returns HWLOC_TYPE_DEPTH_UNKNOWN for Misc objects. + + +Version 1.11.5 +-------------- +* Add support for Knights Mill Xeon Phi, thanks to Piotr Luc for the patch. +* Reenable distance gathering on Solaris, disabled by mistake since v1.0. + Thanks to TU Wien for the help. +* Fix hwloc_get_*obj*_inside_cpuset() functions to ignore objects with + empty CPU sets, for instance, CPU-less NUMA nodes such as KNL MCDRAM. + Thanks to Nicolas Denoyelle for the report. +* Fix XML import of multiple distance matrices. +* Add a FAQ entry about "hwloc is only a structural model, it ignores + performance models, memory bandwidth, etc.?" + + +Version 1.11.4 +-------------- +* Add MemoryMode and ClusterMode attributes in the Machine object on KNL. + Add doc/examples/get-knl-modes.c for an example of retrieving them. + Thanks to Grzegorz Andrejczuk. +* Fix Linux build with -m32 with respect to libudev. + Thanks to Paul Hargrove for reporting the issue. +* Fix build with Visual Studio 2015, thanks to Eloi Gaudry for reporting + the issue and providing the patch. 
+* Don't forget to display OS device children in the graphical lstopo. +* Fix a memory leak on Solaris, thanks to Bryon Gloden for the patch. +* Properly handle realloc() failures, thanks to Bryon Gloden for reporting + the issue. +* Fix lstopo crash in ascii/fig/windows outputs when some objects have a + lstopoStyle info attribute. + + +Version 1.11.3 +-------------- +* Bug fixes + + Fix a memory leak on Linux S/390 hosts with books. + + Fix /proc/mounts parsing on Linux by using mntent.h. + Thanks to Nathan Hjelm for reporting the issue. + + Fix a x86 infinite loop on VMware due to the x2APIC feature being + advertised without actually being fully supported. + Thanks to Jianjun Wen for reporting the problem and testing the patch. + + Fix the return value of hwloc_alloc() on mmap() failure. + Thanks to Hugo Brunie for reporting the issue. + + Fix the return value of command-line tools in some error cases. + + Do not break individual thread bindings during x86 backend discovery in a + multithreaded process. Thanks to Farouk Mansouri for the report. + + Fix hwloc-bind --membind for CPU-less NUMA nodes. + + Fix some corner cases in the XML export/import of application userdata. +* API Improvements + + Add HWLOC_MEMBIND_BYNODESET flag so that membind() functions accept + either cpusets or nodesets. + + Add hwloc_get_area_memlocation() to check where pages are actually + allocated. Only implemented on Linux for now. + - There's no _nodeset() variant, but the new flag HWLOC_MEMBIND_BYNODESET + is supported. + + Make hwloc_obj_type_sscanf() parse back everything that may be outputted + by hwloc_obj_type_snprintf(). +* Detection Improvements + + Allow the x86 backend to add missing cache levels, so that it completes + what the Solaris backend lacks. + Thanks to Ryan Zezeski for reporting the issue. + + Do not filter-out FibreChannel PCI adapters by default anymore. + Thanks to Matt Muggeridge for the report. + + Add support for CUDA compute capability 6.x. 
+* Tools + + Add --support to hwloc-info to list supported features, just like with + hwloc_topology_get_support(). + - Also add --objects and --topology to explicitly switch between the + default modes. + + Add --tid to let hwloc-bind operate on individual threads on Linux. + + Add --nodeset to let hwloc-bind report memory binding as NUMA node sets. + + hwloc-annotate and lstopo don't drop application userdata from XMLs anymore. + - Add --cu to hwloc-annotate to drop these application userdata. + + Make the hwloc-dump-hwdata dump directory configurable through configure + options such as --runstatedir or --localstatedir. +* Misc Improvements + + Add systemd service template contrib/systemd/hwloc-dump-hwdata.service + for launching hwloc-dump-hwdata at boot on Linux. + Thanks to Grzegorz Andrejczuk. + + Add HWLOC_PLUGINS_BLACKLIST environment variable to prevent some plugins + from being loaded. Thanks to Alexandre Denis for the suggestion. + + Small improvements for various Windows build systems, + thanks to Jonathan L Peyton and Marco Atzeri. + + +Version 1.11.2 +-------------- +* Improve support for Intel Knights Landing Xeon Phi on Linux: + + Group local NUMA nodes of normal memory (DDR) and high-bandwidth memory + (MCDRAM) together through "Cluster" groups so that the local MCDRAM is + easy to find. + - See "How do I find the local MCDRAM NUMA node on Intel Knights + Landing Xeon Phi?" in the documentation. + - For uniformity across all KNL configurations, always have a NUMA node + object even if the host is UMA. + + Fix the detection of the memory-side cache: + - Add the hwloc-dump-hwdata superuser utility to dump SMBIOS information + into /var/run/hwloc/ as root during boot, and load this dumped + information from the hwloc library at runtime. + - See "Why do I need hwloc-dump-hwdata for caches on Intel Knights + Landing Xeon Phi?" in the documentation. + Thanks to Grzegorz Andrejczuk for the patches and for the help. 
+* The x86 and linux backends may now be combined for discovering CPUs + through x86 CPUID and memory from the Linux kernel. + This is useful for working around buggy CPU information reported by Linux + (for instance the AMD Bulldozer/Piledriver bug below). + Combination is enabled by passing HWLOC_COMPONENTS=x86 in the environment. +* Fix L3 cache sharing on AMD Opteron 63xx (Piledriver) and 62xx (Bulldozer) + in the x86 backend. Thanks to many users who helped. +* Fix the overzealous L3 cache sharing fix added to the x86 backend in 1.11.1 + for AMD Opteron 61xx (Magny-Cours) processors. +* The x86 backend may now add the info attribute Inclusive=0 or 1 to caches + it discovers, or to caches discovered by other backends earlier. + Thanks to Guillaume Beauchamp for the patch. +* Fix the management on alloc_membind() allocation failures on AIX, HP-UX + and OSF/Tru64. +* Fix spurious failures to load with ENOMEM on AIX in case of Misc objects + below PUs. +* lstopo improvements in X11 and Windows graphical mode: + + Add + - f 1 shortcuts to manually zoom-in, zoom-out, reset the scale, + or fit the entire window. + + Display all keyboard shortcuts in the console. +* Debug messages may be disabled at runtime by passing HWLOC_DEBUG_VERBOSE=0 + in the environment when --enable-debug was passed to configure. +* Add a FAQ entry "What are these Group objects in my topology?". + + +Version 1.11.1 +-------------- +* Detection fixes + + Hardwire the topology of Fujitsu K-computer, FX10, FX100 servers to + workaround buggy Linux kernels. + Thanks to Takahiro Kawashima and Gilles Gouaillardet. + + Fix L3 cache information on AMD Opteron 61xx Magny-Cours processors + in the x86 backend. Thanks to Guillaume Beauchamp for the patch. + + Detect block devices directly attached to PCI without a controller, + for instance NVMe disks. Thanks to Barry M. Tannenbaum. + + Add the PCISlot attribute to all PCI functions instead of only the + first one. 
+* Miscellaneous internal fixes + + Ignore PCI bridges that could fail assertions by reporting buggy + secondary-subordinate bus numbers + Thanks to George Bosilca for reporting the issue. + + Fix an overzealous assertion when inserting an intermediate Group object + while Groups are totally ignored. + + Fix a memory leak on Linux on AMD processors with dual-core compute units. + Thanks to Bob Benner. + + Fix a memory leak on failure to load a xml diff file. + + Fix some segfaults when inputting an invalid synthetic description. + + Fix a segfault when plugins fail to find core symbols. + Thanks to Guy Streeter. +* Many fixes and improvements in the Windows backend: + + Fix the discovery of more than 32 processors and multiple processor + groups. Thanks to Barry M. Tannenbaum for the help. + + Add thread binding set support in case of multiple process groups. + + Add thread binding get support. + + Add get_last_cpu_location() support for the current thread. + + Disable the unsupported process binding in case of multiple processor + groups. + + Fix/update the Visual Studio support under contrib/windows. + Thanks to Eloi Gaudry for the help. +* Tools fixes + + Fix a segfault when displaying logical indexes in the graphical lstopo. + Thanks to Guillaume Mercier for reporting the issue. + + Fix lstopo linking with X11 libraries, for instance on Mac OS X. + Thanks to Scott Atchley and Pierre Ramet for reporting the issue. + + hwloc-annotate, hwloc-diff and hwloc-patch do not drop unavailable + resources from the output anymore and those may be annotated as well. + + Command-line tools may now import XML from the standard input with -i -.xml + + Add missing documentation for the hwloc-info --no-icaches option. + + +Version 1.11.0 +-------------- +* API + + Socket objects are renamed into Package to align with the terminology + used by processor vendors. The old HWLOC_OBJ_SOCKET type and "Socket" + name are still supported for backward compatibility. 
+ + HWLOC_OBJ_NODE is replaced with HWLOC_OBJ_NUMANODE for clarification. + HWLOC_OBJ_NODE is still supported for backward compatibility. + "Node" and "NUMANode" strings are supported as in earlier releases. +* Detection improvements + + Add support for Intel Knights Landing Xeon Phi. + Thanks to Grzegorz Andrejczuk and Lukasz Anaczkowski. + + Add Vendor, Model, Revision, SerialNumber, Type and LinuxDeviceID + info attributes to Block OS devices on Linux. Thanks to Vineet Pedaballe + for the help. + - Add --disable-libudev to avoid dependency on the libudev library. + + Add "MemoryModule" Misc objects with information about DIMMs, on Linux + when privileged and when I/O is enabled. + Thanks to Vineet Pedaballe for the help. + + Add a PCISlot attribute to PCI devices on Linux when supported to + identify the physical PCI slot where the board is plugged. + + Add CPUStepping info attribute on x86 processors, + thanks to Thomas Röhl for the suggestion. + + Ignore the device-tree on non-Power architectures to avoid buggy + detection on ARM. Thanks to Orion Poplawski for reporting the issue. + + Work-around buggy Xeon E5v3 BIOS reporting invalid PCI-NUMA affinity + for the PCI links on the second processor. + + Add support for CUDA compute capability 5.x, thanks Benjamin Worpitz. + + Many fixes to the x86 backend + - Add L1i and fix L2/L3 type on old AMD processors without topoext support. + - Fix Intel CPU family and model numbers when basic family isn't 6 or 15. + - Fix package IDs on recent AMD processors. + - Fix misc issues due to incomplete APIC IDs on x2APIC processors. + - Avoid buggy discovery on old SGI Altix UVs with non-unique APIC IDs. + + Gather total machine memory on NetBSD. +* Tools + + lstopo + - Collapse identical PCI devices unless --no-collapse is given. + This avoids gigantic outputs when a PCI device contains dozens of + identical virtual functions. + - The ASCII art output is now called "ascii", for instance in + "lstopo -.ascii". 
+ The former "txt" extension is retained for backward compatibility. + - Automatically scales graphical box width to the inner text in Cairo, + ASCII and Windows outputs. + - Add --rect to lstopo to force rectangular layout even for NUMA nodes. + - Add --restrict-flags to configure the behavior of --restrict. + - Objects may have a "Type" info attribute to specify a better type name + and display it in lstopo. + - Really export all verbose information to the given output file. + + hwloc-annotate + - May now operate on all types of objects, including I/O. + - May now insert Misc objects in the topology. + - Do not drop instruction caches and I/O devices from the output anymore. + + Fix lstopo path in hwloc-gather-topology after install. +* Misc + + Fix hwloc/cudart.h for machines with multiple PCI domains, + thanks to Imre Kerr for reporting the problem. + + Fix PCI Bridge-specific depth attribute. + + Fix hwloc_bitmap_intersect() for two infinite bitmaps. + + Fix some corner cases in the building of levels on large NUMA machines + with non-uniform NUMA groups and I/Os. + + Improve the performance of object insertion by cpuset for large + topologies. + + Prefix verbose XML import errors with the source name. + + Improve pkg-config checks and error messages. + + Fix excluding after a component with an argument in the HWLOC_COMPONENTS + environment variable. +* Documentation + + Fix the recommended way in documentation and examples to allocate memory + on some node, it should use HWLOC_MEMBIND_BIND. + Thanks to Nicolas Bouzat for reporting the issue. + + Add a "Miscellaneous objects" section in the documentation. + + Add a FAQ entry "What happens to my topology if I disable symmetric + multithreading, hyper-threading, etc. ?" to the documentation. + + +Version 1.10.1 +-------------- +* Actually remove disallowed NUMA nodes from nodesets when the whole-system + flag isn't enabled. +* Fix the gathering of PCI domains. 
Thanks to James Custer for reporting + the issue and providing a patch. +* Fix the merging of identical parent and child in presence of Misc objects. + Thanks to Dave Love for reporting the issue. +* Fix some misordering of children when merging with ignore_keep_structure() + in partially allowed topologies. +* Fix an overzealous assertion in the debug code when running on a single-PU + host with I/O. Thanks to Thomas Van Doren for reporting the issue. +* Don't forget to setup NUMA node object nodesets in x86 backend (for BSDs) + and OSF/Tru64 backend. +* Fix cpuid-x86 build error with gcc -O3 on x86-32. Thanks to Thomas Van Doren + for reporting the issue. +* Fix support for future very large caches in the x86 backend. +* Fix vendor/device names for SR-IOV PCI devices on Linux. +* Fix an unlikely crash in case of buggy hierarchical distance matrix. +* Fix PU os_index on some AIX releases. Thanks to Hendryk Bockelmann and + Erik Schnetter for helping debugging. +* Fix hwloc_bitmap_isincluded() in case of infinite sets. +* Change hwloc-ls.desktop into a lstopo.desktop and only install it if + lstopo is built with Cairo/X11 support. It cannot work with a non-graphical + lstopo or hwloc-ls. +* Add support for the renaming of Socket into Package in future releases. +* Add support for the replacement of HWLOC_OBJ_NODE with HWLOC_OBJ_NUMANODE + in future releases. +* Clarify the documentation of distance matrices in hwloc.h and in the manpage + of the hwloc-distances. Thanks to Dave Love for the suggestion. +* Improve some error messages by displaying more information about the + hwloc library in use. +* Document how to deal with the ABI break when upgrading to the upcoming 2.0 + See "How do I handle ABI breaks and API upgrades ?" in the FAQ. + + +Version 1.10.0 +-------------- +* API + + Add hwloc_topology_export_synthetic() to export a topology to a + synthetic string without using lstopo. See the Synthetic topologies + section in the documentation. 
+ + Add hwloc_topology_set/get_userdata() to let the application save + a private pointer in the topology whenever it needs a way to find + its own object corresponding to a topology. + + Add hwloc_get_numanode_obj_by_os_index() and document that this function + as well as hwloc_get_pu_obj_by_os_index() are good at converting + nodesets and cpusets into objects. + + hwloc_distrib() does not ignore any objects anymore when there are + too many of them. They get merged with others instead. + Thanks to Tim Creech for reporting the issue. +* Tools + + hwloc-bind --get now executes the command after displaying + the binding instead of ignoring the command entirely. + Thanks to John Donners for the suggestion. + + Clarify that memory sizes shown in lstopo are local by default + unless specified (total memory added in the root object). +* Synthetic topologies + + Synthetic topology descriptions may now specify attributes such as + memory sizes and OS indexes. See the Synthetic topologies section + in the documentation. + + lstopo now exports in this fully-detailed format by default. + The new option --export-synthetic-flags may be used to revert + back the old format. +* Documentation + + Add the doc/examples/ subdirectory with several real-life examples, + including the already existing hwloc-hello.C for basics. + Thanks to Rob Aulwes for the suggestion. + + Improve the documentation of CPU and memory binding in the API. + + Add a FAQ entry about operating system errors, especially on AMD + platforms with buggy cache information. + + Add a FAQ entry about loading many topologies in a single program. +* Misc + + Work around buggy Linux kernels reporting 2 sockets instead + 1 socket with 2 NUMA nodes for each Xeon E5 v3 (Haswell) processor. + + pciutils/libpci support is now removed since libpciaccess works + well and there's also a Linux-specific PCI backend. For the record, + pciutils was GPL and therefore disabled by default since v1.6.2. 
+ + Add --disable-cpuid configure flag to work around buggy processor + simulators reporting invalid CPUID information. + Thanks for Andrew Friedley for reporting the issue. + + Fix a racy use of libltdl when manipulating multiple topologies in + different threads. + Thanks to Andra Hugo for reporting the issue and testing patches. + + Fix some build failures in private/misc.h. + Thanks to Pavan Balaji and Ralph Castain for the reports. + + Fix failures to detect X11/Xutil.h on some Solaris platforms. + Thanks to Siegmar Gross for reporting the failure. + + The plugin ABI has changed, this release will not load plugins + built against previous hwloc releases. + + +Version 1.9.1 +------------- +* Fix a crash when the PCI locality is invalid. Attach to the root object + instead. Thanks to Nicolas Denoyelle for reporting the issue. +* Fix -f in lstopo manpage. Thanks to Jirka Hladky for reporting the issue. +* Fix hwloc_obj_type_sscanf() and others when strncasecmp() is not properly + available. Thanks to Nick Papior Andersen for reporting the problem. +* Mark Linux file descriptors as close-on-exec to avoid leaks on exec. +* Fix some minor memory leaks. + + +Version 1.9.0 +------------- +* API + + Add hwloc_obj_type_sscanf() to extend hwloc_obj_type_of_string() with + type-specific attributes such as Cache/Group depth and Cache type. + hwloc_obj_type_of_string() is moved to hwloc/deprecated.h. + + Add hwloc_linux_get_tid_last_cpu_location() for retrieving the + last CPU where a Linux thread given by TID ran. + + Add hwloc_distrib() to extend the old hwloc_distribute[v]() functions. + hwloc_distribute[v]() is moved to hwloc/deprecated.h. + + Don't mix total and local memory when displaying verbose object attributes + with hwloc_obj_attr_snprintf() or in lstopo. +* Backends + + Add CPUVendor, CPUModelNumber and CPUFamilyNumber info attributes for + x86, ia64 and Xeon Phi sockets on Linux, to extend the x86-specific + support added in v1.8.1. Requested by Ralph Castain. 
+ + Add many CPU- and Platform-related info attributes on ARM and POWER + platforms, in the Machine and Socket objects. + + Add CUDA info attributes describing the number of multiprocessors and + cores and the size of the global, shared and L2 cache memories in CUDA + OS devices. + + Add OpenCL info attributes describing the number of compute units and + the global memory size in OpenCL OS devices. + + The synthetic backend now accepts extended types such as L2Cache, L1i or + Group3. lstopo also exports synthetic strings using these extended types. +* Tools + + lstopo + - Do not overwrite output files by default anymore. + Pass -f or --force to enforce it. + - Display OpenCL, CUDA and Xeon Phi numbers of cores and memory sizes + in the graphical output. + - Fix export to stdout when specifying a Cairo-based output type + with --of. + + hwloc-ps + - Add -e or --get-last-cpu-location to report where processes/threads + run instead of where they are bound. + - Report locations as likely-more-useful objects such as Cores or Sockets + instead of Caches when possible. + + hwloc-bind + - Fix failure on Windows when not using --pid. + - Add -e as a synonym to --get-last-cpu-location. + + hwloc-distrib + - Add --reverse to distribute using last objects first and singlify + into last bits first. Thanks to Jirka Hladky for the suggestion. + + hwloc-info + - Report unified caches when looking for data or instruction cache + ancestor objects. +* Misc + + Add experimental Visual Studio support under contrib/windows. + Thanks to Eloi Gaudry for his help and for providing the first draft. + + Fix some overzealous assertions and warnings about the ordering of + objects on a level with respect to cpusets. The ordering is only + guaranteed for complete cpusets (based on the first bit in sets). + + Fix some memory leaks when importing xml diffs and when exporting a + "too complex" entry. 
+ + +Version 1.8.1 +------------- +* Fix the cpuid code on Windows 64bits so that the x86 backend gets + enabled as expected and can populate CPU information. + Thanks to Robin Scher for reporting the problem. +* Add CPUVendor/CPUModelNumber/CPUFamilyNumber attributes when running + on x86 architecture. Thanks to Ralph Castain for the suggestion. +* Work around buggy BIOS reporting duplicate NUMA nodes on Linux. + Thanks to Jeff Becker for reporting the problem and testing the patch. +* Add a name to the lstopo graphical window. Thanks to Michael Prokop + for reporting the issue. + + +Version 1.8.0 +------------- +* New components + + Add the "linuxpci" component that always works on Linux even when + libpciaccess and libpci aren't available (and even with a modified + file-system root). By default the old "pci" component runs first + because "linuxpci" lacks device names (obj->name is always NULL). +* API + + Add the topology difference API in hwloc/diff.h for manipulating + many similar topologies. + + Add hwloc_topology_dup() for duplicating an entire topology. + + hwloc.h and hwloc/helper.h have been reorganized to clarify the + documentation sections. The actual inline code has moved out of hwloc.h + into the new hwloc/inlines.h. + + Deprecated functions are now in hwloc/deprecated.h, and not in the + official documentation anymore. +* Tools + + Add hwloc-diff and hwloc-patch tools together with the new diff API. + + Add hwloc-compress-dir to (de)compress an entire directory of XML files + using hwloc-diff and hwloc-patch. + + Object colors in the graphical output of lstopo may be changed by adding + a "lstopoStyle" info attribute. See CUSTOM COLORS in the lstopo(1) manpage + for details. Thanks to Jirka Hladky for discussing the idea. + + hwloc-gather-topology may now gather I/O-related files on Linux when + --io is given. Only the linuxpci component supports discovering I/O + objects from these extended tarballs. 
+  + hwloc-annotate now supports --ri to remove/replace info attributes with
+    a given name.
+  + hwloc-info supports "root" and "all" special locations for dumping
+    information about the root object.
+  + lstopo now supports --append-legend to append custom lines of text
+    to the legend in the graphical output. Thanks to Jirka Hladky for
+    discussing the idea.
+  + hwloc-calc and friends have a more robust parsing of locations given
+    on the command-line and they report useful error messages about it.
+  + Add --whole-system to hwloc-bind, hwloc-calc, hwloc-distances and
+    hwloc-distrib, and add --restrict to hwloc-bind for uniformity among
+    tools.
+* Misc
+  + Calling hwloc_topology_load() or hwloc_topology_set_*() on an already
+    loaded topology now returns an error (deprecated since release 1.6.1).
+  + Fix the initialisation of cpusets and nodesets in Group objects added
+    when inserting PCI hostbridges.
+  + Never merge Group objects that were added explicitly by the user with
+    hwloc_custom_insert_group_object_by_parent().
+  + Add a sanity check during dynamic plugin loading to prevent some
+    crashes when hwloc is dynamically loaded by other plugin mechanisms.
+  + Add --with-hwloc-plugins-path to specify the install/load directories
+    of plugins.
+  + Add the MICSerialNumber info attribute to the root object when running
+    hwloc inside a Xeon Phi to match the same attribute in the MIC OS device
+    when running in the host.
+
+
+Version 1.7.2
+-------------
+* Do not create invalid block OS devices on very old Linux kernel such
+  as RHEL4 2.6.9.
+* Fix PCI subvendor/device IDs.
+* Fix the management of Misc objects inserted by parent.
+  Thanks to Jirka Hladky for reporting the problem.
+* Add a PortState info attribute to OpenFabrics OS devices.
+* Add a MICSerialNumber info attribute to Xeon PHI/MIC OS devices.
+* Improve verbose error messages when failing to load from XML.
+
+
+Version 1.7.1
+-------------
+* Fix a failed assertion in the distance grouping code when loading an XML
+  file that already contains some groups.
+  Thanks to Laercio Lima Pilla for reporting the problem.
+* Remove unexpected Group objects when loading XML topologies with I/O
+  objects and NUMA distances.
+  Thanks to Elena Elkina for reporting the problem and testing patches.
+* Fix PCI link speed discovery when using libpciaccess.
+* Fix invalid libpciaccess virtual function device/vendor IDs when using
+  SR-IOV PCI devices on Linux.
+* Fix GL component build with old NVCtrl releases.
+  Thanks to Jirka Hladky for reporting the problem.
+* Fix embedding breakage caused by libltdl.
+  Thanks to Pavan Balaji for reporting the problem.
+* Always use the system-wide libltdl instead of shipping one inside hwloc.
+* Document issues when enabling plugins while embedding hwloc in another
+  project, in the documentation section Embedding hwloc in Other Software.
+* Add a FAQ entry "How to get useful topology information on NetBSD?"
+  in the documentation.
+* Some fixes in the renaming code for embedding.
+* Miscellaneous minor build fixes.
+
+
+Version 1.7.0
+-------------
+* New operating system backends
+  + Add BlueGene/Q compute node kernel (CNK) support. See the FAQ in the
+    documentation for details. Thanks to Jeff Hammond, Christopher Samuel
+    and Erik Schnetter for their help.
+  + Add NetBSD support, thanks to Aleksej Saushev.
+* New I/O device discovery
+  + Add co-processor OS devices such as "mic0" for Intel Xeon Phi (MIC)
+    on Linux. Thanks to Jerome Vienne for helping.
+  + Add co-processor OS devices such as "cuda0" for NVIDIA CUDA-capable GPUs.
+  + Add co-processor OS devices such as "opencl0d0" for OpenCL GPU devices
+    on the AMD OpenCL implementation.
+  + Add GPU OS devices such as ":0.0" for NVIDIA X11 displays.
+  + Add GPU OS devices such as "nvml0" for NVIDIA GPUs.
+    Thanks to Marwan Abdellah and Stefan Eilemann for helping.
+ These new OS devices have some string info attributes such as CoProcType, + GPUModel, etc. to better identify them. + See the I/O Devices and Attributes documentation sections for details. +* New components + + Add the "opencl", "cuda", "nvml" and "gl" components for I/O device + discovery. + + "nvml" also improves the discovery of NVIDIA GPU PCIe link speed. + All of these new components may be built as plugins. They may also be + disabled entirely by passing --disable-opencl/cuda/nvml/gl to configure. + See the I/O Devices, Components and Plugins, and FAQ documentation + sections for details. +* API + + Add hwloc_topology_get_flags(). + + Add hwloc/plugins.h for building external plugins. + See the Adding new discovery components and plugins section. +* Interoperability + + Add hwloc/opencl.h, hwloc/nvml.h, hwloc/gl.h and hwloc/intel-mic.h + to retrieve the locality of OS devices that correspond to AMD OpenCL + GPU devices or indexes, to NVML devices or indexes, to NVIDIA X11 + displays, or to Intel Xeon Phi (MIC) device indexes. + + Add new helpers in hwloc/cuda.h and hwloc/cudart.h to convert + between CUDA devices or indexes and hwloc OS devices. + + Add hwloc_ibv_get_device_osdev() and clarify the requirements + of the OpenFabrics Verbs helpers in hwloc/openfabrics-verbs.h. +* Tools + + hwloc-info is not only a synonym of lstopo -s anymore, it also + dumps information about objects given on the command-line. +* Documentation + + Add a section "Existing components and plugins". + + Add a list of common OS devices in section "Software devices". + + Add a new FAQ entry "Why is lstopo slow?" about lstopo slowness + issues because of GPUs. + + Clarify the documentation of inline helpers in hwloc/myriexpress.h + and hwloc/openfabrics-verbs.h. +* Misc + + Improve cache detection on AIX. + + The HWLOC_COMPONENTS variable now excludes the components whose + names are prefixed with '-'. 
+ + lstopo --ignore PU now works when displaying the topology in + graphical and textual mode (not when exporting to XML). + + Make sure I/O options always appear in lstopo usage, not only when + using pciutils/libpci. + + Remove some unneeded Linux specific includes from some interoperability + headers. + + Fix some inconsistencies in hwloc-distrib and hwloc-assembler-remote + manpages. Thanks to Guy Streeter for the report. + + Fix a memory leak on AIX when getting memory binding. + + Fix many small memory leaks on Linux. + + The `libpci' component is now called `pci' but the old name is still + accepted in the HWLOC_COMPONENTS variable for backward compatibility. + + +Version 1.6.2 +------------- +* Use libpciaccess instead of pciutils/libpci by default for I/O discovery. + pciutils/libpci is only used if --enable-libpci is given to configure + because its GPL license may taint hwloc. See the Installation section + in the documentation for details. +* Fix get_cpubind on Solaris when bound to a single PU with + processor_bind(). Thanks to Eugene Loh for reporting the problem + and providing a patch. + + +Version 1.6.1 +------------- +* Fix some crash or buggy detection in the x86 backend when Linux + cgroups/cpusets restrict the available CPUs. +* Fix the pkg-config output with --libs --static. + Thanks to Erik Schnetter for reporting one of the problems. +* Fix the output of hwloc-calc -H --hierarchical when using logical + indexes in the output. +* Calling hwloc_topology_load() multiple times on the same topology + is officially deprecated. hwloc will warn in such cases. +* Add some documentation about existing plugins/components, package + dependencies, and I/O devices specification on the command-line. + + +Version 1.6.0 +------------- +* Major changes + + Reorganize the backend infrastructure to support dynamic selection + of components and dynamic loading of plugins. For details, see the + new documentation section Components and plugins. 
+    - The HWLOC_COMPONENTS variable lets one replace the default discovery
+      components.
+    - Dynamic loading of plugins may be enabled with --enable-plugins
+      (except on AIX and Windows). It will build libxml2 and libpci
+      support as separated modules. This helps reducing the dependencies
+      of the core hwloc library when distributed as a binary package.
+* Backends
+  + Add CPUModel detection on Darwin and x86/FreeBSD.
+    Thanks to Robin Scher for providing ways to implement this.
+  + The x86 backend now adds CPUModel info attributes to socket objects
+    created by other backends that do not natively support this attribute.
+  + Fix detection on FreeBSD in case of cpuset restriction. Thanks to
+    Sebastian Kuzminsky for reporting the problem.
+* XML
+  + Add hwloc_topology_set_userdata_import/export_callback(),
+    hwloc_export_obj_userdata() and _userdata_base64() to let
+    applications specify how to save/restore the custom data they placed
+    in the userdata private pointer field of hwloc objects.
+* Tools
+  + Add hwloc-annotate program to add string info attributes to XML
+    topologies.
+  + Add --pid-cmd to hwloc-ps to append the output of a command to each
+    PID line. May be used for showing Open MPI process ranks, see the
+    hwloc-ps(1) manpage for details.
+  + hwloc-bind now exits with an error if binding fails; the executable
+    is not launched unless binding succeeded or --force was given.
+  + Add --quiet to hwloc-calc and hwloc-bind to hide non-fatal error
+    messages.
+  + Fix command-line pid support in windows tools.
+  + All programs accept --verbose as a synonym to -v.
+* Misc
+  + Fix some DIR descriptor leaks on Linux.
+  + Fix I/O device lists when some were filtered out after a XML import.
+  + Fix the removal of I/O objects when importing a I/O-enabled XML topology
+    without any I/O topology flag.
+ + When merging objects with HWLOC_IGNORE_TYPE_KEEP_STRUCTURE or + lstopo --merge, compare object types before deciding which one of two + identical object to remove (e.g. keep sockets in favor of caches). + + Add some GUID- and LID-related info attributes to OpenFabrics + OS devices. + + Only add CPUType socket attributes on Solaris/Sparc. Other cases + don't report reliable information (Solaris/x86), and a replacement + is available as the Architecture string info in the Machine object. + + Add missing Backend string info on Solaris in most cases. + + Document object attributes and string infos in a new Attributes + section in the documentation. + + Add a section about Synthetic topologies in the documentation. + + +Version 1.5.2 (some of these changes are in v1.6.2 but not in v1.6) +------------- +* Use libpciaccess instead of pciutils/libpci by default for I/O discovery. + pciutils/libpci is only used if --enable-libpci is given to configure + because its GPL license may taint hwloc. See the Installation section + in the documentation for details. +* Fix get_cpubind on Solaris when bound to a single PU with + processor_bind(). Thanks to Eugene Loh for reporting the problem + and providing a patch. +* Fix some DIR descriptor leaks on Linux. +* Fix I/O device lists when some were filtered out after a XML import. +* Add missing Backend string info on Solaris in most cases. +* Fix the removal of I/O objects when importing a I/O-enabled XML topology + without any I/O topology flag. +* Fix the output of hwloc-calc -H --hierarchical when using logical + indexes in the output. +* Fix the pkg-config output with --libs --static. + Thanks to Erik Schnetter for reporting one of the problems. + + +Version 1.5.1 +------------- +* Fix block OS device detection on Linux kernel 3.3 and later. + Thanks to Guy Streeter for reporting the problem and testing the fix. +* Fix the cpuid code in the x86 backend (for FreeBSD). 
Thanks to
+  Sebastian Kuzminsky for reporting problems and testing patches.
+* Fix 64bit detection on FreeBSD.
+* Fix some corner cases in the management of the thissystem flag with
+  respect to topology flags and environment variables.
+* Fix some corner cases in command-line parsing checks in hwloc-distrib
+  and hwloc-distances.
+* Make sure we do not miss some block OS devices on old Linux kernels
+  when a single PCI device has multiple IDE hosts/devices behind it.
+* Do not disable I/O devices or instruction caches in hwloc-assembler output.
+
+
+Version 1.5.0
+-------------
+* Backends
+  + Do not limit the number of processors to 1024 on Solaris anymore.
+  + Gather total machine memory on FreeBSD. Thanks to Cyril Roelandt.
+  + XML topology files do not depend on the locale anymore. Float numbers
+    such as NUMA distances or PCI link speeds now always use a dot as a
+    decimal separator.
+  + Add instruction caches detection on Linux, AIX, Windows and Darwin.
+  + Add get_last_cpu_location() support for the current thread on AIX.
+  + Support binding on AIX when threads or processes were bound with
+    bindprocessor(). Thanks to Hendryk Bockelmann for reporting the issue
+    and testing patches, and to Farid Parpia for explaining the binding
+    interfaces.
+  + Improve AMD topology detection in the x86 backend (for FreeBSD) using
+    the topoext feature.
+* API
+  + Increase HWLOC_API_VERSION to 0x00010500 so that API changes may be
+    detected at build-time.
+  + Add a cache type attribute describing Data, Instruction and Unified
+    caches. Caches with different types but same depth (for instance L1d
+    and L1i) are placed on different levels.
+  + Add hwloc_get_cache_type_depth() to retrieve the hwloc level depth
+    of the given cache depth and type, for instance L1i or L2.
+    It helps disambiguating the case where hwloc_get_type_depth() returns
+    HWLOC_TYPE_DEPTH_MULTIPLE.
+ + Instruction caches are ignored unless HWLOC_TOPOLOGY_FLAG_ICACHES is + passed to hwloc_topology_set_flags() before load. + + Add hwloc_ibv_get_device_osdev_by_name() OpenFabrics helper in + openfabrics-verbs.h to find the hwloc OS device object corresponding to + an OpenFabrics device. +* Tools + + Add lstopo-no-graphics, a lstopo built without graphical support to + avoid dependencies on external libraries such as Cairo and X11. When + supported, graphical outputs are only available in the original lstopo + program. + - Packagers splitting lstopo and lstopo-no-graphics into different + packages are advised to use the alternatives system so that lstopo + points to the best available binary. + + Instruction caches are enabled in lstopo by default. Use --no-icaches + to disable them. + + Add -t/--threads to show threads in hwloc-ps. +* Removal of obsolete components + + Remove the old cpuset interface (hwloc/cpuset.h) which is deprecated and + superseded by the bitmap API (hwloc/bitmap.h) since v1.1. + hwloc_cpuset and nodeset types are still defined, but all hwloc_cpuset_* + compatibility wrappers are now gone. + + Remove Linux libnuma conversion helpers for the deprecated and + broken nodemask_t interface. + + Remove support for "Proc" type name, it was superseded by "PU" in v1.0. + + Remove hwloc-mask symlinks, it was replaced by hwloc-calc in v1.0. +* Misc + + Fix PCIe 3.0 link speed computation. + + Non-printable characters are dropped from strings during XML export. + + Fix importing of escaped characters with the minimalistic XML backend. + + Assert hwloc_is_thissystem() in several I/O related helpers. + + Fix some memory leaks in the x86 backend for FreeBSD. + + Minor fixes to ease native builds on Windows. + + Limit the number of retries when operating on all threads within a + process on Linux if the list of threads is heavily getting modified. 
+ + +Version 1.4.3 +------------- +* This release is only meant to fix the pciutils license issue when upgrading + to hwloc v1.5 or later is not possible. It contains several other minor + fixes but ignores many of them that are only in v1.5 or later. +* Use libpciaccess instead of pciutils/libpci by default for I/O discovery. + pciutils/libpci is only used if --enable-libpci is given to configure + because its GPL license may taint hwloc. See the Installation section + in the documentation for details. +* Fix PCIe 3.0 link speed computation. +* Fix importing of escaped characters with the minimalistic XML backend. +* Fix a memory leak in the x86 backend. + + +Version 1.4.2 +------------- +* Fix build on Solaris 9 and earlier when fabsf() is not a compiler + built-in. Thanks to Igor Galić for reporting the problem. +* Fix support for more than 32 processors on Windows. Thanks to Hartmut + Kaiser for reporting the problem. +* Fix process-wide binding and cpulocation routines on Linux when some + threads disappear in the meantime. Thanks to Vlad Roubtsov for reporting + the issue. +* Make installed scripts executable. Thanks to Jirka Hladky for reporting + the problem. +* Fix libtool revision management when building for Windows. This fix was + also released as hwloc v1.4.1.1 Windows builds. Thanks to Hartmut Kaiser + for reporting the problem. +* Fix the __hwloc_inline keyword in public headers when compiling with a + C++ compiler. +* Add Port info attribute to network OS devices inside OpenFabrics PCI + devices so as to identify which interface corresponds to which port. +* Document requirements for interoperability helpers: I/O devices discovery + is required for some of them; the topology must match the current host + for most of them. + + +Version 1.4.1 +------------- +* This release contains all changes from v1.3.2. +* Fix hwloc_alloc_membind, thanks Karl Napf for reporting the issue. +* Fix memory leaks in some get_membind() functions. 
+* Fix helpers converting from Linux libnuma to hwloc (hwloc/linux-libnuma.h) + in case of out-of-order NUMA node ids. +* Fix some overzealous assertions in the distance grouping code. +* Workaround BIOS reporting empty I/O locality in CUDA and OpenFabrics + helpers on Linux. Thanks to Albert Solernou for reporting the problem. +* Install a valgrind suppressions file hwloc-valgrind.supp (see the FAQ). +* Fix memory binding documentation. Thanks to Karl Napf for reporting the + issues. + + +Version 1.4.0 (does not contain all v1.3.2 changes) +------------- +* Major features + + Add "custom" interface and "assembler" tools to build multi-node + topology. See the Multi-node Topologies section in the documentation + for details. +* Interface improvements + + Add symmetric_subtree object attribute to ease assumptions when consulting + regular symmetric topologies. + + Add a CPUModel and CPUType info attribute to Socket objects on Linux + and Solaris. + + Add hwloc_get_obj_index_inside_cpuset() to retrieve the "logical" index + of an object within a subtree of the topology. + + Add more NVIDIA CUDA helpers in cuda.h and cudart.h to find hwloc objects + corresponding to CUDA devices. +* Discovery improvements + + Add a group object above partial distance matrices to make sure + the matrices are available in the final topology, except when this + new object would contradict the existing hierarchy. + + Grouping by distances now also works when loading from XML. + + Fix some corner cases in object insertion, for instance when dealing + with NUMA nodes without any CPU. +* Backends + + Implement hwloc_get_area_membind() on Linux. + + Honor I/O topology flags when importing from XML. + + Further improve XML-related error checking and reporting. + + Hide synthetic topology error messages unless HWLOC_SYNTHETIC_VERBOSE=1. +* Tools + + Add synthetic exporting of symmetric topologies to lstopo. + + lstopo --horiz and --vert can now be applied to some specific object types. 
+ + lstopo -v -p now displays distance matrices with physical indexes. + + Add hwloc-distances utility to list distances. +* Documentation + + Fix and/or document the behavior of most inline functions in hwloc/helper.h + when the topology contains some I/O or Misc objects. + + Backend documentation enhancements. +* Bug fixes + + Fix missing last bit in hwloc_linux_get_thread_cpubind(). + Thanks to Carolina Gómez-Tostón Gutiérrez for reporting the issue. + + Fix FreeBSD build without cpuid support. + + Fix several Windows build issues. + + Fix inline keyword definition in public headers. + + Fix dependencies in the embedded library. + + Improve visibility support detection. Thanks to Dave Love for providing + the patch. + + Remove references to internal symbols in the tools. + + +Version 1.3.3 +------------- +* This release is only meant to fix the pciutils license issue when upgrading + to hwloc v1.4 or later is not possible. It contains several other minor + fixes but ignores many of them that are only in v1.4 or later. +* Use libpciaccess instead of pciutils/libpci by default for I/O discovery. + pciutils/libpci is only used if --enable-libpci is given to configure + because its GPL license may taint hwloc. See the Installation section + in the documentation for details. + + +Version 1.3.2 +------------- +* Fix missing last bit in hwloc_linux_get_thread_cpubind(). + Thanks to Carolina Gómez-Tostón Gutiérrez for reporting the issue. +* Fix build with -mcmodel=medium. Thanks to Devendar Bureddy for reporting + the issue. +* Fix build with Solaris Studio 12 compiler when XML is disabled. + Thanks to Paul H. Hargrove for reporting the problem. +* Fix installation with old GNU sed, for instance on Red Hat 8. + Thanks to Paul H. Hargrove for reporting the problem. +* Fix PCI locality when Linux cgroups restrict the available CPUs. +* Fix floating point issue when grouping by distance on mips64 architecture. + Thanks to Paul H. Hargrove for reporting the problem. 
+* Fix conversion from/to Linux libnuma when some NUMA nodes have no memory. +* Fix support for gccfss compilers with broken ffs() support. Thanks to + Paul H. Hargrove for reporting the problem and providing a patch. +* Fix FreeBSD build without cpuid support. +* Fix several Windows build issues. +* Fix inline keyword definition in public headers. +* Fix dependencies in the embedded library. +* Detect when a compiler such as xlc may not report compile errors + properly, causing some configure checks to be wrong. Thanks to + Paul H. Hargrove for reporting the problem and providing a patch. +* Improve visibility support detection. Thanks to Dave Love for providing + the patch. +* Remove references to internal symbols in the tools. +* Fix installation on systems with limited command-line size. + Thanks to Paul H. Hargrove for reporting the problem. +* Further improve XML-related error checking and reporting. + + +Version 1.3.1 +------------- +* Fix pciutils detection with pkg-config when not installed in standard + directories. +* Fix visibility options detection with the Solaris Studio compiler. + Thanks to Igor Galić and Terry Dontje for reporting the problems. +* Fix support for old Linux sched.h headers such as those found + on Red Hat 8. Thanks to Paul H. Hargrove for reporting the problems. +* Fix inline and attribute support for Solaris compilers. Thanks to + Dave Love for reporting the problems. +* Print a short summary at the end of the configure output. Thanks to + Stefan Eilemann for the suggestion. +* Add --disable-libnuma configure option to disable libnuma-based + memory binding support on Linux. Thanks to Rayson Ho for the + suggestion. +* Make hwloc's configure script properly obey $PKG_CONFIG. Thanks to + Nathan Phillip Brink for raising the issue. +* Silence some harmless pciutils warnings, thanks to Paul H. Hargrove + for reporting the problem. 
+* Fix the documentation with respect to hwloc_pid_t and hwloc_thread_t + being either pid_t and pthread_t on Unix, or HANDLE on Windows. + + +Version 1.3.0 +------------- +* Major features + + Add I/O devices and bridges to the topology using the pciutils + library. Only enabled after setting the relevant flag with + hwloc_topology_set_flags() before hwloc_topology_load(). See the + I/O Devices section in the documentation for details. +* Discovery improvements + + Add associativity to the cache attributes. + + Add support for s390/z11 "books" on Linux. + + Add the HWLOC_GROUPING_ACCURACY environment variable to relax + distance-based grouping constraints. See the Environment Variables + section in the documentation for details about grouping behavior + and configuration. + + Allow user-given distance matrices to remove or replace those + discovered by the OS backend. +* XML improvements + + XML is now always supported: a minimalistic custom import/export + code is used when libxml2 is not available. It is only guaranteed + to read XML files generated by hwloc. + + hwloc_topology_export_xml() and export_xmlbuffer() now return an + integer. + + Add hwloc_free_xmlbuffer() to free the buffer allocated by + hwloc_topology_export_xmlbuffer(). + + Hide XML topology error messages unless HWLOC_XML_VERBOSE=1. +* Minor API updates + + Add hwloc_obj_add_info to customize object info attributes. +* Tools + + lstopo now displays I/O devices by default. Several options are + added to configure the I/O discovery. + + hwloc-calc and hwloc-bind now accept I/O devices as input. + + Add --restrict option to hwloc-calc and hwloc-distribute. + + Add --sep option to change the output field separator in hwloc-calc. + + Add --whole-system option to hwloc-ps. + + +Version 1.2.2 +------------- +* Fix build on AIX 5.2, thanks Utpal Kumar Ray for the report. +* Fix XML import of very large page sizes or counts on 32bits platform, + thanks to Karsten Hopp for the RedHat ticket. 
+* Fix crash when administrator limitations such as Linux cgroup require + to restrict distance matrices. Thanks to Ake Sandgren for reporting the + problem. +* Fix the removal of objects such as AMD Magny-Cours dual-node sockets + in case of administrator restrictions. +* Improve error reporting and messages in case of wrong synthetic topology + description. +* Several other minor internal fixes and documentation improvements. + + +Version 1.2.1 +------------- +* Improve support of AMD Bulldozer "Compute-Unit" modules by detecting + logical processors with different core IDs on Linux. +* Fix hwloc-ps crash when listing processes from another Linux cpuset. + Thanks to Carl Smith for reporting the problem. +* Fix build on AIX and Solaris. Thanks to Carl Smith and Andreas Kupries + for reporting the problems. +* Fix cache size detection on Darwin. Thanks to Erkcan Özcan for reporting + the problem. +* Make configure fail if --enable-xml or --enable-cairo is given and + proper support cannot be found. Thanks to Andreas Kupries for reporting + the XML problem. +* Fix spurious L1 cache detection on AIX. Thanks to Hendryk Bockelmann + for reporting the problem. +* Fix hwloc_get_last_cpu_location(THREAD) on Linux. Thanks to Gabriele + Fatigati for reporting the problem. +* Fix object distance detection on Solaris. +* Add pthread_self weak symbol to ease static linking. +* Minor documentation fixes. + + +Version 1.2.0 +------------- +* Major features + + Expose latency matrices in the API as an array of distance structures + within objects. Add several helpers to find distances. + + Add hwloc_topology_set_distance_matrix() and environment variables + to provide a matrix of distances between a given set of objects. + + Add hwloc_get_last_cpu_location() and hwloc_get_proc_last_cpu_location() + to retrieve the processors where a process or thread recently ran. + - Add the corresponding --get-last-cpu-location option to hwloc-bind. 
+ + Add hwloc_topology_restrict() to restrict an existing topology to a + given cpuset. + - Add the corresponding --restrict option to lstopo. +* Minor API updates + + Add hwloc_bitmap_list_sscanf/snprintf/asprintf to convert between bitmaps + and strings such as 4-5,7-9,12,15- + + hwloc_bitmap_set/clr_range() now support infinite ranges. + + Clarify the difference between inserting Misc objects by cpuset or by + parent. + + hwloc_insert_misc_object_by_cpuset() now returns NULL in case of error. +* Discovery improvements + + x86 backend (for freebsd): add x2APIC support + + Support standard device-tree phandle, to get better support on e.g. ARM + systems providing it. + + Detect cache size on AIX. Thanks Christopher and IBM. + + Improve grouping to support asymmetric topologies. +* Tools + + Command-line tools now support "all" and "root" special locations + consisting in the entire topology, as well as type names with depth + attributes such as L2 or Group4. + + hwloc-calc improvements: + - Add --number-of/-N option to report the number of objects of a given + type or depth. + - -I is now equivalent to --intersect for listing the indexes of + objects of a given type or depth that intersects the input. + - Add -H to report the output as a hierarchical combination of types + and depths. + + Add --thissystem to lstopo. + + Add lstopo-win, a console-less lstopo variant on Windows. +* Miscellaneous + + Remove C99 usage from code base. + + Rename hwloc-gather-topology.sh into hwloc-gather-topology + + Fix AMD cache discovery on freebsd when there is no L3 cache, thanks + Andriy Gapon for the fix. + + +Version 1.1.2 +------------- +* Fix a segfault in the distance-based grouping code when some objects + are not placed in any group. Thanks to Bernd Kallies for reporting + the problem and providing a patch. +* Fix the command-line parsing of hwloc-bind --mempolicy interleave. + Thanks to Guy Streeter for reporting the problem. 
+* Stop truncating the output in hwloc_obj_attr_snprintf() and in the + corresponding lstopo output. Thanks to Guy Streeter for reporting the + problem. +* Fix object levels ordering in synthetic topologies. +* Fix potential incoherency between device tree and kernel information, + when SMT is disabled on Power machines. +* Fix and document the behavior of hwloc_topology_set_synthetic() in case + of invalid argument. Thanks to Guy Streeter for reporting the problem. +* Add some verbose error message reporting when it looks like the OS + gives erroneous information. +* Do not include unistd.h and stdint.h in public headers on Windows. +* Move config.h files into their own subdirectories to avoid name + conflicts when AC_CONFIG_HEADERS adds -I's for them. +* Remove the use of declaring variables inside "for" loops. +* Some other minor fixes. +* Many minor documentation fixes. + + +Version 1.1.1 +------------- +* Add hwloc_get_api_version() which returns the version of hwloc used + at runtime. Thanks to Guy Streeter for the suggestion. +* Fix the number of hugepages reported for NUMA nodes on Linux. +* Fix hwloc_bitmap_to_ulong() right after allocating the bitmap. + Thanks to Bernd Kallies for reporting the problem. +* Fix hwloc_bitmap_from_ith_ulong() to properly zero the first ulong. + Thanks to Guy Streeter for reporting the problem. +* Fix hwloc_get_membind_nodeset() on Linux. + Thanks to Bernd Kallies for reporting the problem and providing a patch. +* Fix some file descriptor leaks in the Linux discovery. +* Fix the minimum width of NUMA nodes, caches and the legend in the graphical + lstopo output. Thanks to Jirka Hladky for reporting the problem. +* Various fixes to bitmap conversion from/to taskset-strings. +* Fix and document snprintf functions behavior when the buffer size is too + small or zero. Thanks to Guy Streeter for reporting the problem. +* Fix configure to avoid spurious enabling of the cpuid backend. 
+ Thanks to Tim Anderson for reporting the problem. +* Cleanup error management in hwloc-gather-topology.sh. + Thanks to Jirka Hladky for reporting the problem and providing a patch. +* Add a manpage and usage for hwloc-gather-topology.sh on Linux. + Thanks to Jirka Hladky for providing a patch. +* Memory binding documentation enhancements. + + +Version 1.1.0 +------------- + +* API + + Increase HWLOC_API_VERSION to 0x00010100 so that API changes may be + detected at build-time. + + Add a memory binding interface. + + The cpuset API (hwloc/cpuset.h) is now deprecated. It is replaced by + the bitmap API (hwloc/bitmap.h) which offers the same features with more + generic names since it applies to CPU sets, node sets and more. + Backward compatibility with the cpuset API and ABI is still provided but + it will be removed in a future release. + Old types (hwloc_cpuset_t, ...) are still available as a way to clarify + what kind of hwloc_bitmap_t each API function manipulates. + Upgrading to the new API only requires to replace hwloc_cpuset_ function + calls with the corresponding hwloc_bitmap_ calls, with the following + renaming exceptions: + - hwloc_cpuset_cpu -> hwloc_bitmap_only + - hwloc_cpuset_all_but_cpu -> hwloc_bitmap_allbut + - hwloc_cpuset_from_string -> hwloc_bitmap_sscanf + + Add an `infos' array in each object to store couples of info names and + values. It enables generic storage of things like the old dmi board infos + that were previously stored in machine specific attributes. + + Add linesize cache attribute. +* Features + + Bitmaps (and thus CPU sets and node sets) are dynamically (re-)allocated, + the maximal number of CPUs (HWLOC_NBMAXCPUS) has been removed. + + Improve the distance-based grouping code to better support irregular + distance matrices. + + Add support for device-tree to get cache information (useful on Power + architectures). 
+* Helpers + + Add NVIDIA CUDA helpers in cuda.h and cudart.h to ease interoperability + with CUDA Runtime and Driver APIs. + + Add Myrinet Express helper in myriexpress.h to ease interoperability. +* Tools + + lstopo now displays physical/OS indexes by default in graphical mode + (use -l to switch back to logical indexes). The textual output still uses + logical by default (use -p to switch to physical indexes). + + lstopo prefixes logical indexes with `L#' and physical indexes with `P#'. + Physical indexes are also printed as `P#N' instead of `phys=N' within + object attributes (in parentheses). + + Add a legend at the bottom of the lstopo graphical output, use --no-legend + to remove it. + + Add hwloc-ps to list process' bindings. + + Add --membind and --mempolicy options to hwloc-bind. + + Improve tools command-line options by adding a generic --input option + (and more) which replaces the old --xml, --synthetic and --fsys-root. + + Cleanup lstopo output configuration by adding --output-format. + + Add --intersect in hwloc-calc, and replace --objects with --largest. + + Add the ability to work on standard input in hwloc-calc. + + Add --from, --to and --at in hwloc-distrib. + + Add taskset-specific functions and command-line tools options to + manipulate CPU set strings in the format of the taskset program. + + Install hwloc-gather-topology.sh on Linux. + + +Version 1.0.3 +------------- + +* Fix support for Linux cpuset when emulated by a cgroup mount point. +* Remove unneeded runtime dependency on libibverbs.so in the library and + all utils programs. +* Fix hwloc_cpuset_to_linux_libnuma_ulongs in case of non-linear OS-indexes + for NUMA nodes. +* lstopo now displays physical/OS indexes by default in graphical mode + (use -l to switch back to logical indexes). The textual output still uses + logical by default (use -p to switch to physical indexes). + + +Version 1.0.2 +------------- + +* Public headers can now be included directly from C++ programs. 
+* Solaris fix for non-contiguous cpu numbers. Thanks to Rolf vandeVaart for + reporting the issue. +* Darwin 10.4 fix. Thanks to Olivier Cessenat for reporting the issue. +* Revert 1.0.1 patch that ignored sockets with unknown ID values since it + only slightly helped POWER7 machines with old Linux kernels while it + prevents recent kernels from getting the complete POWER7 topology. +* Fix hwloc_get_common_ancestor_obj(). +* Remove arch-specific bits in public headers. +* Some fixes in the lstopo graphical output. +* Various man page clarifications and minor updates. + + +Version 1.0.1 +------------- + +* Various Solaris fixes. Thanks to Yannick Martin for reporting the issue. +* Fix "non-native" builds on x86 platforms (e.g., when building 32 + bit executables with compilers that natively build 64 bit). +* Ignore sockets with unknown ID values (which fixes issues on POWER7 + machines). Thanks to Greg Bauer for reporting the issue. +* Various man page clarifications and minor updates. +* Fixed memory leaks in hwloc_setup_group_from_min_distance_clique(). +* Fix cache type filtering on MS Windows 7. Thanks to Αλέξανδρος + Παπαδογιαννάκ for reporting the issue. +* Fixed warnings when compiling with -DNDEBUG. + + +Version 1.0.0 +------------- + +* The ABI of the library has changed. +* Backend updates + + Add FreeBSD support. + + Add x86 cpuid based backend. + + Add Linux cgroup support to the Linux cpuset code. + + Support binding of entire multithreaded process on Linux. + + Fix and enable Group support in Windows. + + Cleanup XML export/import. +* Objects + + HWLOC_OBJ_PROC is renamed into HWLOC_OBJ_PU for "Processing Unit", + its stringified type name is now "PU". + + Use new HWLOC_OBJ_GROUP objects instead of MISC when grouping + objects according to NUMA distances or arbitrary OS aggregation. + + Rework memory attributes. + + Add different cpusets in each object to specify processors that + are offline, unavailable, ... 
+ + Cleanup the storage of object names and DMI infos. +* Features + + Add support for looking up specific PID topology information. + + Add hwloc_topology_export_xml() to export the topology in a XML file. + + Add hwloc_topology_get_support() to retrieve the supported features + for the current topology context. + + Support non-SYSTEM object as the root of the tree, use MACHINE in + most common cases. + + Add hwloc_get_*cpubind() routines to retrieve the current binding + of processes and threads. +* API + + Add HWLOC_API_VERSION to help detect the currently used API version. + + Add missing ending "e" to *compare* functions. + + Add several routines to emulate PLPA functions. + + Rename and rework the cpuset and/or/xor/not/clear operators to output + their result in a dedicated argument instead of modifying one input. + + Deprecate hwloc_obj_snprintf() in favor of hwloc_obj_type/attr_snprintf(). + + Clarify the use of parent and ancestor in the API, do not use father. + + Replace hwloc_get_system_obj() with hwloc_get_root_obj(). + + Return -1 instead of HWLOC_OBJ_TYPE_MAX in the API since the latter + isn't public. + + Relax constraints in hwloc_obj_type_of_string(). + + Improve displaying of memory sizes. + + Add 0x prefix to cpuset strings. +* Tools + + lstopo now displays logical indexes by default, use --physical to + revert back to OS/physical indexes. + + Add colors in the lstopo graphical outputs to distinguish between online, + offline, reserved, ... objects. + + Extend lstopo to show cpusets, filter objects by type, ... + + Renamed hwloc-mask into hwloc-calc which supports many new options. +* Documentation + + Add a hwloc(7) manpage containing general information. + + Add documentation about how to switch from PLPA to hwloc. + + Cleanup the distributed documentation files. +* Miscellaneous + + Many compilers warning fixes. + + Cleanup the ABI by using the visibility attribute. + + Add project embedding support. 
+
+
+Version 0.9.4 (unreleased)
+--------------------------
+
+* Fix resetting colors to normal in lstopo -.txt output.
+* Fix Linux pthread_t binding error report.
+
+
+Version 0.9.3
+-------------
+
+* Fix autogen.sh to work with Autoconf 2.63.
+* Fix various crashes in particular conditions:
+  - xml files with root attributes
+  - offline CPUs
+  - partial sysfs support
+  - unparseable /proc/cpuinfo
+  - ignoring NUMA level while Misc level have been generated
+* Tweak documentation a bit
+* Do not require the pthread library for binding the current thread on Linux
+* Do not erroneously consider the sched_setaffinity prototype is the old version
+  when there is actually none.
+* Fix _syscall3 compilation on archs for which we do not have the
+  sched_setaffinity system call number.
+* Fix AIX binding.
+* Fix libraries dependencies: now only lstopo depends on libtermcap, fix
+  binutils-gold link
+* Have make check always build and run hwloc-hello.c
+* Do not limit size of a cpuset.
+
+
+Version 0.9.2
+-------------
+
+* Trivial documentation changes.
+
+
+Version 0.9.1
+-------------
+
+* Re-branded to "hwloc" and moved to the Open MPI project, relicensed under the
+  BSD license.
+* The prefix of all functions and tools is now hwloc, and some public
+  functions were also renamed for real.
+* Group NUMA nodes into Misc objects according to their physical distance
+  that may be reported by the OS/BIOS.
+  May be ignored by setting HWLOC_IGNORE_DISTANCES=1 in the environment.
+* Ignore offline CPUs on Solaris.
+* Improved binding support on AIX.
+* Add HP-UX support.
+* CPU sets are now allocated/freed dynamically.
+* Add command line options to tune the lstopo graphical output, add
+  semi-graphical textual output
+* Extend topobind to support multiple cpusets or objects on the command
+  line as topomask does.
+* Add an Infiniband-specific helper hwloc/openfabrics-verbs.h to retrieve
+  the physical location of IB devices.
+ + +Version 0.9 (libtopology) +------------------------- + +* First release. diff --git a/src/3rdparty/hwloc/README b/src/3rdparty/hwloc/README new file mode 100644 index 00000000..5567b4d1 --- /dev/null +++ b/src/3rdparty/hwloc/README @@ -0,0 +1,85 @@ +Introduction + +The Hardware Locality (hwloc) software project aims at easing the process of +discovering hardware resources in parallel architectures. It offers +command-line tools and a C API for consulting these resources, their locality, +attributes, and interconnection. hwloc primarily aims at helping +high-performance computing (HPC) applications, but is also applicable to any +project seeking to exploit code and/or data locality on modern computing +platforms. + +hwloc is actually made of two subprojects distributed together: + + * The original hwloc project for describing the internals of computing nodes. + It is described in details starting at section Hardware Locality (hwloc) + Introduction. + * The network-oriented companion called netloc (Network Locality), described + in details starting with section Network Locality (netloc). + +See also the Related pages tab above for links to other sections. + +Netloc may be disabled, but the original hwloc cannot. Both hwloc and netloc +APIs are documented after these sections. + +Installation + +hwloc (http://www.open-mpi.org/projects/hwloc/) is available under the BSD +license. It is hosted as a sub-project of the overall Open MPI project (http:// +www.open-mpi.org/). Note that hwloc does not require any functionality from +Open MPI -- it is a wholly separate (and much smaller!) project and code base. +It just happens to be hosted as part of the overall Open MPI project. + +Basic Installation + +Installation is the fairly common GNU-based process: + +shell$ ./configure --prefix=... +shell$ make +shell$ make install + +hwloc- and netloc-specific configure options and requirements are documented in +sections hwloc Installation and Netloc Installation respectively. 
+
+Also note that if you install supplemental libraries in non-standard locations,
+hwloc's configure script may not be able to find them without some help. You
+may need to specify additional CPPFLAGS, LDFLAGS, or PKG_CONFIG_PATH values on
+the configure command line.
+
+For example, if libpciaccess was installed into /opt/pciaccess, hwloc's
+configure script may not find it by default. Try adding PKG_CONFIG_PATH to the
+./configure command line, like this:
+
+./configure PKG_CONFIG_PATH=/opt/pciaccess/lib/pkgconfig ...
+
+Running the "lstopo" tool is a good way to check as a graphical output whether
+hwloc properly detected the architecture of your node. Netloc command-line
+tools can be used to display the network topology interconnecting your nodes.
+
+Installing from a Git clone
+
+Additionally, the code can be directly cloned from Git:
+
+shell$ git clone https://github.com/open-mpi/hwloc.git
+shell$ cd hwloc
+shell$ ./autogen.sh
+
+Note that GNU Autoconf >=2.63, Automake >=1.11 and Libtool >=2.2.6 are required
+when building from a Git clone.
+
+Nightly development snapshots are available on the web site, they can be
+configured and built without any need for Git or GNU Autotools.
+
+Questions and Bugs
+
+Bugs should be reported in the tracker (https://github.com/open-mpi/hwloc/
+issues). Opening a new issue automatically displays lots of hints about how to
+debug and report issues.
+
+Questions may be sent to the users or developers mailing lists (http://
+www.open-mpi.org/community/lists/hwloc.php).
+
+There is also a #hwloc IRC channel on Freenode (irc.freenode.net).
+
+
+
+See https://www.open-mpi.org/projects/hwloc/doc/ for more hwloc documentation.
diff --git a/src/3rdparty/hwloc/VERSION b/src/3rdparty/hwloc/VERSION
new file mode 100644
index 00000000..5ebc6bb4
--- /dev/null
+++ b/src/3rdparty/hwloc/VERSION
@@ -0,0 +1,47 @@
+# This is the VERSION file for hwloc, describing the precise version
+# of hwloc in this distribution.
The various components of the version +# number below are combined to form a single version number string. + +# major, minor, and release are generally combined in the form +# ... If release is zero, then it is omitted. + +# Please update HWLOC_VERSION* in contrib/windows/hwloc_config.h too. + +major=2 +minor=0 +release=4 + +# greek is used for alpha or beta release tags. If it is non-empty, +# it will be appended to the version number. It does not have to be +# numeric. Common examples include a1 (alpha release 1), b1 (beta +# release 1), sc2005 (Super Computing 2005 release). The only +# requirement is that it must be entirely printable ASCII characters +# and have no white space. + +greek= + +# The date when this release was created + +date="Jun 03, 2019" + +# If snapshot=1, then use the value from snapshot_version as the +# entire hwloc version (i.e., ignore major, minor, release, and +# greek). This is only set to 1 when making snapshot tarballs. +snapshot=0 +snapshot_version=${major}.${minor}.${release}${greek}-git + +# The shared library version of hwloc's public library. This version +# is maintained in accordance with the "Library Interface Versions" +# chapter from the GNU Libtool documentation. Notes: + +# 1. Since version numbers are associated with *releases*, the version +# number maintained on the hwloc git master (and developer branches) +# is always 0:0:0. + +# 2. Version numbers are described in the Libtool current:revision:age +# format. + +libhwloc_so_version=15:3:0 +libnetloc_so_version=0:0:0 + +# Please also update the lines in contrib/windows/libhwloc.vcxproj diff --git a/src/3rdparty/hwloc/include/hwloc.h b/src/3rdparty/hwloc/include/hwloc.h new file mode 100644 index 00000000..ee6da6fd --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc.h @@ -0,0 +1,2270 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2019 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. 
All rights reserved.
+ * See COPYING in top-level directory.
+ */
+
+/*=====================================================================
+ * PLEASE GO READ THE DOCUMENTATION!
+ * ------------------------------------------------
+ * $tarball_directory/doc/doxygen-doc/
+ * or
+ * http://www.open-mpi.org/projects/hwloc/doc/
+ *=====================================================================
+ *
+ * FAIR WARNING: Do NOT expect to be able to figure out all the
+ * subtleties of hwloc by simply reading function prototypes and
+ * constant descriptions here in this file.
+ *
+ * Hwloc has wonderful documentation in both PDF and HTML formats for
+ * your reading pleasure. The formal documentation explains a LOT of
+ * hwloc-specific concepts, provides definitions, and discusses the
+ * "big picture" for many of the things that you'll find here in this
+ * header file.
+ *
+ * The PDF/HTML documentation was generated via Doxygen; much of what
+ * you'll see in there is also here in this file. BUT THERE IS A LOT
+ * THAT IS IN THE PDF/HTML THAT IS ***NOT*** IN hwloc.h!
+ *
+ * There are entire paragraph-length descriptions, discussions, and
+ * pretty pictures to explain subtle corner cases, provide concrete
+ * examples, etc.
+ *
+ * Please, go read the documentation. :-)
+ *
+ * Moreover there are several examples of hwloc use under doc/examples
+ * in the source tree.
+ *
+ *=====================================================================*/
+
+/** \file
+ * \brief The hwloc API.
+ *
+ * See hwloc/bitmap.h for bitmap specific macros.
+ * See hwloc/helper.h for high-level topology traversal helpers.
+ * See hwloc/inlines.h for the actual inline code of some functions below.
+ * See hwloc/export.h for exporting topologies to XML or to synthetic descriptions.
+ * See hwloc/distances.h for querying and modifying distances between objects.
+ * See hwloc/diff.h for manipulating differences between similar topologies.
+ */ + +#ifndef HWLOC_H +#define HWLOC_H + +#include +#include +#include +#include +#include + +/* + * Symbol transforms + */ +#include + +/* + * Bitmap definitions + */ + +#include + + +#ifdef __cplusplus +extern "C" { +#endif + + +/** \defgroup hwlocality_api_version API version + * @{ + */ + +/** \brief Indicate at build time which hwloc API version is being used. + * + * This number is updated to (X<<16)+(Y<<8)+Z when a new release X.Y.Z + * actually modifies the API. + * + * Users may check for available features at build time using this number + * (see \ref faq_upgrade). + * + * \note This should not be confused with HWLOC_VERSION, the library version. + * Two stable releases of the same series usually have the same ::HWLOC_API_VERSION + * even if their HWLOC_VERSION are different. + */ +#define HWLOC_API_VERSION 0x00020000 + +/** \brief Indicate at runtime which hwloc API version was used at build time. + * + * Should be ::HWLOC_API_VERSION if running on the same version. + */ +HWLOC_DECLSPEC unsigned hwloc_get_api_version(void); + +/** \brief Current component and plugin ABI version (see hwloc/plugins.h) */ +#define HWLOC_COMPONENT_ABI 5 + +/** @} */ + + + +/** \defgroup hwlocality_object_sets Object Sets (hwloc_cpuset_t and hwloc_nodeset_t) + * + * Hwloc uses bitmaps to represent two distinct kinds of object sets: + * CPU sets (::hwloc_cpuset_t) and NUMA node sets (::hwloc_nodeset_t). + * These types are both typedefs to a common back end type + * (::hwloc_bitmap_t), and therefore all the hwloc bitmap functions + * are applicable to both ::hwloc_cpuset_t and ::hwloc_nodeset_t (see + * \ref hwlocality_bitmap). + * + * The rationale for having two different types is that even though + * the actions one wants to perform on these types are the same (e.g., + * enable and disable individual items in the set/mask), they're used + * in very different contexts: one for specifying which processors to + * use and one for specifying which NUMA nodes to use. 
Hence, the + * name difference is really just to reflect the intent of where the + * type is used. + * + * @{ + */ + +/** \brief A CPU set is a bitmap whose bits are set according to CPU + * physical OS indexes. + * + * It may be consulted and modified with the bitmap API as any + * ::hwloc_bitmap_t (see hwloc/bitmap.h). + * + * Each bit may be converted into a PU object using + * hwloc_get_pu_obj_by_os_index(). + */ +typedef hwloc_bitmap_t hwloc_cpuset_t; +/** \brief A non-modifiable ::hwloc_cpuset_t. */ +typedef hwloc_const_bitmap_t hwloc_const_cpuset_t; + +/** \brief A node set is a bitmap whose bits are set according to NUMA + * memory node physical OS indexes. + * + * It may be consulted and modified with the bitmap API as any + * ::hwloc_bitmap_t (see hwloc/bitmap.h). + * Each bit may be converted into a NUMA node object using + * hwloc_get_numanode_obj_by_os_index(). + * + * When binding memory on a system without any NUMA node, + * the single main memory bank is considered as NUMA node #0. + * + * See also \ref hwlocality_helper_nodeset_convert. + */ +typedef hwloc_bitmap_t hwloc_nodeset_t; +/** \brief A non-modifiable ::hwloc_nodeset_t. + */ +typedef hwloc_const_bitmap_t hwloc_const_nodeset_t; + +/** @} */ + + + +/** \defgroup hwlocality_object_types Object Types + * @{ + */ + +/** \brief Type of topology object. + * + * \note Do not rely on the ordering or completeness of the values as new ones + * may be defined in the future! If you need to compare types, use + * hwloc_compare_types() instead. + */ +#define HWLOC_OBJ_TYPE_MIN HWLOC_OBJ_MACHINE /**< \private Sentinel value */ +typedef enum { + HWLOC_OBJ_MACHINE, /**< \brief Machine. + * A set of processors and memory with cache + * coherency. + * + * This type is always used for the root object of a topology, + * and never used anywhere else. + * Hence its parent is always \c NULL. + */ + + HWLOC_OBJ_PACKAGE, /**< \brief Physical package. 
+ * The physical package that usually gets inserted + * into a socket on the motherboard. + * A processor package usually contains multiple cores. + */ + HWLOC_OBJ_CORE, /**< \brief Core. + * A computation unit (may be shared by several + * logical processors). + */ + HWLOC_OBJ_PU, /**< \brief Processing Unit, or (Logical) Processor. + * An execution unit (may share a core with some + * other logical processors, e.g. in the case of + * an SMT core). + * + * This is the smallest object representing CPU resources, + * it cannot have any child except Misc objects. + * + * Objects of this kind are always reported and can + * thus be used as fallback when others are not. + */ + + HWLOC_OBJ_L1CACHE, /**< \brief Level 1 Data (or Unified) Cache. */ + HWLOC_OBJ_L2CACHE, /**< \brief Level 2 Data (or Unified) Cache. */ + HWLOC_OBJ_L3CACHE, /**< \brief Level 3 Data (or Unified) Cache. */ + HWLOC_OBJ_L4CACHE, /**< \brief Level 4 Data (or Unified) Cache. */ + HWLOC_OBJ_L5CACHE, /**< \brief Level 5 Data (or Unified) Cache. */ + + HWLOC_OBJ_L1ICACHE, /**< \brief Level 1 instruction Cache (filtered out by default). */ + HWLOC_OBJ_L2ICACHE, /**< \brief Level 2 instruction Cache (filtered out by default). */ + HWLOC_OBJ_L3ICACHE, /**< \brief Level 3 instruction Cache (filtered out by default). */ + + HWLOC_OBJ_GROUP, /**< \brief Group objects. + * Objects which do not fit in the above but are + * detected by hwloc and are useful to take into + * account for affinity. For instance, some operating systems + * expose their arbitrary processors aggregation this + * way. And hwloc may insert such objects to group + * NUMA nodes according to their distances. + * See also \ref faq_groups. + * + * These objects are removed when they do not bring + * any structure (see ::HWLOC_TYPE_FILTER_KEEP_STRUCTURE). + */ + + HWLOC_OBJ_NUMANODE, /**< \brief NUMA node. + * An object that contains memory that is directly + * and byte-accessible to the host processors. 
+ * It is usually close to some cores (the corresponding objects + * are descendants of the NUMA node object in the hwloc tree). + * + * There is always at least one such object in the topology + * even if the machine is not NUMA. + * + * Memory objects are not listed in the main children list, + * but rather in the dedicated Memory children list. + * + * NUMA nodes have a special depth ::HWLOC_TYPE_DEPTH_NUMANODE + * instead of a normal depth just like other objects in the + * main tree. + */ + + HWLOC_OBJ_BRIDGE, /**< \brief Bridge (filtered out by default). + * Any bridge that connects the host or an I/O bus, + * to another I/O bus. + * They are not added to the topology unless I/O discovery + * is enabled with hwloc_topology_set_flags(). + * I/O objects are not listed in the main children list, + * but rather in the dedicated io children list. + * I/O objects have NULL CPU and node sets. + */ + HWLOC_OBJ_PCI_DEVICE, /**< \brief PCI device (filtered out by default). + * They are not added to the topology unless I/O discovery + * is enabled with hwloc_topology_set_flags(). + * I/O objects are not listed in the main children list, + * but rather in the dedicated io children list. + * I/O objects have NULL CPU and node sets. + */ + HWLOC_OBJ_OS_DEVICE, /**< \brief Operating system device (filtered out by default). + * They are not added to the topology unless I/O discovery + * is enabled with hwloc_topology_set_flags(). + * I/O objects are not listed in the main children list, + * but rather in the dedicated io children list. + * I/O objects have NULL CPU and node sets. + */ + + HWLOC_OBJ_MISC, /**< \brief Miscellaneous objects (filtered out by default). + * Objects without particular meaning, that can e.g. be + * added by the application for its own use, or by hwloc + * for miscellaneous objects such as MemoryModule (DIMMs). + * These objects are not listed in the main children list, + * but rather in the dedicated misc children list. 
+ * Misc objects may only have Misc objects as children, + * and those are in the dedicated misc children list as well. + * Misc objects have NULL CPU and node sets. + */ + + HWLOC_OBJ_TYPE_MAX /**< \private Sentinel value */ +} hwloc_obj_type_t; + +/** \brief Cache type. */ +typedef enum hwloc_obj_cache_type_e { + HWLOC_OBJ_CACHE_UNIFIED, /**< \brief Unified cache. */ + HWLOC_OBJ_CACHE_DATA, /**< \brief Data cache. */ + HWLOC_OBJ_CACHE_INSTRUCTION /**< \brief Instruction cache (filtered out by default). */ +} hwloc_obj_cache_type_t; + +/** \brief Type of one side (upstream or downstream) of an I/O bridge. */ +typedef enum hwloc_obj_bridge_type_e { + HWLOC_OBJ_BRIDGE_HOST, /**< \brief Host-side of a bridge, only possible upstream. */ + HWLOC_OBJ_BRIDGE_PCI /**< \brief PCI-side of a bridge. */ +} hwloc_obj_bridge_type_t; + +/** \brief Type of a OS device. */ +typedef enum hwloc_obj_osdev_type_e { + HWLOC_OBJ_OSDEV_BLOCK, /**< \brief Operating system block device. + * For instance "sda" on Linux. */ + HWLOC_OBJ_OSDEV_GPU, /**< \brief Operating system GPU device. + * For instance ":0.0" for a GL display, + * "card0" for a Linux DRM device. */ + HWLOC_OBJ_OSDEV_NETWORK, /**< \brief Operating system network device. + * For instance the "eth0" interface on Linux. */ + HWLOC_OBJ_OSDEV_OPENFABRICS, /**< \brief Operating system openfabrics device. + * For instance the "mlx4_0" InfiniBand HCA, + * or "hfi1_0" Omni-Path interface on Linux. */ + HWLOC_OBJ_OSDEV_DMA, /**< \brief Operating system dma engine device. + * For instance the "dma0chan0" DMA channel on Linux. */ + HWLOC_OBJ_OSDEV_COPROC /**< \brief Operating system co-processor device. + * For instance "mic0" for a Xeon Phi (MIC) on Linux, + * "opencl0d0" for a OpenCL device, + * "cuda0" for a CUDA device. */ +} hwloc_obj_osdev_type_t; + +/** \brief Compare the depth of two object types + * + * Types shouldn't be compared as they are, since newer ones may be added in + * the future. 
This function returns less than, equal to, or greater than zero + * respectively if \p type1 objects usually include \p type2 objects, are the + * same as \p type2 objects, or are included in \p type2 objects. If the types + * can not be compared (because neither is usually contained in the other), + * ::HWLOC_TYPE_UNORDERED is returned. Object types containing CPUs can always + * be compared (usually, a system contains machines which contain nodes which + * contain packages which contain caches, which contain cores, which contain + * processors). + * + * \note ::HWLOC_OBJ_PU will always be the deepest, + * while ::HWLOC_OBJ_MACHINE is always the highest. + * + * \note This does not mean that the actual topology will respect that order: + * e.g. as of today cores may also contain caches, and packages may also contain + * nodes. This is thus just to be seen as a fallback comparison method. + */ +HWLOC_DECLSPEC int hwloc_compare_types (hwloc_obj_type_t type1, hwloc_obj_type_t type2) __hwloc_attribute_const; + +enum hwloc_compare_types_e { + HWLOC_TYPE_UNORDERED = INT_MAX /**< \brief Value returned by hwloc_compare_types() when types can not be compared. \hideinitializer */ +}; + +/** @} */ + + + +/** \defgroup hwlocality_objects Object Structure and Attributes + * @{ + */ + +union hwloc_obj_attr_u; + +/** \brief Structure of a topology object + * + * Applications must not modify any field except \p hwloc_obj.userdata. + */ +struct hwloc_obj { + /* physical information */ + hwloc_obj_type_t type; /**< \brief Type of object */ + char *subtype; /**< \brief Subtype string to better describe the type field. */ + + unsigned os_index; /**< \brief OS-provided physical index number. + * It is not guaranteed unique across the entire machine, + * except for PUs and NUMA nodes. + * Set to HWLOC_UNKNOWN_INDEX if unknown or irrelevant for this object. + */ +#define HWLOC_UNKNOWN_INDEX (unsigned)-1 + + char *name; /**< \brief Object-specific name if any. 
+ * Mostly used for identifying OS devices and Misc objects where + * a name string is more useful than numerical indexes. + */ + + hwloc_uint64_t total_memory; /**< \brief Total memory (in bytes) in NUMA nodes below this object. */ + + union hwloc_obj_attr_u *attr; /**< \brief Object type-specific Attributes, + * may be \c NULL if no attribute value was found */ + + /* global position */ + int depth; /**< \brief Vertical index in the hierarchy. + * + * For normal objects, this is the depth of the horizontal level + * that contains this object and its cousins of the same type. + * If the topology is symmetric, this is equal to the parent depth + * plus one, and also equal to the number of parent/child links + * from the root object to here. + * + * For special objects (NUMA nodes, I/O and Misc) that are not + * in the main tree, this is a special negative value that + * corresponds to their dedicated level, + * see hwloc_get_type_depth() and ::hwloc_get_type_depth_e. + * Those special values can be passed to hwloc functions such + * hwloc_get_nbobjs_by_depth() as usual. + */ + unsigned logical_index; /**< \brief Horizontal index in the whole list of similar objects, + * hence guaranteed unique across the entire machine. + * Could be a "cousin_rank" since it's the rank within the "cousin" list below + * Note that this index may change when restricting the topology + * or when inserting a group. + */ + + /* cousins are all objects of the same type (and depth) across the entire topology */ + struct hwloc_obj *next_cousin; /**< \brief Next object of same type and depth */ + struct hwloc_obj *prev_cousin; /**< \brief Previous object of same type and depth */ + + /* children of the same parent are siblings, even if they may have different type and depth */ + struct hwloc_obj *parent; /**< \brief Parent, \c NULL if root (Machine object) */ + unsigned sibling_rank; /**< \brief Index in parent's \c children[] array. 
Or the index in parent's Memory, I/O or Misc children list. */ + struct hwloc_obj *next_sibling; /**< \brief Next object below the same parent (inside the same list of children). */ + struct hwloc_obj *prev_sibling; /**< \brief Previous object below the same parent (inside the same list of children). */ + /** @name List and array of normal children below this object (except Memory, I/O and Misc children). */ + /**@{*/ + unsigned arity; /**< \brief Number of normal children. + * Memory, Misc and I/O children are not listed here + * but rather in their dedicated children list. + */ + struct hwloc_obj **children; /**< \brief Normal children, \c children[0 .. arity -1] */ + struct hwloc_obj *first_child; /**< \brief First normal child */ + struct hwloc_obj *last_child; /**< \brief Last normal child */ + /**@}*/ + + int symmetric_subtree; /**< \brief Set if the subtree of normal objects below this object is symmetric, + * which means all normal children and their children have identical subtrees. + * + * Memory, I/O and Misc children are ignored. + * + * If set in the topology root object, lstopo may export the topology + * as a synthetic string. + */ + + /** @name List of Memory children below this object. */ + /**@{*/ + unsigned memory_arity; /**< \brief Number of Memory children. + * These children are listed in \p memory_first_child. + */ + struct hwloc_obj *memory_first_child; /**< \brief First Memory child. + * NUMA nodes are listed here (\p memory_arity and \p memory_first_child) + * instead of in the normal children list. + * See also hwloc_obj_type_is_memory(). + */ + /**@}*/ + + /** @name List of I/O children below this object. */ + /**@{*/ + unsigned io_arity; /**< \brief Number of I/O children. + * These children are listed in \p io_first_child. + */ + struct hwloc_obj *io_first_child; /**< \brief First I/O child. + * Bridges, PCI and OS devices are listed here (\p io_arity and \p io_first_child) + * instead of in the normal children list. 
+ * See also hwloc_obj_type_is_io(). + */ + /**@}*/ + + /** @name List of Misc children below this object. */ + /**@{*/ + unsigned misc_arity; /**< \brief Number of Misc children. + * These children are listed in \p misc_first_child. + */ + struct hwloc_obj *misc_first_child; /**< \brief First Misc child. + * Misc objects are listed here (\p misc_arity and \p misc_first_child) + * instead of in the normal children list. + */ + /**@}*/ + + /* cpusets and nodesets */ + hwloc_cpuset_t cpuset; /**< \brief CPUs covered by this object + * + * This is the set of CPUs for which there are PU objects in the topology + * under this object, i.e. which are known to be physically contained in this + * object and known how (the children path between this object and the PU + * objects). + * + * If the ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM configuration flag is set, + * some of these CPUs may not be allowed for binding, + * see hwloc_topology_get_allowed_cpuset(). + * + * \note All objects have non-NULL CPU and node sets except Misc and I/O objects. + * + * \note Its value must not be changed, hwloc_bitmap_dup() must be used instead. + */ + hwloc_cpuset_t complete_cpuset; /**< \brief The complete CPU set of logical processors of this object, + * + * This may include not only the same as the cpuset field, but also some CPUs for + * which topology information is unknown or incomplete, some offlines CPUs, and + * the CPUs that are ignored when the ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM flag + * is not set. + * Thus no corresponding PU object may be found in the topology, because the + * precise position is undefined. It is however known that it would be somewhere + * under this object. + * + * \note Its value must not be changed, hwloc_bitmap_dup() must be used instead. 
+ */ + + hwloc_nodeset_t nodeset; /**< \brief NUMA nodes covered by this object or containing this object + * + * This is the set of NUMA nodes for which there are NUMA node objects in the + * topology under or above this object, i.e. which are known to be physically + * contained in this object or containing it and known how (the children path + * between this object and the NUMA node objects). + * + * In the end, these nodes are those that are close to the current object. + * + * If the ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM configuration flag is set, + * some of these nodes may not be allowed for allocation, + * see hwloc_topology_get_allowed_nodeset(). + * + * If there are no NUMA nodes in the machine, all the memory is close to this + * object, so only the first bit may be set in \p nodeset. + * + * \note All objects have non-NULL CPU and node sets except Misc and I/O objects. + * + * \note Its value must not be changed, hwloc_bitmap_dup() must be used instead. + */ + hwloc_nodeset_t complete_nodeset; /**< \brief The complete NUMA node set of this object, + * + * This may include not only the same as the nodeset field, but also some NUMA + * nodes for which topology information is unknown or incomplete, some offlines + * nodes, and the nodes that are ignored when the ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM + * flag is not set. + * Thus no corresponding NUMA node object may be found in the topology, because the + * precise position is undefined. It is however known that it would be + * somewhere under this object. + * + * If there are no NUMA nodes in the machine, all the memory is close to this + * object, so only the first bit is set in \p complete_nodeset. + * + * \note Its value must not be changed, hwloc_bitmap_dup() must be used instead. + */ + + struct hwloc_info_s *infos; /**< \brief Array of stringified info type=name. */ + unsigned infos_count; /**< \brief Size of infos array. 
*/ + + /* misc */ + void *userdata; /**< \brief Application-given private data pointer, + * initialized to \c NULL, use it as you wish. + * See hwloc_topology_set_userdata_export_callback() in hwloc/export.h + * if you wish to export this field to XML. */ + + hwloc_uint64_t gp_index; /**< \brief Global persistent index. + * Generated by hwloc, unique across the topology (contrary to os_index) + * and persistent across topology changes (contrary to logical_index). + * Mostly used internally, but could also be used by application to identify objects. + */ +}; +/** + * \brief Convenience typedef; a pointer to a struct hwloc_obj. + */ +typedef struct hwloc_obj * hwloc_obj_t; + +/** \brief Object type-specific Attributes */ +union hwloc_obj_attr_u { + /** \brief NUMA node-specific Object Attributes */ + struct hwloc_numanode_attr_s { + hwloc_uint64_t local_memory; /**< \brief Local memory (in bytes) */ + unsigned page_types_len; /**< \brief Size of array \p page_types */ + /** \brief Array of local memory page types, \c NULL if no local memory and \p page_types is 0. + * + * The array is sorted by increasing \p size fields. + * It contains \p page_types_len slots. + */ + struct hwloc_memory_page_type_s { + hwloc_uint64_t size; /**< \brief Size of pages */ + hwloc_uint64_t count; /**< \brief Number of pages of this size */ + } * page_types; + } numanode; + + /** \brief Cache-specific Object Attributes */ + struct hwloc_cache_attr_s { + hwloc_uint64_t size; /**< \brief Size of cache in bytes */ + unsigned depth; /**< \brief Depth of cache (e.g., L1, L2, ...etc.) */ + unsigned linesize; /**< \brief Cache-line size in bytes. 0 if unknown */ + int associativity; /**< \brief Ways of associativity, + * -1 if fully associative, 0 if unknown */ + hwloc_obj_cache_type_t type; /**< \brief Cache type */ + } cache; + /** \brief Group-specific Object Attributes */ + struct hwloc_group_attr_s { + unsigned depth; /**< \brief Depth of group object. 
+ * It may change if intermediate Group objects are added. */
+ unsigned kind; /**< \brief Internally-used kind of group. */
+ unsigned subkind; /**< \brief Internally-used subkind to distinguish different levels of groups with same kind */
+ unsigned char dont_merge; /**< \brief Flag preventing groups from being automatically merged with identical parent or children. */
+ } group;
+ /** \brief PCI Device specific Object Attributes */
+ struct hwloc_pcidev_attr_s {
+ unsigned short domain;
+ unsigned char bus, dev, func;
+ unsigned short class_id;
+ unsigned short vendor_id, device_id, subvendor_id, subdevice_id;
+ unsigned char revision;
+ float linkspeed; /* in GB/s */
+ } pcidev;
+ /** \brief Bridge specific Object Attributes */
+ struct hwloc_bridge_attr_s {
+ union {
+ struct hwloc_pcidev_attr_s pci;
+ } upstream;
+ hwloc_obj_bridge_type_t upstream_type;
+ union {
+ struct {
+ unsigned short domain;
+ unsigned char secondary_bus, subordinate_bus;
+ } pci;
+ } downstream;
+ hwloc_obj_bridge_type_t downstream_type;
+ unsigned depth;
+ } bridge;
+ /** \brief OS Device specific Object Attributes */
+ struct hwloc_osdev_attr_s {
+ hwloc_obj_osdev_type_t type;
+ } osdev;
+};
+
+/** \brief Object info
+ *
+ * \sa hwlocality_info_attr
+ */
+struct hwloc_info_s {
+ char *name; /**< \brief Info name */
+ char *value; /**< \brief Info value */
+};
+
+/** @} */
+
+
+
+/** \defgroup hwlocality_creation Topology Creation and Destruction
+ * @{
+ */
+
+struct hwloc_topology;
+/** \brief Topology context
+ *
+ * To be initialized with hwloc_topology_init() and built with hwloc_topology_load().
+ */
+typedef struct hwloc_topology * hwloc_topology_t;
+
+/** \brief Allocate a topology context.
+ *
+ * \param[out] topologyp is assigned a pointer to the new allocated context.
+ *
+ * \return 0 on success, -1 on error.
+ */
+HWLOC_DECLSPEC int hwloc_topology_init (hwloc_topology_t *topologyp);
+
+/** \brief Build the actual topology
+ *
+ * Build the actual topology once initialized with hwloc_topology_init() and
+ * tuned with \ref hwlocality_configuration and \ref hwlocality_setsource routines.
+ * No other routine may be called earlier using this topology context.
+ *
+ * \param topology is the topology to be loaded with objects.
+ *
+ * \return 0 on success, -1 on error.
+ *
+ * \note On failure, the topology is reinitialized. It should be either
+ * destroyed with hwloc_topology_destroy() or configured and loaded again.
+ *
+ * \note This function may be called only once per topology.
+ *
+ * \note The binding of the current thread or process may temporarily change
+ * during this call but it will be restored before it returns.
+ *
+ * \sa hwlocality_configuration and hwlocality_setsource
+ */
+HWLOC_DECLSPEC int hwloc_topology_load(hwloc_topology_t topology);
+
+/** \brief Terminate and free a topology context
+ *
+ * \param topology is the topology to be freed
+ */
+HWLOC_DECLSPEC void hwloc_topology_destroy (hwloc_topology_t topology);
+
+/** \brief Duplicate a topology.
+ *
+ * The entire topology structure as well as its objects
+ * are duplicated into a new one.
+ *
+ * This is useful for keeping a backup while modifying a topology.
+ *
+ * \note Object userdata is not duplicated since hwloc does not know what it points to.
+ * The objects of both old and new topologies will point to the same userdata.
+ */
+HWLOC_DECLSPEC int hwloc_topology_dup(hwloc_topology_t *newtopology, hwloc_topology_t oldtopology);
+
+/** \brief Verify that the topology is compatible with the current hwloc library.
+ * + * This is useful when using the same topology structure (in memory) + * in different libraries that may use different hwloc installations + * (for instance if one library embeds a specific version of hwloc, + * while another library uses a default system-wide hwloc installation). + * + * If all libraries/programs use the same hwloc installation, this function + * always returns success. + * + * \return \c 0 on success. + * + * \return \c -1 with \p errno set to \c EINVAL if incompatible. + * + * \note If sharing between processes with hwloc_shmem_topology_write(), + * the relevant check is already performed inside hwloc_shmem_topology_adopt(). + */ +HWLOC_DECLSPEC int hwloc_topology_abi_check(hwloc_topology_t topology); + +/** \brief Run internal checks on a topology structure + * + * The program aborts if an inconsistency is detected in the given topology. + * + * \param topology is the topology to be checked + * + * \note This routine is only useful to developers. + * + * \note The input topology should have been previously loaded with + * hwloc_topology_load(). + */ +HWLOC_DECLSPEC void hwloc_topology_check(hwloc_topology_t topology); + +/** @} */ + + + +/** \defgroup hwlocality_levels Object levels, depths and types + * @{ + * + * Be sure to see the figure in \ref termsanddefs that shows a + * complete topology tree, including depths, child/sibling/cousin + * relationships, and an example of an asymmetric topology where one + * package has fewer caches than its peers. + */ + +/** \brief Get the depth of the hierarchical tree of objects. + * + * This is the depth of ::HWLOC_OBJ_PU objects plus one. + * + * \note NUMA nodes, I/O and Misc objects are ignored when computing + * the depth of the tree (they are placed on special levels). + */ +HWLOC_DECLSPEC int hwloc_topology_get_depth(hwloc_topology_t __hwloc_restrict topology) __hwloc_attribute_pure; + +/** \brief Returns the depth of objects of type \p type. 
+ * + * If no object of this type is present on the underlying architecture, or if + * the OS doesn't provide this kind of information, the function returns + * ::HWLOC_TYPE_DEPTH_UNKNOWN. + * + * If type is absent but a similar type is acceptable, see also + * hwloc_get_type_or_below_depth() and hwloc_get_type_or_above_depth(). + * + * If ::HWLOC_OBJ_GROUP is given, the function may return ::HWLOC_TYPE_DEPTH_MULTIPLE + * if multiple levels of Groups exist. + * + * If a NUMA node, I/O or Misc object type is given, the function returns a virtual + * value because these objects are stored in special levels that are not CPU-related. + * This virtual depth may be passed to other hwloc functions such as + * hwloc_get_obj_by_depth() but it should not be considered as an actual + * depth by the application. In particular, it should not be compared with + * any other object depth or with the entire topology depth. + * \sa hwloc_get_memory_parents_depth(). + * + * \sa hwloc_type_sscanf_as_depth() for returning the depth of objects + * whose type is given as a string. + */ +HWLOC_DECLSPEC int hwloc_get_type_depth (hwloc_topology_t topology, hwloc_obj_type_t type); + +enum hwloc_get_type_depth_e { + HWLOC_TYPE_DEPTH_UNKNOWN = -1, /**< \brief No object of given type exists in the topology. \hideinitializer */ + HWLOC_TYPE_DEPTH_MULTIPLE = -2, /**< \brief Objects of given type exist at different depth in the topology (only for Groups). \hideinitializer */ + HWLOC_TYPE_DEPTH_NUMANODE = -3, /**< \brief Virtual depth for NUMA nodes. \hideinitializer */ + HWLOC_TYPE_DEPTH_BRIDGE = -4, /**< \brief Virtual depth for bridge object level. \hideinitializer */ + HWLOC_TYPE_DEPTH_PCI_DEVICE = -5, /**< \brief Virtual depth for PCI device object level. \hideinitializer */ + HWLOC_TYPE_DEPTH_OS_DEVICE = -6, /**< \brief Virtual depth for software device object level. \hideinitializer */ + HWLOC_TYPE_DEPTH_MISC = -7 /**< \brief Virtual depth for Misc object. 
\hideinitializer */ +}; + +/** \brief Return the depth of parents where memory objects are attached. + * + * Memory objects have virtual negative depths because they are not part of + * the main CPU-side hierarchy of objects. This depth should not be compared + * with other level depths. + * + * If all Memory objects are attached to Normal parents at the same depth, + * this parent depth may be compared to other as usual, for instance + * for knowing whether NUMA nodes is attached above or below Packages. + * + * \return The depth of Normal parents of all memory children + * if all these parents have the same depth. For instance the depth of + * the Package level if all NUMA nodes are attached to Package objects. + * + * \return ::HWLOC_TYPE_DEPTH_MULTIPLE if Normal parents of all + * memory children do not have the same depth. For instance if some + * NUMA nodes are attached to Packages while others are attached to + * Groups. + */ +HWLOC_DECLSPEC int hwloc_get_memory_parents_depth (hwloc_topology_t topology); + +/** \brief Returns the depth of objects of type \p type or below + * + * If no object of this type is present on the underlying architecture, the + * function returns the depth of the first "present" object typically found + * inside \p type. + * + * This function is only meaningful for normal object types. + * If a memory, I/O or Misc object type is given, the corresponding virtual + * depth is always returned (see hwloc_get_type_depth()). + * + * May return ::HWLOC_TYPE_DEPTH_MULTIPLE for ::HWLOC_OBJ_GROUP just like + * hwloc_get_type_depth(). + */ +static __hwloc_inline int +hwloc_get_type_or_below_depth (hwloc_topology_t topology, hwloc_obj_type_t type) __hwloc_attribute_pure; + +/** \brief Returns the depth of objects of type \p type or above + * + * If no object of this type is present on the underlying architecture, the + * function returns the depth of the first "present" object typically + * containing \p type. 
+ *
+ * This function is only meaningful for normal object types.
+ * If a memory, I/O or Misc object type is given, the corresponding virtual
+ * depth is always returned (see hwloc_get_type_depth()).
+ *
+ * May return ::HWLOC_TYPE_DEPTH_MULTIPLE for ::HWLOC_OBJ_GROUP just like
+ * hwloc_get_type_depth().
+ */
+static __hwloc_inline int
+hwloc_get_type_or_above_depth (hwloc_topology_t topology, hwloc_obj_type_t type) __hwloc_attribute_pure;
+
+/** \brief Returns the type of objects at depth \p depth.
+ *
+ * \p depth should be between 0 and hwloc_topology_get_depth()-1.
+ *
+ * \return (hwloc_obj_type_t)-1 if depth \p depth does not exist.
+ */
+HWLOC_DECLSPEC hwloc_obj_type_t hwloc_get_depth_type (hwloc_topology_t topology, int depth) __hwloc_attribute_pure;
+
+/** \brief Returns the width of level at depth \p depth.
+ */
+HWLOC_DECLSPEC unsigned hwloc_get_nbobjs_by_depth (hwloc_topology_t topology, int depth) __hwloc_attribute_pure;
+
+/** \brief Returns the width of level type \p type
+ *
+ * If no object for that type exists, 0 is returned.
+ * If there are several levels with objects of that type, -1 is returned.
+ */
+static __hwloc_inline int
+hwloc_get_nbobjs_by_type (hwloc_topology_t topology, hwloc_obj_type_t type) __hwloc_attribute_pure;
+
+/** \brief Returns the top-object of the topology-tree.
+ *
+ * Its type is ::HWLOC_OBJ_MACHINE.
+ */
+static __hwloc_inline hwloc_obj_t
+hwloc_get_root_obj (hwloc_topology_t topology) __hwloc_attribute_pure;
+
+/** \brief Returns the topology object at logical index \p idx from depth \p depth */
+HWLOC_DECLSPEC hwloc_obj_t hwloc_get_obj_by_depth (hwloc_topology_t topology, int depth, unsigned idx) __hwloc_attribute_pure;
+
+/** \brief Returns the topology object at logical index \p idx with type \p type
+ *
+ * If no object for that type exists, \c NULL is returned.
+ * If there are several levels with objects of that type (::HWLOC_OBJ_GROUP), + * \c NULL is returned and the caller may fallback to hwloc_get_obj_by_depth(). + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_by_type (hwloc_topology_t topology, hwloc_obj_type_t type, unsigned idx) __hwloc_attribute_pure; + +/** \brief Returns the next object at depth \p depth. + * + * If \p prev is \c NULL, return the first object at depth \p depth. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_obj_by_depth (hwloc_topology_t topology, int depth, hwloc_obj_t prev); + +/** \brief Returns the next object of type \p type. + * + * If \p prev is \c NULL, return the first object at type \p type. If + * there are multiple or no depth for given type, return \c NULL and + * let the caller fallback to hwloc_get_next_obj_by_depth(). + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_obj_by_type (hwloc_topology_t topology, hwloc_obj_type_t type, + hwloc_obj_t prev); + +/** @} */ + + + +/** \defgroup hwlocality_object_strings Converting between Object Types and Attributes, and Strings + * @{ + */ + +/** \brief Return a constant stringified object type. + * + * This function is the basic way to convert a generic type into a string. + * The output string may be parsed back by hwloc_type_sscanf(). + * + * hwloc_obj_type_snprintf() may return a more precise output for a specific + * object, but it requires the caller to provide the output buffer. + */ +HWLOC_DECLSPEC const char * hwloc_obj_type_string (hwloc_obj_type_t type) __hwloc_attribute_const; + +/** \brief Stringify the type of a given topology object into a human-readable form. + * + * Contrary to hwloc_obj_type_string(), this function includes object-specific + * attributes (such as the Group depth, the Bridge type, or OS device type) + * in the output, and it requires the caller to provide the output buffer. + * + * The output is guaranteed to be the same for all objects of a same topology level. 
+ * + * If \p verbose is 1, longer type names are used, e.g. L1Cache instead of L1. + * + * The output string may be parsed back by hwloc_type_sscanf(). + * + * If \p size is 0, \p string may safely be \c NULL. + * + * \return the number of character that were actually written if not truncating, + * or that would have been written (not including the ending \\0). + */ +HWLOC_DECLSPEC int hwloc_obj_type_snprintf(char * __hwloc_restrict string, size_t size, + hwloc_obj_t obj, + int verbose); + +/** \brief Stringify the attributes of a given topology object into a human-readable form. + * + * Attribute values are separated by \p separator. + * + * Only the major attributes are printed in non-verbose mode. + * + * If \p size is 0, \p string may safely be \c NULL. + * + * \return the number of character that were actually written if not truncating, + * or that would have been written (not including the ending \\0). + */ +HWLOC_DECLSPEC int hwloc_obj_attr_snprintf(char * __hwloc_restrict string, size_t size, + hwloc_obj_t obj, const char * __hwloc_restrict separator, + int verbose); + +/** \brief Return an object type and attributes from a type string. + * + * Convert strings such as "Package" or "L1iCache" into the corresponding types. + * Matching is case-insensitive, and only the first letters are actually + * required to match. + * + * The matched object type is set in \p typep (which cannot be \c NULL). + * + * Type-specific attributes, for instance Cache type, Cache depth, Group depth, + * Bridge type or OS Device type may be returned in \p attrp. + * Attributes that are not specified in the string (for instance "Group" + * without a depth, or "L2Cache" without a cache type) are set to -1. + * + * \p attrp is only filled if not \c NULL and if its size specified in \p attrsize + * is large enough. It should be at least as large as union hwloc_obj_attr_u. + * + * \return 0 if a type was correctly identified, otherwise -1. 
+ * + * \note This function is guaranteed to match any string returned by + * hwloc_obj_type_string() or hwloc_obj_type_snprintf(). + * + * \note This is an extended version of the now deprecated hwloc_obj_type_sscanf(). + */ +HWLOC_DECLSPEC int hwloc_type_sscanf(const char *string, + hwloc_obj_type_t *typep, + union hwloc_obj_attr_u *attrp, size_t attrsize); + +/** \brief Return an object type and its level depth from a type string. + * + * Convert strings such as "Package" or "L1iCache" into the corresponding types + * and return in \p depthp the depth of the corresponding level in the + * topology \p topology. + * + * If no object of this type is present on the underlying architecture, + * ::HWLOC_TYPE_DEPTH_UNKNOWN is returned. + * + * If multiple such levels exist (for instance if giving Group without any depth), + * the function may return ::HWLOC_TYPE_DEPTH_MULTIPLE instead. + * + * The matched object type is set in \p typep if \p typep is non \c NULL. + * + * \note This function is similar to hwloc_type_sscanf() followed + * by hwloc_get_type_depth() but it also automatically disambiguates + * multiple group levels etc. + * + * \note This function is guaranteed to match any string returned by + * hwloc_obj_type_string() or hwloc_obj_type_snprintf(). + */ +HWLOC_DECLSPEC int hwloc_type_sscanf_as_depth(const char *string, + hwloc_obj_type_t *typep, + hwloc_topology_t topology, int *depthp); + +/** @} */ + + + +/** \defgroup hwlocality_info_attr Consulting and Adding Key-Value Info Attributes + * + * @{ + */ + +/** \brief Search the given key name in object infos and return the corresponding value. + * + * If multiple keys match the given name, only the first one is returned. + * + * \return \c NULL if no such key exists. + */ +static __hwloc_inline const char * +hwloc_obj_get_info_by_name(hwloc_obj_t obj, const char *name) __hwloc_attribute_pure; + +/** \brief Add the given info name and value pair to the given object. 
+ *
+ * The info is appended to the existing info array even if another key
+ * with the same name already exists.
+ *
+ * The input strings are copied before being added in the object infos.
+ *
+ * \return \c 0 on success, \c -1 on error.
+ *
+ * \note This function may be used to enforce object colors in the lstopo
+ * graphical output by using "lstopoStyle" as a name and "Background=#rrggbb"
+ * as a value. See CUSTOM COLORS in the lstopo(1) manpage for details.
+ *
+ * \note If \p value contains some non-printable characters, they will
+ * be dropped when exporting to XML, see hwloc_topology_export_xml() in hwloc/export.h.
+ */
+HWLOC_DECLSPEC int hwloc_obj_add_info(hwloc_obj_t obj, const char *name, const char *value);
+
+/** @} */
+
+
+
+/** \defgroup hwlocality_cpubinding CPU binding
+ *
+ * Some operating systems only support binding threads or processes to a single PU.
+ * Others allow binding to larger sets such as entire Cores or Packages or
+ * even random sets of individual PUs. In such operating system, the scheduler
+ * is free to run the task on one of these PU, then migrate it to another PU, etc.
+ * It is often useful to call hwloc_bitmap_singlify() on the target CPU set before
+ * passing it to the binding function to avoid these expensive migrations.
+ * See the documentation of hwloc_bitmap_singlify() for details.
+ *
+ * Some operating systems do not provide all hwloc-supported
+ * mechanisms to bind processes, threads, etc.
+ * hwloc_topology_get_support() may be used to query about the actual CPU
+ * binding support in the currently used operating system.
+ *
+ * When the requested binding operation is not available and the
+ * ::HWLOC_CPUBIND_STRICT flag was passed, the function returns -1.
+ * \p errno is set to \c ENOSYS when it is not possible to bind the requested kind of object
+ * processes/threads. errno is set to \c EXDEV when the requested cpuset
+ * can not be enforced (e.g.
some systems only allow one CPU, and some + * other systems only allow one NUMA node). + * + * If ::HWLOC_CPUBIND_STRICT was not passed, the function may fail as well, + * or the operating system may use a slightly different operation + * (with side-effects, smaller binding set, etc.) + * when the requested operation is not exactly supported. + * + * The most portable version that should be preferred over the others, + * whenever possible, is the following one which just binds the current program, + * assuming it is single-threaded: + * + * \code + * hwloc_set_cpubind(topology, set, 0), + * \endcode + * + * If the program may be multithreaded, the following one should be preferred + * to only bind the current thread: + * + * \code + * hwloc_set_cpubind(topology, set, HWLOC_CPUBIND_THREAD), + * \endcode + * + * \sa Some example codes are available under doc/examples/ in the source tree. + * + * \note To unbind, just call the binding function with either a full cpuset or + * a cpuset equal to the system cpuset. + * + * \note On some operating systems, CPU binding may have effects on memory binding, see + * ::HWLOC_CPUBIND_NOMEMBIND + * + * \note Running lstopo \--top or hwloc-ps can be a very convenient tool to check + * how binding actually happened. + * @{ + */ + +/** \brief Process/Thread binding flags. + * + * These bit flags can be used to refine the binding policy. + * + * The default (0) is to bind the current process, assumed to be + * single-threaded, in a non-strict way. This is the most portable + * way to bind as all operating systems usually provide it. + * + * \note Not all systems support all kinds of binding. See the + * "Detailed Description" section of \ref hwlocality_cpubinding for a + * description of errors that can occur. + */ +typedef enum { + /** \brief Bind all threads of the current (possibly) multithreaded process. + * \hideinitializer */ + HWLOC_CPUBIND_PROCESS = (1<<0), + + /** \brief Bind current thread of current process. 
+ * \hideinitializer */
+ HWLOC_CPUBIND_THREAD = (1<<1),
+
+ /** \brief Request for strict binding from the OS.
+ *
+ * By default, when the designated CPUs are all busy while other
+ * CPUs are idle, operating systems may execute the thread/process
+ * on those other CPUs instead of the designated CPUs, to let them
+ * progress anyway. Strict binding means that the thread/process
+ * will _never_ execute on other cpus than the designated CPUs, even
+ * when those are busy with other tasks and other CPUs are idle.
+ *
+ * \note Depending on the operating system, strict binding may not
+ * be possible (e.g., the OS does not implement it) or not allowed
+ * (e.g., for administrative reasons), and the function will fail
+ * in that case.
+ *
+ * When retrieving the binding of a process, this flag checks
+ * whether all its threads actually have the same binding. If the
+ * flag is not given, the binding of each thread will be
+ * accumulated.
+ *
+ * \note This flag is meaningless when retrieving the binding of a
+ * thread.
+ * \hideinitializer
+ */
+ HWLOC_CPUBIND_STRICT = (1<<2),
+
+ /** \brief Avoid any effect on memory binding
+ *
+ * On some operating systems, some CPU binding function would also
+ * bind the memory on the corresponding NUMA node. It is often not
+ * a problem for the application, but if it is, setting this flag
+ * will make hwloc avoid using OS functions that would also bind
+ * memory. This will however reduce the support of CPU bindings,
+ * i.e. potentially return -1 with errno set to ENOSYS in some
+ * cases.
+ *
+ * This flag is only meaningful when used with functions that set
+ * the CPU binding. It is ignored when used with functions that get
+ * CPU binding information.
+ * \hideinitializer
+ */
+ HWLOC_CPUBIND_NOMEMBIND = (1<<3)
+} hwloc_cpubind_flags_t;
+
+/** \brief Bind current process or thread on cpus given in physical bitmap \p set.
+ * + * \return -1 with errno set to ENOSYS if the action is not supported + * \return -1 with errno set to EXDEV if the binding cannot be enforced + */ +HWLOC_DECLSPEC int hwloc_set_cpubind(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags); + +/** \brief Get current process or thread binding. + * + * Writes into \p set the physical cpuset which the process or thread (according to \e + * flags) was last bound to. + */ +HWLOC_DECLSPEC int hwloc_get_cpubind(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); + +/** \brief Bind a process \p pid on cpus given in physical bitmap \p set. + * + * \note \p hwloc_pid_t is \p pid_t on Unix platforms, + * and \p HANDLE on native Windows platforms. + * + * \note As a special case on Linux, if a tid (thread ID) is supplied + * instead of a pid (process ID) and ::HWLOC_CPUBIND_THREAD is passed in flags, + * the binding is applied to that specific thread. + * + * \note On non-Linux systems, ::HWLOC_CPUBIND_THREAD can not be used in \p flags. + */ +HWLOC_DECLSPEC int hwloc_set_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_cpuset_t set, int flags); + +/** \brief Get the current physical binding of process \p pid. + * + * \note \p hwloc_pid_t is \p pid_t on Unix platforms, + * and \p HANDLE on native Windows platforms. + * + * \note As a special case on Linux, if a tid (thread ID) is supplied + * instead of a pid (process ID) and HWLOC_CPUBIND_THREAD is passed in flags, + * the binding for that specific thread is returned. + * + * \note On non-Linux systems, HWLOC_CPUBIND_THREAD can not be used in \p flags. + */ +HWLOC_DECLSPEC int hwloc_get_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_cpuset_t set, int flags); + +#ifdef hwloc_thread_t +/** \brief Bind a thread \p thread on cpus given in physical bitmap \p set. + * + * \note \p hwloc_thread_t is \p pthread_t on Unix platforms, + * and \p HANDLE on native Windows platforms. 
+ * + * \note ::HWLOC_CPUBIND_PROCESS can not be used in \p flags. + */ +HWLOC_DECLSPEC int hwloc_set_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t thread, hwloc_const_cpuset_t set, int flags); +#endif + +#ifdef hwloc_thread_t +/** \brief Get the current physical binding of thread \p tid. + * + * \note \p hwloc_thread_t is \p pthread_t on Unix platforms, + * and \p HANDLE on native Windows platforms. + * + * \note ::HWLOC_CPUBIND_PROCESS can not be used in \p flags. + */ +HWLOC_DECLSPEC int hwloc_get_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t thread, hwloc_cpuset_t set, int flags); +#endif + +/** \brief Get the last physical CPU where the current process or thread ran. + * + * The operating system may move some tasks from one processor + * to another at any time according to their binding, + * so this function may return something that is already + * outdated. + * + * \p flags can include either ::HWLOC_CPUBIND_PROCESS or ::HWLOC_CPUBIND_THREAD to + * specify whether the query should be for the whole process (union of all CPUs + * on which all threads are running), or only the current thread. If the + * process is single-threaded, flags can be set to zero to let hwloc use + * whichever method is available on the underlying OS. + */ +HWLOC_DECLSPEC int hwloc_get_last_cpu_location(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); + +/** \brief Get the last physical CPU where a process ran. + * + * The operating system may move some tasks from one processor + * to another at any time according to their binding, + * so this function may return something that is already + * outdated. + * + * \note \p hwloc_pid_t is \p pid_t on Unix platforms, + * and \p HANDLE on native Windows platforms. + * + * \note As a special case on Linux, if a tid (thread ID) is supplied + * instead of a pid (process ID) and ::HWLOC_CPUBIND_THREAD is passed in flags, + * the last CPU location of that specific thread is returned. 
+ *
+ * \note On non-Linux systems, ::HWLOC_CPUBIND_THREAD can not be used in \p flags.
+ */
+HWLOC_DECLSPEC int hwloc_get_proc_last_cpu_location(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_cpuset_t set, int flags);
+
+/** @} */
+
+
+
+/** \defgroup hwlocality_membinding Memory binding
+ *
+ * Memory binding can be done three ways:
+ *
+ * - explicit memory allocation thanks to hwloc_alloc_membind() and friends:
+ * the binding will have effect on the memory allocated by these functions.
+ * - implicit memory binding through binding policy: hwloc_set_membind() and
+ * friends only define the current policy of the process, which will be
+ * applied to the subsequent calls to malloc() and friends.
+ * - migration of existing memory ranges, thanks to hwloc_set_area_membind()
+ * and friends, which move already-allocated data.
+ *
+ * Not all operating systems support all three ways.
+ * hwloc_topology_get_support() may be used to query about the actual memory
+ * binding support in the currently used operating system.
+ *
+ * When the requested binding operation is not available and the
+ * ::HWLOC_MEMBIND_STRICT flag was passed, the function returns -1.
+ * \p errno will be set to \c ENOSYS when the system does not support
+ * the specified action or policy
+ * (e.g., some systems only allow binding memory on a per-thread
+ * basis, whereas other systems only allow binding memory for all
+ * threads in a process).
+ * \p errno will be set to EXDEV when the requested set can not be enforced
+ * (e.g., some systems only allow binding memory to a single NUMA node).
+ *
+ * If ::HWLOC_MEMBIND_STRICT was not passed, the function may fail as well,
+ * or the operating system may use a slightly different operation
+ * (with side-effects, smaller binding set, etc.)
+ * when the requested operation is not exactly supported.
+ *
+ * The most portable form that should be preferred over the others
+ * whenever possible is as follows.
+ * It allocates some memory hopefully bound to the specified set. + * To do so, hwloc will possibly have to change the current memory + * binding policy in order to actually get the memory bound, if the OS + * does not provide any other way to simply allocate bound memory + * without changing the policy for all allocations. That is the + * difference with hwloc_alloc_membind(), which will never change the + * current memory binding policy. + * + * \code + * hwloc_alloc_membind_policy(topology, size, set, + * HWLOC_MEMBIND_BIND, 0); + * \endcode + * + * Each hwloc memory binding function takes a bitmap argument that + * is a CPU set by default, or a NUMA memory node set if the flag + * ::HWLOC_MEMBIND_BYNODESET is specified. + * See \ref hwlocality_object_sets and \ref hwlocality_bitmap for a + * discussion of CPU sets and NUMA memory node sets. + * It is also possible to convert between CPU set and node set using + * hwloc_cpuset_to_nodeset() or hwloc_cpuset_from_nodeset(). + * + * Memory binding by CPU set cannot work for CPU-less NUMA memory nodes. + * Binding by nodeset should therefore be preferred whenever possible. + * + * \sa Some example codes are available under doc/examples/ in the source tree. + * + * \note On some operating systems, memory binding affects the CPU + * binding; see ::HWLOC_MEMBIND_NOCPUBIND + * @{ + */ + +/** \brief Memory binding policy. + * + * These constants can be used to choose the binding policy. Only one policy can + * be used at a time (i.e., the values cannot be OR'ed together). + * + * Not all systems support all kinds of binding. + * hwloc_topology_get_support() may be used to query about the actual memory + * binding policy support in the currently used operating system. + * See the "Detailed Description" section of \ref hwlocality_membinding + * for a description of errors that can occur. + */ +typedef enum { + /** \brief Reset the memory allocation policy to the system default. 
+ * Depending on the operating system, this may correspond to + * ::HWLOC_MEMBIND_FIRSTTOUCH (Linux), + * or ::HWLOC_MEMBIND_BIND (AIX, HP-UX, Solaris, Windows). + * This policy is never returned by get membind functions. + * The nodeset argument is ignored. + * \hideinitializer */ + HWLOC_MEMBIND_DEFAULT = 0, + + /** \brief Allocate each memory page individually on the local NUMA + * node of the thread that touches it. + * + * The given nodeset should usually be hwloc_topology_get_topology_nodeset() + * so that the touching thread may run and allocate on any node in the system. + * + * On AIX, if the nodeset is smaller, pages are allocated locally (if the local + * node is in the nodeset) or from a random non-local node (otherwise). + * \hideinitializer */ + HWLOC_MEMBIND_FIRSTTOUCH = 1, + + /** \brief Allocate memory on the specified nodes. + * \hideinitializer */ + HWLOC_MEMBIND_BIND = 2, + + /** \brief Allocate memory on the given nodes in an interleaved + * / round-robin manner. The precise layout of the memory across + * multiple NUMA nodes is OS/system specific. Interleaving can be + * useful when threads distributed across the specified NUMA nodes + * will all be accessing the whole memory range concurrently, since + * the interleave will then balance the memory references. + * \hideinitializer */ + HWLOC_MEMBIND_INTERLEAVE = 3, + + /** \brief For each page bound with this policy, by next time + * it is touched (and next time only), it is moved from its current + * location to the local NUMA node of the thread where the memory + * reference occurred (if it needs to be moved at all). + * \hideinitializer */ + HWLOC_MEMBIND_NEXTTOUCH = 4, + + /** \brief Returned by get_membind() functions when multiple + * threads or parts of a memory area have differing memory binding + * policies. + * Also returned when binding is unknown because binding hooks are empty + * when the topology is loaded from XML without HWLOC_THISSYSTEM=1, etc. 
+ * \hideinitializer */ + HWLOC_MEMBIND_MIXED = -1 +} hwloc_membind_policy_t; + +/** \brief Memory binding flags. + * + * These flags can be used to refine the binding policy. + * All flags can be logically OR'ed together with the exception of + * ::HWLOC_MEMBIND_PROCESS and ::HWLOC_MEMBIND_THREAD; + * these two flags are mutually exclusive. + * + * Not all systems support all kinds of binding. + * hwloc_topology_get_support() may be used to query about the actual memory + * binding support in the currently used operating system. + * See the "Detailed Description" section of \ref hwlocality_membinding + * for a description of errors that can occur. + */ +typedef enum { + /** \brief Set policy for all threads of the specified (possibly + * multithreaded) process. This flag is mutually exclusive with + * ::HWLOC_MEMBIND_THREAD. + * \hideinitializer */ + HWLOC_MEMBIND_PROCESS = (1<<0), + + /** \brief Set policy for a specific thread of the current process. + * This flag is mutually exclusive with ::HWLOC_MEMBIND_PROCESS. + * \hideinitializer */ + HWLOC_MEMBIND_THREAD = (1<<1), + + /** Request strict binding from the OS. The function will fail if + * the binding can not be guaranteed / completely enforced. + * + * This flag has slightly different meanings depending on which + * function it is used with. + * \hideinitializer */ + HWLOC_MEMBIND_STRICT = (1<<2), + + /** \brief Migrate existing allocated memory. If the memory cannot + * be migrated and the ::HWLOC_MEMBIND_STRICT flag is passed, an error + * will be returned. + * \hideinitializer */ + HWLOC_MEMBIND_MIGRATE = (1<<3), + + /** \brief Avoid any effect on CPU binding. + * + * On some operating systems, some underlying memory binding + * functions also bind the application to the corresponding CPU(s). + * Using this flag will cause hwloc to avoid using OS functions that + * could potentially affect CPU bindings. Note, however, that using + * NOCPUBIND may reduce hwloc's overall memory binding + * support. 
Specifically: some of hwloc's memory binding functions + * may fail with errno set to ENOSYS when used with NOCPUBIND. + * \hideinitializer + */ + HWLOC_MEMBIND_NOCPUBIND = (1<<4), + + /** \brief Consider the bitmap argument as a nodeset. + * + * The bitmap argument is considered a nodeset if this flag is given, + * or a cpuset otherwise by default. + * + * Memory binding by CPU set cannot work for CPU-less NUMA memory nodes. + * Binding by nodeset should therefore be preferred whenever possible. + * \hideinitializer + */ + HWLOC_MEMBIND_BYNODESET = (1<<5) +} hwloc_membind_flags_t; + +/** \brief Set the default memory binding policy of the current + * process or thread to prefer the NUMA node(s) specified by \p set + * + * If neither ::HWLOC_MEMBIND_PROCESS nor ::HWLOC_MEMBIND_THREAD is + * specified, the current process is assumed to be single-threaded. + * This is the most portable form as it permits hwloc to use either + * process-based OS functions or thread-based OS functions, depending + * on which are available. + * + * If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset. + * Otherwise it's a cpuset. + * + * \return -1 with errno set to ENOSYS if the action is not supported + * \return -1 with errno set to EXDEV if the binding cannot be enforced + */ +HWLOC_DECLSPEC int hwloc_set_membind(hwloc_topology_t topology, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags); + +/** \brief Query the default memory binding policy and physical locality of the + * current process or thread. + * + * This function has two output parameters: \p set and \p policy. + * The values returned in these parameters depend on both the \p flags + * passed in and the current memory binding policies and nodesets in + * the queried target. + * + * Passing the ::HWLOC_MEMBIND_PROCESS flag specifies that the query + * target is the current policies and nodesets for all the threads in + * the current process. 
Passing ::HWLOC_MEMBIND_THREAD specifies that + * the query target is the current policy and nodeset for only the + * thread invoking this function. + * + * If neither of these flags are passed (which is the most portable + * method), the process is assumed to be single threaded. This allows + * hwloc to use either process-based OS functions or thread-based OS + * functions, depending on which are available. + * + * ::HWLOC_MEMBIND_STRICT is only meaningful when ::HWLOC_MEMBIND_PROCESS + * is also specified. In this case, hwloc will check the default + * memory policies and nodesets for all threads in the process. If + * they are not identical, -1 is returned and errno is set to EXDEV. + * If they are identical, the values are returned in \p set and \p + * policy. + * + * Otherwise, if ::HWLOC_MEMBIND_PROCESS is specified (and + * ::HWLOC_MEMBIND_STRICT is \em not specified), the default set + * from each thread is logically OR'ed together. + * If all threads' default policies are the same, \p policy is set to + * that policy. If they are different, \p policy is set to + * ::HWLOC_MEMBIND_MIXED. + * + * In the ::HWLOC_MEMBIND_THREAD case (or when neither + * ::HWLOC_MEMBIND_PROCESS or ::HWLOC_MEMBIND_THREAD is specified), there + * is only one set and policy; they are returned in \p set and + * \p policy, respectively. + * + * If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset. + * Otherwise it's a cpuset. + * + * If any other flags are specified, -1 is returned and errno is set + * to EINVAL. + */ +HWLOC_DECLSPEC int hwloc_get_membind(hwloc_topology_t topology, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags); + +/** \brief Set the default memory binding policy of the specified + * process to prefer the NUMA node(s) specified by \p set + * + * If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset. + * Otherwise it's a cpuset. 
+ * + * \return -1 with errno set to ENOSYS if the action is not supported + * \return -1 with errno set to EXDEV if the binding cannot be enforced + * + * \note \p hwloc_pid_t is \p pid_t on Unix platforms, + * and \p HANDLE on native Windows platforms. + */ +HWLOC_DECLSPEC int hwloc_set_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags); + +/** \brief Query the default memory binding policy and physical locality of the + * specified process. + * + * This function has two output parameters: \p set and \p policy. + * The values returned in these parameters depend on both the \p flags + * passed in and the current memory binding policies and nodesets in + * the queried target. + * + * Passing the ::HWLOC_MEMBIND_PROCESS flag specifies that the query + * target is the current policies and nodesets for all the threads in + * the specified process. If ::HWLOC_MEMBIND_PROCESS is not specified + * (which is the most portable method), the process is assumed to be + * single threaded. This allows hwloc to use either process-based OS + * functions or thread-based OS functions, depending on which are + * available. + * + * Note that it does not make sense to pass ::HWLOC_MEMBIND_THREAD to + * this function. + * + * If ::HWLOC_MEMBIND_STRICT is specified, hwloc will check the default + * memory policies and nodesets for all threads in the specified + * process. If they are not identical, -1 is returned and errno is + * set to EXDEV. If they are identical, the values are returned in \p + * set and \p policy. + * + * Otherwise, \p set is set to the logical OR of all threads' + * default set. If all threads' default policies + * are the same, \p policy is set to that policy. If they are + * different, \p policy is set to ::HWLOC_MEMBIND_MIXED. + * + * If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset. + * Otherwise it's a cpuset. 
+ * + * If any other flags are specified, -1 is returned and errno is set + * to EINVAL. + * + * \note \p hwloc_pid_t is \p pid_t on Unix platforms, + * and \p HANDLE on native Windows platforms. + */ +HWLOC_DECLSPEC int hwloc_get_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags); + +/** \brief Bind the already-allocated memory identified by (addr, len) + * to the NUMA node(s) specified by \p set. + * + * If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset. + * Otherwise it's a cpuset. + * + * \return 0 if \p len is 0. + * \return -1 with errno set to ENOSYS if the action is not supported + * \return -1 with errno set to EXDEV if the binding cannot be enforced + */ +HWLOC_DECLSPEC int hwloc_set_area_membind(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags); + +/** \brief Query the CPUs near the physical NUMA node(s) and binding policy of + * the memory identified by (\p addr, \p len ). + * + * This function has two output parameters: \p set and \p policy. + * The values returned in these parameters depend on both the \p flags + * passed in and the memory binding policies and nodesets of the pages + * in the address range. + * + * If ::HWLOC_MEMBIND_STRICT is specified, the target pages are first + * checked to see if they all have the same memory binding policy and + * nodeset. If they do not, -1 is returned and errno is set to EXDEV. + * If they are identical across all pages, the set and policy are + * returned in \p set and \p policy, respectively. + * + * If ::HWLOC_MEMBIND_STRICT is not specified, the union of all NUMA + * node(s) containing pages in the address range is calculated. + * If all pages in the target have the same policy, it is returned in + * \p policy. Otherwise, \p policy is set to ::HWLOC_MEMBIND_MIXED. + * + * If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset. 
+ * Otherwise it's a cpuset. + * + * If any other flags are specified, -1 is returned and errno is set + * to EINVAL. + * + * If \p len is 0, -1 is returned and errno is set to EINVAL. + */ +HWLOC_DECLSPEC int hwloc_get_area_membind(hwloc_topology_t topology, const void *addr, size_t len, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags); + +/** \brief Get the NUMA nodes where memory identified by (\p addr, \p len ) is physically allocated. + * + * Fills \p set according to the NUMA nodes where the memory area pages + * are physically allocated. If no page is actually allocated yet, + * \p set may be empty. + * + * If pages spread to multiple nodes, it is not specified whether they spread + * equitably, or whether most of them are on a single node, etc. + * + * The operating system may move memory pages from one processor + * to another at any time according to their binding, + * so this function may return something that is already + * outdated. + * + * If ::HWLOC_MEMBIND_BYNODESET is specified in \p flags, set is + * considered a nodeset. Otherwise it's a cpuset. + * + * If \p len is 0, \p set is emptied. + */ +HWLOC_DECLSPEC int hwloc_get_area_memlocation(hwloc_topology_t topology, const void *addr, size_t len, hwloc_bitmap_t set, int flags); + +/** \brief Allocate some memory + * + * This is equivalent to malloc(), except that it tries to allocate + * page-aligned memory from the OS. + * + * \note The allocated memory should be freed with hwloc_free(). + */ +HWLOC_DECLSPEC void *hwloc_alloc(hwloc_topology_t topology, size_t len); + +/** \brief Allocate some memory on NUMA memory nodes specified by \p set + * + * \return NULL with errno set to ENOSYS if the action is not supported + * and ::HWLOC_MEMBIND_STRICT is given + * \return NULL with errno set to EXDEV if the binding cannot be enforced + * and ::HWLOC_MEMBIND_STRICT is given + * \return NULL with errno set to ENOMEM if the memory allocation failed + * even before trying to bind. 
+ *
+ * If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset.
+ * Otherwise it's a cpuset.
+ *
+ * \note The allocated memory should be freed with hwloc_free().
+ */
+HWLOC_DECLSPEC void *hwloc_alloc_membind(hwloc_topology_t topology, size_t len, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags) __hwloc_attribute_malloc;
+
+/** \brief Allocate some memory on NUMA memory nodes specified by \p set
+ *
+ * This is similar to hwloc_alloc_membind() except that it is allowed to change
+ * the current memory binding policy, thus providing more binding support, at
+ * the expense of changing the current state.
+ *
+ * If ::HWLOC_MEMBIND_BYNODESET is specified, set is considered a nodeset.
+ * Otherwise it's a cpuset.
+ */
+static __hwloc_inline void *
+hwloc_alloc_membind_policy(hwloc_topology_t topology, size_t len, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags) __hwloc_attribute_malloc;
+
+/** \brief Free memory that was previously allocated by hwloc_alloc()
+ * or hwloc_alloc_membind().
+ */
+HWLOC_DECLSPEC int hwloc_free(hwloc_topology_t topology, void *addr, size_t len);
+
+/** @} */
+
+
+
+/** \defgroup hwlocality_setsource Changing the Source of Topology Discovery
+ *
+ * If none of the functions below is called, the default is to detect all the objects
+ * of the machine that the caller is allowed to access.
+ *
+ * This default behavior may also be modified through environment variables
+ * if the application did not modify it already.
+ * Setting HWLOC_XMLFILE in the environment enforces the discovery from a XML
+ * file as if hwloc_topology_set_xml() had been called.
+ * Setting HWLOC_SYNTHETIC enforces a synthetic topology as if
+ * hwloc_topology_set_synthetic() had been called.
+ *
+ * Finally, HWLOC_THISSYSTEM enforces the return value of
+ * hwloc_topology_is_thissystem().
+ *
+ * @{
+ */
+
+/** \brief Change which process the topology is viewed from.
+ *
+ * On some systems, processes may have different views of the machine, for
+ * instance the set of allowed CPUs. By default, hwloc exposes the view from
+ * the current process. Calling hwloc_topology_set_pid() permits to make it
+ * expose the topology of the machine from the point of view of another
+ * process.
+ *
+ * \note \p hwloc_pid_t is \p pid_t on Unix platforms,
+ * and \p HANDLE on native Windows platforms.
+ *
+ * \note -1 is returned and errno is set to ENOSYS on platforms that do not
+ * support this feature.
+ */
+HWLOC_DECLSPEC int hwloc_topology_set_pid(hwloc_topology_t __hwloc_restrict topology, hwloc_pid_t pid);
+
+/** \brief Enable synthetic topology.
+ *
+ * Gather topology information from the given \p description,
+ * a space-separated string describing
+ * the object type and arity at each level.
+ * All types may be omitted (space-separated string of numbers) so that
+ * hwloc chooses all types according to usual topologies.
+ * See also the \ref synthetic.
+ *
+ * Setting the environment variable HWLOC_SYNTHETIC
+ * may also result in this behavior.
+ *
+ * If \p description was properly parsed and describes a valid topology
+ * configuration, this function returns 0.
+ * Otherwise -1 is returned and errno is set to EINVAL.
+ *
+ * Note that this function does not actually load topology
+ * information; it just tells hwloc where to load it from. You'll
+ * still need to invoke hwloc_topology_load() to actually load the
+ * topology information.
+ *
+ * \note For convenience, this backend provides empty binding hooks which just
+ * return success.
+ *
+ * \note On success, the synthetic component replaces the previously enabled
+ * component (if any), but the topology is not actually modified until
+ * hwloc_topology_load().
+ */
+HWLOC_DECLSPEC int hwloc_topology_set_synthetic(hwloc_topology_t __hwloc_restrict topology, const char * __hwloc_restrict description);
+
+/** \brief Enable XML-file based topology.
+ * + * Gather topology information from the XML file given at \p xmlpath. + * Setting the environment variable HWLOC_XMLFILE may also result in this behavior. + * This file may have been generated earlier with hwloc_topology_export_xml() in hwloc/export.h, + * or lstopo file.xml. + * + * Note that this function does not actually load topology + * information; it just tells hwloc where to load it from. You'll + * still need to invoke hwloc_topology_load() to actually load the + * topology information. + * + * \return -1 with errno set to EINVAL on failure to read the XML file. + * + * \note See also hwloc_topology_set_userdata_import_callback() + * for importing application-specific object userdata. + * + * \note For convenience, this backend provides empty binding hooks which just + * return success. To have hwloc still actually call OS-specific hooks, the + * ::HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM has to be set to assert that the loaded + * file is really the underlying system. + * + * \note On success, the XML component replaces the previously enabled + * component (if any), but the topology is not actually modified until + * hwloc_topology_load(). + */ +HWLOC_DECLSPEC int hwloc_topology_set_xml(hwloc_topology_t __hwloc_restrict topology, const char * __hwloc_restrict xmlpath); + +/** \brief Enable XML based topology using a memory buffer (instead of + * a file, as with hwloc_topology_set_xml()). + * + * Gather topology information from the XML memory buffer given at \p + * buffer and of length \p size. This buffer may have been filled + * earlier with hwloc_topology_export_xmlbuffer() in hwloc/export.h. + * + * Note that this function does not actually load topology + * information; it just tells hwloc where to load it from. You'll + * still need to invoke hwloc_topology_load() to actually load the + * topology information. + * + * \return -1 with errno set to EINVAL on failure to read the XML buffer. 
+ * + * \note See also hwloc_topology_set_userdata_import_callback() + * for importing application-specific object userdata. + * + * \note For convenience, this backend provides empty binding hooks which just + * return success. To have hwloc still actually call OS-specific hooks, the + * ::HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM has to be set to assert that the loaded + * file is really the underlying system. + * + * \note On success, the XML component replaces the previously enabled + * component (if any), but the topology is not actually modified until + * hwloc_topology_load(). + */ +HWLOC_DECLSPEC int hwloc_topology_set_xmlbuffer(hwloc_topology_t __hwloc_restrict topology, const char * __hwloc_restrict buffer, int size); + +/** @} */ + + + +/** \defgroup hwlocality_configuration Topology Detection Configuration and Query + * + * Several functions can optionally be called between hwloc_topology_init() and + * hwloc_topology_load() to configure how the detection should be performed, + * e.g. to ignore some objects types, define a synthetic topology, etc. + * + * @{ + */ + +/** \brief Flags to be set onto a topology context before load. + * + * Flags should be given to hwloc_topology_set_flags(). + * They may also be returned by hwloc_topology_get_flags(). + */ +enum hwloc_topology_flags_e { + /** \brief Detect the whole system, ignore reservations. + * + * Gather all resources, even if some were disabled by the administrator. + * For instance, ignore Linux Cgroup/Cpusets and gather all processors and memory nodes. + * + * When this flag is not set, PUs and NUMA nodes that are disallowed are not added to the topology. + * Parent objects (package, core, cache, etc.) are added only if some of their children are allowed. + * + * When this flag is set, the actual sets of allowed PUs and NUMA nodes are given + * by hwloc_topology_get_allowed_cpuset() and hwloc_topology_get_allowed_nodeset(). + * They may be smaller than the root object cpuset and nodeset. 
+ * + * When this flag is not set, all existing PUs and NUMA nodes in the topology + * are allowed. hwloc_topology_get_allowed_cpuset() and hwloc_topology_get_allowed_nodeset() + * are equal to the root object cpuset and nodeset. + * + * If the current topology is exported to XML and reimported later, this flag + * should be set again in the reimported topology so that disallowed resources + * are reimported as well. + * \hideinitializer + */ + HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM = (1UL<<0), + + /** \brief Assume that the selected backend provides the topology for the + * system on which we are running. + * + * This forces hwloc_topology_is_thissystem() to return 1, i.e. makes hwloc assume that + * the selected backend provides the topology for the system on which we are running, + * even if it is not the OS-specific backend but the XML backend for instance. + * This means making the binding functions actually call the OS-specific + * system calls and really do binding, while the XML backend would otherwise + * provide empty hooks just returning success. + * + * Setting the environment variable HWLOC_THISSYSTEM may also result in the + * same behavior. + * + * This can be used for efficiency reasons to first detect the topology once, + * save it to an XML file, and quickly reload it later through the XML + * backend, but still having binding functions actually do bind. + * \hideinitializer + */ + HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM = (1UL<<1), + + /** \brief Get the set of allowed resources from the local operating system even if the topology was loaded from XML or synthetic description. + * + * If the topology was loaded from XML or from a synthetic string, + * restrict it by applying the current process restrictions such as + * Linux Cgroup/Cpuset. + * + * This is useful when the topology is not loaded directly from + * the local machine (e.g. 
for performance reason) and it comes
+ * with all resources, while the running process is restricted
+ * to only parts of the machine.
+ *
+ * This flag is ignored unless ::HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM is
+ * also set since the loaded topology must match the underlying machine
+ * where restrictions will be gathered from.
+ *
+ * Setting the environment variable HWLOC_THISSYSTEM_ALLOWED_RESOURCES
+ * would result in the same behavior.
+ * \hideinitializer
+ */
+ HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES = (1UL<<2)
+};
+
+/** \brief Set OR'ed flags to non-yet-loaded topology.
+ *
+ * Set a OR'ed set of ::hwloc_topology_flags_e onto a topology that was not yet loaded.
+ *
+ * If this function is called multiple times, the last invocation will erase
+ * and replace the set of flags that was previously set.
+ *
+ * The flags set in a topology may be retrieved with hwloc_topology_get_flags()
+ */
+HWLOC_DECLSPEC int hwloc_topology_set_flags (hwloc_topology_t topology, unsigned long flags);
+
+/** \brief Get OR'ed flags of a topology.
+ *
+ * Get the OR'ed set of ::hwloc_topology_flags_e of a topology.
+ *
+ * \return the flags previously set with hwloc_topology_set_flags().
+ */
+HWLOC_DECLSPEC unsigned long hwloc_topology_get_flags (hwloc_topology_t topology);
+
+/** \brief Does the topology context come from this system?
+ *
+ * \return 1 if this topology context was built using the system
+ * running this program.
+ * \return 0 instead (for instance if using another file-system root,
+ * a XML topology file, or a synthetic topology).
+ */
+HWLOC_DECLSPEC int hwloc_topology_is_thissystem(hwloc_topology_t __hwloc_restrict topology) __hwloc_attribute_pure;
+
+/** \brief Flags describing actual discovery support for this topology. */
+struct hwloc_topology_discovery_support {
+ /** \brief Detecting the number of PU objects is supported. */
+ unsigned char pu;
+ /** \brief Detecting the number of NUMA nodes is supported.
*/ + unsigned char numa; + /** \brief Detecting the amount of memory in NUMA nodes is supported. */ + unsigned char numa_memory; +}; + +/** \brief Flags describing actual PU binding support for this topology. + * + * A flag may be set even if the feature isn't supported in all cases + * (e.g. binding to random sets of non-contiguous objects). + */ +struct hwloc_topology_cpubind_support { + /** Binding the whole current process is supported. */ + unsigned char set_thisproc_cpubind; + /** Getting the binding of the whole current process is supported. */ + unsigned char get_thisproc_cpubind; + /** Binding a whole given process is supported. */ + unsigned char set_proc_cpubind; + /** Getting the binding of a whole given process is supported. */ + unsigned char get_proc_cpubind; + /** Binding the current thread only is supported. */ + unsigned char set_thisthread_cpubind; + /** Getting the binding of the current thread only is supported. */ + unsigned char get_thisthread_cpubind; + /** Binding a given thread only is supported. */ + unsigned char set_thread_cpubind; + /** Getting the binding of a given thread only is supported. */ + unsigned char get_thread_cpubind; + /** Getting the last processors where the whole current process ran is supported */ + unsigned char get_thisproc_last_cpu_location; + /** Getting the last processors where a whole process ran is supported */ + unsigned char get_proc_last_cpu_location; + /** Getting the last processors where the current thread ran is supported */ + unsigned char get_thisthread_last_cpu_location; +}; + +/** \brief Flags describing actual memory binding support for this topology. + * + * A flag may be set even if the feature isn't supported in all cases + * (e.g. binding to random sets of non-contiguous objects). + */ +struct hwloc_topology_membind_support { + /** Binding the whole current process is supported. */ + unsigned char set_thisproc_membind; + /** Getting the binding of the whole current process is supported. 
*/ + unsigned char get_thisproc_membind; + /** Binding a whole given process is supported. */ + unsigned char set_proc_membind; + /** Getting the binding of a whole given process is supported. */ + unsigned char get_proc_membind; + /** Binding the current thread only is supported. */ + unsigned char set_thisthread_membind; + /** Getting the binding of the current thread only is supported. */ + unsigned char get_thisthread_membind; + /** Binding a given memory area is supported. */ + unsigned char set_area_membind; + /** Getting the binding of a given memory area is supported. */ + unsigned char get_area_membind; + /** Allocating a bound memory area is supported. */ + unsigned char alloc_membind; + /** First-touch policy is supported. */ + unsigned char firsttouch_membind; + /** Bind policy is supported. */ + unsigned char bind_membind; + /** Interleave policy is supported. */ + unsigned char interleave_membind; + /** Next-touch migration policy is supported. */ + unsigned char nexttouch_membind; + /** Migration flags is supported. */ + unsigned char migrate_membind; + /** Getting the last NUMA nodes where a memory area was allocated is supported */ + unsigned char get_area_memlocation; +}; + +/** \brief Set of flags describing actual support for this topology. + * + * This is retrieved with hwloc_topology_get_support() and will be valid until + * the topology object is destroyed. Note: the values are correct only after + * discovery. + */ +struct hwloc_topology_support { + struct hwloc_topology_discovery_support *discovery; + struct hwloc_topology_cpubind_support *cpubind; + struct hwloc_topology_membind_support *membind; +}; + +/** \brief Retrieve the topology support. + * + * Each flag indicates whether a feature is supported. + * If set to 0, the feature is not supported. + * If set to 1, the feature is supported, but the corresponding + * call may still fail in some corner cases. 
+ * + * These features are also listed by hwloc-info \--support + */ +HWLOC_DECLSPEC const struct hwloc_topology_support *hwloc_topology_get_support(hwloc_topology_t __hwloc_restrict topology); + +/** \brief Type filtering flags. + * + * By default, most objects are kept (::HWLOC_TYPE_FILTER_KEEP_ALL). + * Instruction caches, I/O and Misc objects are ignored by default (::HWLOC_TYPE_FILTER_KEEP_NONE). + * Group levels are ignored unless they bring structure (::HWLOC_TYPE_FILTER_KEEP_STRUCTURE). + * + * Note that group objects are also ignored individually (without the entire level) + * when they do not bring structure. + */ +enum hwloc_type_filter_e { + /** \brief Keep all objects of this type. + * + * Cannot be set for ::HWLOC_OBJ_GROUP (groups are designed only to add more structure to the topology). + * \hideinitializer + */ + HWLOC_TYPE_FILTER_KEEP_ALL = 0, + + /** \brief Ignore all objects of this type. + * + * The bottom-level type ::HWLOC_OBJ_PU, the ::HWLOC_OBJ_NUMANODE type, and + * the top-level type ::HWLOC_OBJ_MACHINE may not be ignored. + * \hideinitializer + */ + HWLOC_TYPE_FILTER_KEEP_NONE = 1, + + /** \brief Only ignore objects if their entire level does not bring any structure. + * + * Keep the entire level of objects if at least one of these objects adds + * structure to the topology. An object brings structure when it has multiple + * children and it is not the only child of its parent. + * + * If all objects in the level are the only child of their parent, and if none + * of them has multiple children, the entire level is removed. + * + * Cannot be set for I/O and Misc objects since the topology structure does not matter there. + * \hideinitializer + */ + HWLOC_TYPE_FILTER_KEEP_STRUCTURE = 2, + + /** \brief Only keep likely-important objects of the given type. + * + * It is only useful for I/O object types. 
+ * For ::HWLOC_OBJ_PCI_DEVICE and ::HWLOC_OBJ_OS_DEVICE, it means that only objects + * of major/common kinds are kept (storage, network, OpenFabrics, Intel MICs, CUDA, + * OpenCL, NVML, and displays). + * Also, only OS devices directly attached on PCI (e.g. no USB) are reported. + * For ::HWLOC_OBJ_BRIDGE, it means that bridges are kept only if they have children. + * + * This flag equivalent to ::HWLOC_TYPE_FILTER_KEEP_ALL for Normal, Memory and Misc types + * since they are likely important. + * \hideinitializer + */ + HWLOC_TYPE_FILTER_KEEP_IMPORTANT = 3 +}; + +/** \brief Set the filtering for the given object type. + */ +HWLOC_DECLSPEC int hwloc_topology_set_type_filter(hwloc_topology_t topology, hwloc_obj_type_t type, enum hwloc_type_filter_e filter); + +/** \brief Get the current filtering for the given object type. + */ +HWLOC_DECLSPEC int hwloc_topology_get_type_filter(hwloc_topology_t topology, hwloc_obj_type_t type, enum hwloc_type_filter_e *filter); + +/** \brief Set the filtering for all object types. + * + * If some types do not support this filtering, they are silently ignored. + */ +HWLOC_DECLSPEC int hwloc_topology_set_all_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter); + +/** \brief Set the filtering for all cache object types. + */ +HWLOC_DECLSPEC int hwloc_topology_set_cache_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter); + +/** \brief Set the filtering for all instruction cache object types. + */ +HWLOC_DECLSPEC int hwloc_topology_set_icache_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter); + +/** \brief Set the filtering for all I/O object types. + */ +HWLOC_DECLSPEC int hwloc_topology_set_io_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter); + +/** \brief Set the topology-specific userdata pointer. + * + * Each topology may store one application-given private data pointer. + * It is initialized to \c NULL. + * hwloc will never modify it. 
+ * + * Use it as you wish, after hwloc_topology_init() and until hwloc_topology_destroy(). + * + * This pointer is not exported to XML. + */ +HWLOC_DECLSPEC void hwloc_topology_set_userdata(hwloc_topology_t topology, const void *userdata); + +/** \brief Retrieve the topology-specific userdata pointer. + * + * Retrieve the application-given private data pointer that was + * previously set with hwloc_topology_set_userdata(). + */ +HWLOC_DECLSPEC void * hwloc_topology_get_userdata(hwloc_topology_t topology); + +/** @} */ + + + +/** \defgroup hwlocality_tinker Modifying a loaded Topology + * @{ + */ + +/** \brief Flags to be given to hwloc_topology_restrict(). */ +enum hwloc_restrict_flags_e { + /** \brief Remove all objects that became CPU-less. + * By default, only objects that contain no PU and no memory are removed. + * \hideinitializer + */ + HWLOC_RESTRICT_FLAG_REMOVE_CPULESS = (1UL<<0), + + /** \brief Move Misc objects to ancestors if their parents are removed during restriction. + * If this flag is not set, Misc objects are removed when their parents are removed. + * \hideinitializer + */ + HWLOC_RESTRICT_FLAG_ADAPT_MISC = (1UL<<1), + + /** \brief Move I/O objects to ancestors if their parents are removed during restriction. + * If this flag is not set, I/O devices and bridges are removed when their parents are removed. + * \hideinitializer + */ + HWLOC_RESTRICT_FLAG_ADAPT_IO = (1UL<<2) +}; + +/** \brief Restrict the topology to the given CPU set. + * + * Topology \p topology is modified so as to remove all objects that + * are not included (or partially included) in the CPU set \p cpuset. + * All objects CPU and node sets are restricted accordingly. + * + * \p flags is a OR'ed set of ::hwloc_restrict_flags_e. + * + * \note This call may not be reverted by restricting back to a larger + * cpuset. Once dropped during restriction, objects may not be brought + * back, except by loading another topology with hwloc_topology_load(). + * + * \return 0 on success. 
+ * + * \return -1 with errno set to EINVAL if the input cpuset is invalid. + * The topology is not modified in this case. + * + * \return -1 with errno set to ENOMEM on failure to allocate internal data. + * The topology is reinitialized in this case. It should be either + * destroyed with hwloc_topology_destroy() or configured and loaded again. + */ +HWLOC_DECLSPEC int hwloc_topology_restrict(hwloc_topology_t __hwloc_restrict topology, hwloc_const_cpuset_t cpuset, unsigned long flags); + +/** \brief Add a MISC object as a leaf of the topology + * + * A new MISC object will be created and inserted into the topology at the + * position given by parent. It is appended to the list of existing Misc children, + * without ever adding any intermediate hierarchy level. This is useful for + * annotating the topology without actually changing the hierarchy. + * + * \p name is supposed to be unique across all Misc objects in the topology. + * It will be duplicated to setup the new object attributes. + * + * The new leaf object will not have any \p cpuset. + * + * \return the newly-created object + * + * \return \c NULL on error. + * + * \return \c NULL if Misc objects are filtered-out of the topology (::HWLOC_TYPE_FILTER_KEEP_NONE). + * + * \note If \p name contains some non-printable characters, they will + * be dropped when exporting to XML, see hwloc_topology_export_xml() in hwloc/export.h. + */ +HWLOC_DECLSPEC hwloc_obj_t hwloc_topology_insert_misc_object(hwloc_topology_t topology, hwloc_obj_t parent, const char *name); + +/** \brief Allocate a Group object to insert later with hwloc_topology_insert_group_object(). + * + * This function returns a new Group object. + * The caller should (at least) initialize its sets before inserting the object. + * See hwloc_topology_insert_group_object(). + * + * The \p subtype object attribute may be set to display something else + * than "Group" as the type name for this object in lstopo. 
+ * Custom name/value info pairs may be added with hwloc_obj_add_info() after + * insertion. + * + * The \p kind group attribute should be 0. The \p subkind group attribute may + * be set to identify multiple Groups of the same level. + * + * It is recommended not to set any other object attribute before insertion, + * since the Group may get discarded during insertion. + * + * The object will be destroyed if passed to hwloc_topology_insert_group_object() + * without any set defined. + */ +HWLOC_DECLSPEC hwloc_obj_t hwloc_topology_alloc_group_object(hwloc_topology_t topology); + +/** \brief Add more structure to the topology by adding an intermediate Group + * + * The caller should first allocate a new Group object with hwloc_topology_alloc_group_object(). + * Then it must setup at least one of its CPU or node sets to specify + * the final location of the Group in the topology. + * Then the object can be passed to this function for actual insertion in the topology. + * + * The group \p dont_merge attribute may be set to prevent the core from + * ever merging this object with another object hierarchically-identical. + * + * Either the cpuset or nodeset field (or both, if compatible) must be set + * to a non-empty bitmap. The complete_cpuset or complete_nodeset may be set + * instead if inserting with respect to the complete topology + * (including disallowed, offline or unknown objects). + * + * If grouping several objects, hwloc_obj_add_other_obj_sets() is an easy way + * to build the Group sets iteratively. + * + * These sets cannot be larger than the current topology, or they would get + * restricted silently. + * + * The core will setup the other sets after actual insertion. + * + * \return The inserted object if it was properly inserted. + * + * \return An existing object if the Group was discarded because the topology already + * contained an object at the same location (the Group did not add any locality information). 
+ * Any name/info key pair set before inserting is appended to the existing object. + * + * \return \c NULL if the insertion failed because of conflicting sets in topology tree. + * + * \return \c NULL if Group objects are filtered-out of the topology (::HWLOC_TYPE_FILTER_KEEP_NONE). + * + * \return \c NULL if the object was discarded because no set was initialized in the Group + * before insert, or all of them were empty. + */ +HWLOC_DECLSPEC hwloc_obj_t hwloc_topology_insert_group_object(hwloc_topology_t topology, hwloc_obj_t group); + +/** \brief Setup object cpusets/nodesets by OR'ing another object's sets. + * + * For each defined cpuset or nodeset in \p src, allocate the corresponding set + * in \p dst and add \p src to it by OR'ing sets. + * + * This function is convenient between hwloc_topology_alloc_group_object() + * and hwloc_topology_insert_group_object(). It builds the sets of the new Group + * that will be inserted as a new intermediate parent of several objects. + */ +HWLOC_DECLSPEC int hwloc_obj_add_other_obj_sets(hwloc_obj_t dst, hwloc_obj_t src); + +/** @} */ + + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +/* high-level helpers */ +#include + +/* inline code of some functions above */ +#include + +/* exporting to XML or synthetic */ +#include + +/* distances */ +#include + +/* topology diffs */ +#include + +/* deprecated headers */ +#include + +#endif /* HWLOC_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/autogen/config.h b/src/3rdparty/hwloc/include/hwloc/autogen/config.h new file mode 100644 index 00000000..14d4481d --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/autogen/config.h @@ -0,0 +1,59 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +/* The configuration file */ + +#ifndef HWLOC_CONFIG_H +#define HWLOC_CONFIG_H + +#define HWLOC_VERSION "2.0.4" +#define HWLOC_VERSION_MAJOR 2 +#define HWLOC_VERSION_MINOR 0 +#define HWLOC_VERSION_RELEASE 4 +#define HWLOC_VERSION_GREEK "" + +#define __hwloc_restrict +#define __hwloc_inline __inline + +#define __hwloc_attribute_unused +#define __hwloc_attribute_malloc +#define __hwloc_attribute_const +#define __hwloc_attribute_pure +#define __hwloc_attribute_deprecated +#define __hwloc_attribute_may_alias +#define __hwloc_attribute_warn_unused_result + +/* Defined to 1 if you have the `windows.h' header. */ +#define HWLOC_HAVE_WINDOWS_H 1 +#define hwloc_pid_t HANDLE +#define hwloc_thread_t HANDLE + +#include +#include +typedef DWORDLONG hwloc_uint64_t; + +#if defined( _USRDLL ) /* dynamic linkage */ +#if defined( DECLSPEC_EXPORTS ) +#define HWLOC_DECLSPEC __declspec(dllexport) +#else +#define HWLOC_DECLSPEC __declspec(dllimport) +#endif +#else /* static linkage */ +#define HWLOC_DECLSPEC +#endif + +/* Whether we need to re-define all the hwloc public symbols or not */ +#define HWLOC_SYM_TRANSFORM 0 + +/* The hwloc symbol prefix */ +#define HWLOC_SYM_PREFIX hwloc_ + +/* The hwloc symbol prefix in all caps */ +#define HWLOC_SYM_PREFIX_CAPS HWLOC_ + +#endif /* HWLOC_CONFIG_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/bitmap.h b/src/3rdparty/hwloc/include/hwloc/bitmap.h new file mode 100644 index 00000000..bae623c8 --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/bitmap.h @@ -0,0 +1,467 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief The bitmap API, for use in hwloc itself. 
+ */ + +#ifndef HWLOC_BITMAP_H +#define HWLOC_BITMAP_H + +#include +#include + + +#ifdef __cplusplus +extern "C" { +#endif + + +/** \defgroup hwlocality_bitmap The bitmap API + * + * The ::hwloc_bitmap_t type represents a set of integers (positive or null). + * A bitmap may be of infinite size (all bits are set after some point). + * A bitmap may even be full if all bits are set. + * + * Bitmaps are used by hwloc for sets of OS processors + * (which may actually be hardware threads) as by ::hwloc_cpuset_t + * (a typedef for ::hwloc_bitmap_t), or sets of NUMA memory nodes + * as ::hwloc_nodeset_t (also a typedef for ::hwloc_bitmap_t). + * Those are used for cpuset and nodeset fields in the ::hwloc_obj structure, + * see \ref hwlocality_object_sets. + * + * Both CPU and node sets are always indexed by OS physical number. + * However users should usually not build CPU and node sets manually + * (e.g. with hwloc_bitmap_set()). + * One should rather use existing object sets and combine them with + * hwloc_bitmap_or(), etc. + * For instance, binding the current thread on a pair of cores may be performed with: + * \code + * hwloc_obj_t core1 = ... , core2 = ... ; + * hwloc_bitmap_t set = hwloc_bitmap_alloc(); + * hwloc_bitmap_or(set, core1->cpuset, core2->cpuset); + * hwloc_set_cpubind(topology, set, HWLOC_CPUBIND_THREAD); + * hwloc_bitmap_free(set); + * \endcode + * + * \note Most functions below return an int that may be negative in case of + * error. The usual error case would be an internal failure to realloc/extend + * the storage of the bitmap (\p errno would be set to \c ENOMEM). + * + * \note Several examples of using the bitmap API are available under the + * doc/examples/ directory in the source tree. + * Regression tests such as tests/hwloc/hwloc_bitmap*.c also make intensive use + * of this API. + * @{ + */ + + +/** \brief + * Set of bits represented as an opaque pointer to an internal bitmap. 
+ */ +typedef struct hwloc_bitmap_s * hwloc_bitmap_t; +/** \brief a non-modifiable ::hwloc_bitmap_t */ +typedef const struct hwloc_bitmap_s * hwloc_const_bitmap_t; + + +/* + * Bitmap allocation, freeing and copying. + */ + +/** \brief Allocate a new empty bitmap. + * + * \returns A valid bitmap or \c NULL. + * + * The bitmap should be freed by a corresponding call to + * hwloc_bitmap_free(). + */ +HWLOC_DECLSPEC hwloc_bitmap_t hwloc_bitmap_alloc(void) __hwloc_attribute_malloc; + +/** \brief Allocate a new full bitmap. */ +HWLOC_DECLSPEC hwloc_bitmap_t hwloc_bitmap_alloc_full(void) __hwloc_attribute_malloc; + +/** \brief Free bitmap \p bitmap. + * + * If \p bitmap is \c NULL, no operation is performed. + */ +HWLOC_DECLSPEC void hwloc_bitmap_free(hwloc_bitmap_t bitmap); + +/** \brief Duplicate bitmap \p bitmap by allocating a new bitmap and copying \p bitmap contents. + * + * If \p bitmap is \c NULL, \c NULL is returned. + */ +HWLOC_DECLSPEC hwloc_bitmap_t hwloc_bitmap_dup(hwloc_const_bitmap_t bitmap) __hwloc_attribute_malloc; + +/** \brief Copy the contents of bitmap \p src into the already allocated bitmap \p dst */ +HWLOC_DECLSPEC int hwloc_bitmap_copy(hwloc_bitmap_t dst, hwloc_const_bitmap_t src); + + +/* + * Bitmap/String Conversion + */ + +/** \brief Stringify a bitmap. + * + * Up to \p buflen characters may be written in buffer \p buf. + * + * If \p buflen is 0, \p buf may safely be \c NULL. + * + * \return the number of character that were actually written if not truncating, + * or that would have been written (not including the ending \\0). + */ +HWLOC_DECLSPEC int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap); + +/** \brief Stringify a bitmap into a newly allocated string. + * + * \return -1 on error. + */ +HWLOC_DECLSPEC int hwloc_bitmap_asprintf(char ** strp, hwloc_const_bitmap_t bitmap); + +/** \brief Parse a bitmap string and stores it in bitmap \p bitmap. 
+ */ +HWLOC_DECLSPEC int hwloc_bitmap_sscanf(hwloc_bitmap_t bitmap, const char * __hwloc_restrict string); + +/** \brief Stringify a bitmap in the list format. + * + * Lists are comma-separated indexes or ranges. + * Ranges are dash separated indexes. + * The last range may not have an ending indexes if the bitmap is infinitely set. + * + * Up to \p buflen characters may be written in buffer \p buf. + * + * If \p buflen is 0, \p buf may safely be \c NULL. + * + * \return the number of character that were actually written if not truncating, + * or that would have been written (not including the ending \\0). + */ +HWLOC_DECLSPEC int hwloc_bitmap_list_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap); + +/** \brief Stringify a bitmap into a newly allocated list string. + * + * \return -1 on error. + */ +HWLOC_DECLSPEC int hwloc_bitmap_list_asprintf(char ** strp, hwloc_const_bitmap_t bitmap); + +/** \brief Parse a list string and stores it in bitmap \p bitmap. + */ +HWLOC_DECLSPEC int hwloc_bitmap_list_sscanf(hwloc_bitmap_t bitmap, const char * __hwloc_restrict string); + +/** \brief Stringify a bitmap in the taskset-specific format. + * + * The taskset command manipulates bitmap strings that contain a single + * (possible very long) hexadecimal number starting with 0x. + * + * Up to \p buflen characters may be written in buffer \p buf. + * + * If \p buflen is 0, \p buf may safely be \c NULL. + * + * \return the number of character that were actually written if not truncating, + * or that would have been written (not including the ending \\0). + */ +HWLOC_DECLSPEC int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, hwloc_const_bitmap_t bitmap); + +/** \brief Stringify a bitmap into a newly allocated taskset-specific string. + * + * \return -1 on error. 
+ */ +HWLOC_DECLSPEC int hwloc_bitmap_taskset_asprintf(char ** strp, hwloc_const_bitmap_t bitmap); + +/** \brief Parse a taskset-specific bitmap string and stores it in bitmap \p bitmap. + */ +HWLOC_DECLSPEC int hwloc_bitmap_taskset_sscanf(hwloc_bitmap_t bitmap, const char * __hwloc_restrict string); + + +/* + * Building bitmaps. + */ + +/** \brief Empty the bitmap \p bitmap */ +HWLOC_DECLSPEC void hwloc_bitmap_zero(hwloc_bitmap_t bitmap); + +/** \brief Fill bitmap \p bitmap with all possible indexes (even if those objects don't exist or are otherwise unavailable) */ +HWLOC_DECLSPEC void hwloc_bitmap_fill(hwloc_bitmap_t bitmap); + +/** \brief Empty the bitmap \p bitmap and add bit \p id */ +HWLOC_DECLSPEC int hwloc_bitmap_only(hwloc_bitmap_t bitmap, unsigned id); + +/** \brief Fill the bitmap \p and clear the index \p id */ +HWLOC_DECLSPEC int hwloc_bitmap_allbut(hwloc_bitmap_t bitmap, unsigned id); + +/** \brief Setup bitmap \p bitmap from unsigned long \p mask */ +HWLOC_DECLSPEC int hwloc_bitmap_from_ulong(hwloc_bitmap_t bitmap, unsigned long mask); + +/** \brief Setup bitmap \p bitmap from unsigned long \p mask used as \p i -th subset */ +HWLOC_DECLSPEC int hwloc_bitmap_from_ith_ulong(hwloc_bitmap_t bitmap, unsigned i, unsigned long mask); + + +/* + * Modifying bitmaps. + */ + +/** \brief Add index \p id in bitmap \p bitmap */ +HWLOC_DECLSPEC int hwloc_bitmap_set(hwloc_bitmap_t bitmap, unsigned id); + +/** \brief Add indexes from \p begin to \p end in bitmap \p bitmap. + * + * If \p end is \c -1, the range is infinite. 
+ */ +HWLOC_DECLSPEC int hwloc_bitmap_set_range(hwloc_bitmap_t bitmap, unsigned begin, int end); + +/** \brief Replace \p i -th subset of bitmap \p bitmap with unsigned long \p mask */ +HWLOC_DECLSPEC int hwloc_bitmap_set_ith_ulong(hwloc_bitmap_t bitmap, unsigned i, unsigned long mask); + +/** \brief Remove index \p id from bitmap \p bitmap */ +HWLOC_DECLSPEC int hwloc_bitmap_clr(hwloc_bitmap_t bitmap, unsigned id); + +/** \brief Remove indexes from \p begin to \p end in bitmap \p bitmap. + * + * If \p end is \c -1, the range is infinite. + */ +HWLOC_DECLSPEC int hwloc_bitmap_clr_range(hwloc_bitmap_t bitmap, unsigned begin, int end); + +/** \brief Keep a single index among those set in bitmap \p bitmap + * + * May be useful before binding so that the process does not + * have a chance of migrating between multiple logical CPUs + * in the original mask. + * Instead of running the task on any PU inside the given CPU set, + * the operating system scheduler will be forced to run it on a single + * of these PUs. + * It avoids a migration overhead and cache-line ping-pongs between PUs. + * + * \note This function is NOT meant to distribute multiple processes + * within a single CPU set. It always return the same single bit when + * called multiple times on the same input set. hwloc_distrib() may + * be used for generating CPU sets to distribute multiple tasks below + * a single multi-PU object. + * + * \note This function cannot be applied to an object set directly. It + * should be applied to a copy (which may be obtained with hwloc_bitmap_dup()). + */ +HWLOC_DECLSPEC int hwloc_bitmap_singlify(hwloc_bitmap_t bitmap); + + +/* + * Consulting bitmaps. 
+ */ + +/** \brief Convert the beginning part of bitmap \p bitmap into unsigned long \p mask */ +HWLOC_DECLSPEC unsigned long hwloc_bitmap_to_ulong(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure; + +/** \brief Convert the \p i -th subset of bitmap \p bitmap into unsigned long mask */ +HWLOC_DECLSPEC unsigned long hwloc_bitmap_to_ith_ulong(hwloc_const_bitmap_t bitmap, unsigned i) __hwloc_attribute_pure; + +/** \brief Test whether index \p id is part of bitmap \p bitmap. + * + * \return 1 if the bit at index \p id is set in bitmap \p bitmap, 0 otherwise. + */ +HWLOC_DECLSPEC int hwloc_bitmap_isset(hwloc_const_bitmap_t bitmap, unsigned id) __hwloc_attribute_pure; + +/** \brief Test whether bitmap \p bitmap is empty + * + * \return 1 if bitmap is empty, 0 otherwise. + */ +HWLOC_DECLSPEC int hwloc_bitmap_iszero(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure; + +/** \brief Test whether bitmap \p bitmap is completely full + * + * \return 1 if bitmap is full, 0 otherwise. + * + * \note A full bitmap is always infinitely set. + */ +HWLOC_DECLSPEC int hwloc_bitmap_isfull(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure; + +/** \brief Compute the first index (least significant bit) in bitmap \p bitmap + * + * \return -1 if no index is set in \p bitmap. + */ +HWLOC_DECLSPEC int hwloc_bitmap_first(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure; + +/** \brief Compute the next index in bitmap \p bitmap which is after index \p prev + * + * If \p prev is -1, the first index is returned. + * + * \return -1 if no index with higher index is set in \p bitmap. + */ +HWLOC_DECLSPEC int hwloc_bitmap_next(hwloc_const_bitmap_t bitmap, int prev) __hwloc_attribute_pure; + +/** \brief Compute the last index (most significant bit) in bitmap \p bitmap + * + * \return -1 if no index is set in \p bitmap, or if \p bitmap is infinitely set. 
+ */ +HWLOC_DECLSPEC int hwloc_bitmap_last(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure; + +/** \brief Compute the "weight" of bitmap \p bitmap (i.e., number of + * indexes that are in the bitmap). + * + * \return the number of indexes that are in the bitmap. + * + * \return -1 if \p bitmap is infinitely set. + */ +HWLOC_DECLSPEC int hwloc_bitmap_weight(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure; + +/** \brief Compute the first unset index (least significant bit) in bitmap \p bitmap + * + * \return -1 if no index is unset in \p bitmap. + */ +HWLOC_DECLSPEC int hwloc_bitmap_first_unset(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure; + +/** \brief Compute the next unset index in bitmap \p bitmap which is after index \p prev + * + * If \p prev is -1, the first unset index is returned. + * + * \return -1 if no index with higher index is unset in \p bitmap. + */ +HWLOC_DECLSPEC int hwloc_bitmap_next_unset(hwloc_const_bitmap_t bitmap, int prev) __hwloc_attribute_pure; + +/** \brief Compute the last unset index (most significant bit) in bitmap \p bitmap + * + * \return -1 if no index is unset in \p bitmap, or if \p bitmap is infinitely set. + */ +HWLOC_DECLSPEC int hwloc_bitmap_last_unset(hwloc_const_bitmap_t bitmap) __hwloc_attribute_pure; + +/** \brief Loop macro iterating on bitmap \p bitmap + * + * The loop must start with hwloc_bitmap_foreach_begin() and end + * with hwloc_bitmap_foreach_end() followed by a terminating ';'. + * + * \p index is the loop variable; it should be an unsigned int. The + * first iteration will set \p index to the lowest index in the bitmap. + * Successive iterations will iterate through, in order, all remaining + * indexes set in the bitmap. To be specific: each iteration will return a + * value for \p index such that hwloc_bitmap_isset(bitmap, index) is true. + * + * The assert prevents the loop from being infinite if the bitmap is infinitely set. 
+ * + * \hideinitializer + */ +#define hwloc_bitmap_foreach_begin(id, bitmap) \ +do { \ + assert(hwloc_bitmap_weight(bitmap) != -1); \ + for (id = hwloc_bitmap_first(bitmap); \ + (unsigned) id != (unsigned) -1; \ + id = hwloc_bitmap_next(bitmap, id)) { + +/** \brief End of loop macro iterating on a bitmap. + * + * Needs a terminating ';'. + * + * \sa hwloc_bitmap_foreach_begin() + * \hideinitializer + */ +#define hwloc_bitmap_foreach_end() \ + } \ +} while (0) + + +/* + * Combining bitmaps. + */ + +/** \brief Or bitmaps \p bitmap1 and \p bitmap2 and store the result in bitmap \p res + * + * \p res can be the same as \p bitmap1 or \p bitmap2 + */ +HWLOC_DECLSPEC int hwloc_bitmap_or (hwloc_bitmap_t res, hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2); + +/** \brief And bitmaps \p bitmap1 and \p bitmap2 and store the result in bitmap \p res + * + * \p res can be the same as \p bitmap1 or \p bitmap2 + */ +HWLOC_DECLSPEC int hwloc_bitmap_and (hwloc_bitmap_t res, hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2); + +/** \brief And bitmap \p bitmap1 and the negation of \p bitmap2 and store the result in bitmap \p res + * + * \p res can be the same as \p bitmap1 or \p bitmap2 + */ +HWLOC_DECLSPEC int hwloc_bitmap_andnot (hwloc_bitmap_t res, hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2); + +/** \brief Xor bitmaps \p bitmap1 and \p bitmap2 and store the result in bitmap \p res + * + * \p res can be the same as \p bitmap1 or \p bitmap2 + */ +HWLOC_DECLSPEC int hwloc_bitmap_xor (hwloc_bitmap_t res, hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2); + +/** \brief Negate bitmap \p bitmap and store the result in bitmap \p res + * + * \p res can be the same as \p bitmap + */ +HWLOC_DECLSPEC int hwloc_bitmap_not (hwloc_bitmap_t res, hwloc_const_bitmap_t bitmap); + + +/* + * Comparing bitmaps. + */ + +/** \brief Test whether bitmaps \p bitmap1 and \p bitmap2 intersects. + * + * \return 1 if bitmaps intersect, 0 otherwise. 
+ */ +HWLOC_DECLSPEC int hwloc_bitmap_intersects (hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2) __hwloc_attribute_pure; + +/** \brief Test whether bitmap \p sub_bitmap is part of bitmap \p super_bitmap. + * + * \return 1 if \p sub_bitmap is included in \p super_bitmap, 0 otherwise. + * + * \note The empty bitmap is considered included in any other bitmap. + */ +HWLOC_DECLSPEC int hwloc_bitmap_isincluded (hwloc_const_bitmap_t sub_bitmap, hwloc_const_bitmap_t super_bitmap) __hwloc_attribute_pure; + +/** \brief Test whether bitmap \p bitmap1 is equal to bitmap \p bitmap2. + * + * \return 1 if bitmaps are equal, 0 otherwise. + */ +HWLOC_DECLSPEC int hwloc_bitmap_isequal (hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2) __hwloc_attribute_pure; + +/** \brief Compare bitmaps \p bitmap1 and \p bitmap2 using their lowest index. + * + * A bitmap is considered smaller if its least significant bit is smaller. + * The empty bitmap is considered higher than anything (because its least significant bit does not exist). + * + * \return -1 if \p bitmap1 is considered smaller than \p bitmap2. + * \return 1 if \p bitmap1 is considered larger than \p bitmap2. + * + * For instance comparing binary bitmaps 0011 and 0110 returns -1 + * (hence 0011 is considered smaller than 0110) + * because least significant bit of 0011 (0001) is smaller than least significant bit of 0110 (0010). + * Comparing 01001 and 00110 would also return -1 for the same reason. + * + * \return 0 if bitmaps are considered equal, even if they are not strictly equal. + * They just need to have the same least significant bit. + * For instance, comparing binary bitmaps 0010 and 0110 returns 0 because they have the same least significant bit. + */ +HWLOC_DECLSPEC int hwloc_bitmap_compare_first(hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2) __hwloc_attribute_pure; + +/** \brief Compare bitmaps \p bitmap1 and \p bitmap2 in lexicographic order. 
+ * + * Lexicographic comparison of bitmaps, starting for their highest indexes. + * Compare last indexes first, then second, etc. + * The empty bitmap is considered lower than anything. + * + * \return -1 if \p bitmap1 is considered smaller than \p bitmap2. + * \return 1 if \p bitmap1 is considered larger than \p bitmap2. + * \return 0 if bitmaps are equal (contrary to hwloc_bitmap_compare_first()). + * + * For instance comparing binary bitmaps 0011 and 0110 returns -1 + * (hence 0011 is considered smaller than 0110). + * Comparing 00101 and 01010 returns -1 too. + * + * \note This is different from the non-existing hwloc_bitmap_compare_last() + * which would only compare the highest index of each bitmap. + */ +HWLOC_DECLSPEC int hwloc_bitmap_compare(hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2) __hwloc_attribute_pure; + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_BITMAP_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/cuda.h b/src/3rdparty/hwloc/include/hwloc/cuda.h new file mode 100644 index 00000000..77c8473e --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/cuda.h @@ -0,0 +1,220 @@ +/* + * Copyright © 2010-2017 Inria. All rights reserved. + * Copyright © 2010-2011 Université Bordeaux + * Copyright © 2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Macros to help interaction between hwloc and the CUDA Driver API. + * + * Applications that use both hwloc and the CUDA Driver API may want to + * include this file so as to get topology information for CUDA devices. 
+ * + */ + +#ifndef HWLOC_CUDA_H +#define HWLOC_CUDA_H + +#include +#include +#include +#ifdef HWLOC_LINUX_SYS +#include +#endif + +#include + + +#ifdef __cplusplus +extern "C" { +#endif + + +/** \defgroup hwlocality_cuda Interoperability with the CUDA Driver API + * + * This interface offers ways to retrieve topology information about + * CUDA devices when using the CUDA Driver API. + * + * @{ + */ + +/** \brief Return the domain, bus and device IDs of the CUDA device \p cudevice. + * + * Device \p cudevice must match the local machine. + */ +static __hwloc_inline int +hwloc_cuda_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unused, + CUdevice cudevice, int *domain, int *bus, int *dev) +{ + CUresult cres; + +#if CUDA_VERSION >= 4000 + cres = cuDeviceGetAttribute(domain, CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID, cudevice); + if (cres != CUDA_SUCCESS) { + errno = ENOSYS; + return -1; + } +#else + *domain = 0; +#endif + cres = cuDeviceGetAttribute(bus, CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, cudevice); + if (cres != CUDA_SUCCESS) { + errno = ENOSYS; + return -1; + } + cres = cuDeviceGetAttribute(dev, CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID, cudevice); + if (cres != CUDA_SUCCESS) { + errno = ENOSYS; + return -1; + } + + return 0; +} + +/** \brief Get the CPU set of logical processors that are physically + * close to device \p cudevice. + * + * Return the CPU set describing the locality of the CUDA device \p cudevice. + * + * Topology \p topology and device \p cudevice must match the local machine. + * I/O devices detection and the CUDA component are not needed in the topology. + * + * The function only returns the locality of the device. + * If more information about the device is needed, OS objects should + * be used instead, see hwloc_cuda_get_device_osdev() + * and hwloc_cuda_get_device_osdev_by_index(). + * + * This function is currently only implemented in a meaningful way for + * Linux; other systems will simply get a full cpuset. 
+ */ +static __hwloc_inline int +hwloc_cuda_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused, + CUdevice cudevice, hwloc_cpuset_t set) +{ +#ifdef HWLOC_LINUX_SYS + /* If we're on Linux, use the sysfs mechanism to get the local cpus */ +#define HWLOC_CUDA_DEVICE_SYSFS_PATH_MAX 128 + char path[HWLOC_CUDA_DEVICE_SYSFS_PATH_MAX]; + int domainid, busid, deviceid; + + if (hwloc_cuda_get_device_pci_ids(topology, cudevice, &domainid, &busid, &deviceid)) + return -1; + + if (!hwloc_topology_is_thissystem(topology)) { + errno = EINVAL; + return -1; + } + + sprintf(path, "/sys/bus/pci/devices/%04x:%02x:%02x.0/local_cpus", domainid, busid, deviceid); + if (hwloc_linux_read_path_as_cpumask(path, set) < 0 + || hwloc_bitmap_iszero(set)) + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#else + /* Non-Linux systems simply get a full cpuset */ + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#endif + return 0; +} + +/** \brief Get the hwloc PCI device object corresponding to the + * CUDA device \p cudevice. + * + * Return the PCI device object describing the CUDA device \p cudevice. + * Return NULL if there is none. + * + * Topology \p topology and device \p cudevice must match the local machine. + * I/O devices detection must be enabled in topology \p topology. + * The CUDA component is not needed in the topology. + */ +static __hwloc_inline hwloc_obj_t +hwloc_cuda_get_device_pcidev(hwloc_topology_t topology, CUdevice cudevice) +{ + int domain, bus, dev; + + if (hwloc_cuda_get_device_pci_ids(topology, cudevice, &domain, &bus, &dev)) + return NULL; + + return hwloc_get_pcidev_by_busid(topology, domain, bus, dev, 0); +} + +/** \brief Get the hwloc OS device object corresponding to CUDA device \p cudevice. + * + * Return the hwloc OS device object that describes the given + * CUDA device \p cudevice. Return NULL if there is none. + * + * Topology \p topology and device \p cudevice must match the local machine. 
+ * I/O devices detection and the CUDA component must be enabled in the topology. + * If not, the locality of the object may still be found using + * hwloc_cuda_get_device_cpuset(). + * + * \note This function cannot work if PCI devices are filtered out. + * + * \note The corresponding hwloc PCI device may be found by looking + * at the result parent pointer (unless PCI devices are filtered out). + */ +static __hwloc_inline hwloc_obj_t +hwloc_cuda_get_device_osdev(hwloc_topology_t topology, CUdevice cudevice) +{ + hwloc_obj_t osdev = NULL; + int domain, bus, dev; + + if (hwloc_cuda_get_device_pci_ids(topology, cudevice, &domain, &bus, &dev)) + return NULL; + + osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + hwloc_obj_t pcidev = osdev->parent; + if (strncmp(osdev->name, "cuda", 4)) + continue; + if (pcidev + && pcidev->type == HWLOC_OBJ_PCI_DEVICE + && (int) pcidev->attr->pcidev.domain == domain + && (int) pcidev->attr->pcidev.bus == bus + && (int) pcidev->attr->pcidev.dev == dev + && pcidev->attr->pcidev.func == 0) + return osdev; + /* if PCI are filtered out, we need a info attr to match on */ + } + + return NULL; +} + +/** \brief Get the hwloc OS device object corresponding to the + * CUDA device whose index is \p idx. + * + * Return the OS device object describing the CUDA device whose + * index is \p idx. Return NULL if there is none. + * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. + * I/O devices detection and the CUDA component must be enabled in the topology. + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object (unless PCI devices are filtered out). + * + * \note This function is identical to hwloc_cudart_get_device_osdev_by_index(). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_cuda_get_device_osdev_by_index(hwloc_topology_t topology, unsigned idx) +{ + hwloc_obj_t osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + if (HWLOC_OBJ_OSDEV_COPROC == osdev->attr->osdev.type + && osdev->name + && !strncmp("cuda", osdev->name, 4) + && atoi(osdev->name + 4) == (int) idx) + return osdev; + } + return NULL; +} + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_CUDA_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/cudart.h b/src/3rdparty/hwloc/include/hwloc/cudart.h new file mode 100644 index 00000000..63c7f59c --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/cudart.h @@ -0,0 +1,177 @@ +/* + * Copyright © 2010-2017 Inria. All rights reserved. + * Copyright © 2010-2011 Université Bordeaux + * Copyright © 2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Macros to help interaction between hwloc and the CUDA Runtime API. + * + * Applications that use both hwloc and the CUDA Runtime API may want to + * include this file so as to get topology information for CUDA devices. + * + */ + +#ifndef HWLOC_CUDART_H +#define HWLOC_CUDART_H + +#include +#include +#include +#ifdef HWLOC_LINUX_SYS +#include +#endif + +#include /* for CUDA_VERSION */ +#include + + +#ifdef __cplusplus +extern "C" { +#endif + + +/** \defgroup hwlocality_cudart Interoperability with the CUDA Runtime API + * + * This interface offers ways to retrieve topology information about + * CUDA devices when using the CUDA Runtime API. + * + * @{ + */ + +/** \brief Return the domain, bus and device IDs of the CUDA device whose index is \p idx. + * + * Device index \p idx must match the local machine. 
+ */ +static __hwloc_inline int +hwloc_cudart_get_device_pci_ids(hwloc_topology_t topology __hwloc_attribute_unused, + int idx, int *domain, int *bus, int *dev) +{ + cudaError_t cerr; + struct cudaDeviceProp prop; + + cerr = cudaGetDeviceProperties(&prop, idx); + if (cerr) { + errno = ENOSYS; + return -1; + } + +#if CUDA_VERSION >= 4000 + *domain = prop.pciDomainID; +#else + *domain = 0; +#endif + + *bus = prop.pciBusID; + *dev = prop.pciDeviceID; + + return 0; +} + +/** \brief Get the CPU set of logical processors that are physically + * close to device \p idx. + * + * Return the CPU set describing the locality of the CUDA device + * whose index is \p idx. + * + * Topology \p topology and device \p idx must match the local machine. + * I/O devices detection and the CUDA component are not needed in the topology. + * + * The function only returns the locality of the device. + * If more information about the device is needed, OS objects should + * be used instead, see hwloc_cudart_get_device_osdev_by_index(). + * + * This function is currently only implemented in a meaningful way for + * Linux; other systems will simply get a full cpuset. 
+ */ +static __hwloc_inline int +hwloc_cudart_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused, + int idx, hwloc_cpuset_t set) +{ +#ifdef HWLOC_LINUX_SYS + /* If we're on Linux, use the sysfs mechanism to get the local cpus */ +#define HWLOC_CUDART_DEVICE_SYSFS_PATH_MAX 128 + char path[HWLOC_CUDART_DEVICE_SYSFS_PATH_MAX]; + int domain, bus, dev; + + if (hwloc_cudart_get_device_pci_ids(topology, idx, &domain, &bus, &dev)) + return -1; + + if (!hwloc_topology_is_thissystem(topology)) { + errno = EINVAL; + return -1; + } + + sprintf(path, "/sys/bus/pci/devices/%04x:%02x:%02x.0/local_cpus", (unsigned) domain, (unsigned) bus, (unsigned) dev); + if (hwloc_linux_read_path_as_cpumask(path, set) < 0 + || hwloc_bitmap_iszero(set)) + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#else + /* Non-Linux systems simply get a full cpuset */ + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#endif + return 0; +} + +/** \brief Get the hwloc PCI device object corresponding to the + * CUDA device whose index is \p idx. + * + * Return the PCI device object describing the CUDA device whose + * index is \p idx. Return NULL if there is none. + * + * Topology \p topology and device \p idx must match the local machine. + * I/O devices detection must be enabled in topology \p topology. + * The CUDA component is not needed in the topology. + */ +static __hwloc_inline hwloc_obj_t +hwloc_cudart_get_device_pcidev(hwloc_topology_t topology, int idx) +{ + int domain, bus, dev; + + if (hwloc_cudart_get_device_pci_ids(topology, idx, &domain, &bus, &dev)) + return NULL; + + return hwloc_get_pcidev_by_busid(topology, domain, bus, dev, 0); +} + +/** \brief Get the hwloc OS device object corresponding to the + * CUDA device whose index is \p idx. + * + * Return the OS device object describing the CUDA device whose + * index is \p idx. Return NULL if there is none. 
+ * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. + * I/O devices detection and the CUDA component must be enabled in the topology. + * If not, the locality of the object may still be found using + * hwloc_cudart_get_device_cpuset(). + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object (unless PCI devices are filtered out). + * + * \note This function is identical to hwloc_cuda_get_device_osdev_by_index(). + */ +static __hwloc_inline hwloc_obj_t +hwloc_cudart_get_device_osdev_by_index(hwloc_topology_t topology, unsigned idx) +{ + hwloc_obj_t osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + if (HWLOC_OBJ_OSDEV_COPROC == osdev->attr->osdev.type + && osdev->name + && !strncmp("cuda", osdev->name, 4) + && atoi(osdev->name + 4) == (int) idx) + return osdev; + } + return NULL; +} + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_CUDART_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/deprecated.h b/src/3rdparty/hwloc/include/hwloc/deprecated.h new file mode 100644 index 00000000..8f3b1459 --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/deprecated.h @@ -0,0 +1,206 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2017 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2009-2010 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +/** + * This file contains the inline code of functions declared in hwloc.h + */ + +#ifndef HWLOC_DEPRECATED_H +#define HWLOC_DEPRECATED_H + +#ifndef HWLOC_H +#error Please include the main hwloc.h instead +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* backward compat with v1.11 before System removal */ +#define HWLOC_OBJ_SYSTEM HWLOC_OBJ_MACHINE +/* backward compat with v1.10 before Socket->Package renaming */ +#define HWLOC_OBJ_SOCKET HWLOC_OBJ_PACKAGE +/* backward compat with v1.10 before Node->NUMANode clarification */ +#define HWLOC_OBJ_NODE HWLOC_OBJ_NUMANODE + +/** \brief Insert a misc object by parent. + * + * Identical to hwloc_topology_insert_misc_object(). + */ +static __hwloc_inline hwloc_obj_t +hwloc_topology_insert_misc_object_by_parent(hwloc_topology_t topology, hwloc_obj_t parent, const char *name) __hwloc_attribute_deprecated; +static __hwloc_inline hwloc_obj_t +hwloc_topology_insert_misc_object_by_parent(hwloc_topology_t topology, hwloc_obj_t parent, const char *name) +{ + return hwloc_topology_insert_misc_object(topology, parent, name); +} + +/** \brief Stringify the cpuset containing a set of objects. + * + * If \p size is 0, \p string may safely be \c NULL. + * + * \return the number of character that were actually written if not truncating, + * or that would have been written (not including the ending \\0). + */ +static __hwloc_inline int +hwloc_obj_cpuset_snprintf(char *str, size_t size, size_t nobj, struct hwloc_obj * const *objs) __hwloc_attribute_deprecated; +static __hwloc_inline int +hwloc_obj_cpuset_snprintf(char *str, size_t size, size_t nobj, struct hwloc_obj * const *objs) +{ + hwloc_bitmap_t set = hwloc_bitmap_alloc(); + int res; + unsigned i; + + hwloc_bitmap_zero(set); + for(i=0; icpuset) + hwloc_bitmap_or(set, set, objs[i]->cpuset); + + res = hwloc_bitmap_snprintf(str, size, set); + hwloc_bitmap_free(set); + return res; +} + +/** \brief Convert a type string into a type and some attributes. 
+ * + * Deprecated by hwloc_type_sscanf() + */ +static __hwloc_inline int +hwloc_obj_type_sscanf(const char *string, hwloc_obj_type_t *typep, int *depthattrp, void *typeattrp, size_t typeattrsize) __hwloc_attribute_deprecated; +static __hwloc_inline int +hwloc_obj_type_sscanf(const char *string, hwloc_obj_type_t *typep, int *depthattrp, void *typeattrp, size_t typeattrsize) +{ + union hwloc_obj_attr_u attr; + int err = hwloc_type_sscanf(string, typep, &attr, sizeof(attr)); + if (err < 0) + return err; + if (hwloc_obj_type_is_cache(*typep)) { + if (depthattrp) + *depthattrp = (int) attr.cache.depth; + if (typeattrp && typeattrsize >= sizeof(hwloc_obj_cache_type_t)) + memcpy(typeattrp, &attr.cache.type, sizeof(hwloc_obj_cache_type_t)); + } else if (*typep == HWLOC_OBJ_GROUP) { + if (depthattrp) + *depthattrp = (int) attr.group.depth; + } + return 0; +} + +/** \brief Set the default memory binding policy of the current + * process or thread to prefer the NUMA node(s) specified by physical \p nodeset + */ +static __hwloc_inline int +hwloc_set_membind_nodeset(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) __hwloc_attribute_deprecated; +static __hwloc_inline int +hwloc_set_membind_nodeset(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + return hwloc_set_membind(topology, nodeset, policy, flags | HWLOC_MEMBIND_BYNODESET); +} + +/** \brief Query the default memory binding policy and physical locality of the + * current process or thread. 
+ */ +static __hwloc_inline int +hwloc_get_membind_nodeset(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) __hwloc_attribute_deprecated; +static __hwloc_inline int +hwloc_get_membind_nodeset(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + return hwloc_get_membind(topology, nodeset, policy, flags | HWLOC_MEMBIND_BYNODESET); +} + +/** \brief Set the default memory binding policy of the specified + * process to prefer the NUMA node(s) specified by physical \p nodeset + */ +static __hwloc_inline int +hwloc_set_proc_membind_nodeset(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) __hwloc_attribute_deprecated; +static __hwloc_inline int +hwloc_set_proc_membind_nodeset(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + return hwloc_set_proc_membind(topology, pid, nodeset, policy, flags | HWLOC_MEMBIND_BYNODESET); +} + +/** \brief Query the default memory binding policy and physical locality of the + * specified process. + */ +static __hwloc_inline int +hwloc_get_proc_membind_nodeset(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) __hwloc_attribute_deprecated; +static __hwloc_inline int +hwloc_get_proc_membind_nodeset(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + return hwloc_get_proc_membind(topology, pid, nodeset, policy, flags | HWLOC_MEMBIND_BYNODESET); +} + +/** \brief Bind the already-allocated memory identified by (addr, len) + * to the NUMA node(s) in physical \p nodeset. 
+ */ +static __hwloc_inline int +hwloc_set_area_membind_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) __hwloc_attribute_deprecated; +static __hwloc_inline int +hwloc_set_area_membind_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + return hwloc_set_area_membind(topology, addr, len, nodeset, policy, flags | HWLOC_MEMBIND_BYNODESET); +} + +/** \brief Query the physical NUMA node(s) and binding policy of the memory + * identified by (\p addr, \p len ). + */ +static __hwloc_inline int +hwloc_get_area_membind_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) __hwloc_attribute_deprecated; +static __hwloc_inline int +hwloc_get_area_membind_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + return hwloc_get_area_membind(topology, addr, len, nodeset, policy, flags | HWLOC_MEMBIND_BYNODESET); +} + +/** \brief Allocate some memory on the given physical nodeset \p nodeset + */ +static __hwloc_inline void * +hwloc_alloc_membind_nodeset(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) __hwloc_attribute_malloc __hwloc_attribute_deprecated; +static __hwloc_inline void * +hwloc_alloc_membind_nodeset(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + return hwloc_alloc_membind(topology, len, nodeset, policy, flags | HWLOC_MEMBIND_BYNODESET); +} + +/** \brief Allocate some memory on the given nodeset \p nodeset. 
+ */ +static __hwloc_inline void * +hwloc_alloc_membind_policy_nodeset(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) __hwloc_attribute_malloc __hwloc_attribute_deprecated; +static __hwloc_inline void * +hwloc_alloc_membind_policy_nodeset(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + return hwloc_alloc_membind_policy(topology, len, nodeset, policy, flags | HWLOC_MEMBIND_BYNODESET); +} + +/** \brief Convert a CPU set into a NUMA node set and handle non-NUMA cases + */ +static __hwloc_inline void +hwloc_cpuset_to_nodeset_strict(hwloc_topology_t topology, hwloc_const_cpuset_t _cpuset, hwloc_nodeset_t nodeset) __hwloc_attribute_deprecated; +static __hwloc_inline void +hwloc_cpuset_to_nodeset_strict(hwloc_topology_t topology, hwloc_const_cpuset_t _cpuset, hwloc_nodeset_t nodeset) +{ + hwloc_cpuset_to_nodeset(topology, _cpuset, nodeset); +} + +/** \brief Convert a NUMA node set into a CPU set and handle non-NUMA cases + */ +static __hwloc_inline void +hwloc_cpuset_from_nodeset_strict(hwloc_topology_t topology, hwloc_cpuset_t _cpuset, hwloc_const_nodeset_t nodeset) __hwloc_attribute_deprecated; +static __hwloc_inline void +hwloc_cpuset_from_nodeset_strict(hwloc_topology_t topology, hwloc_cpuset_t _cpuset, hwloc_const_nodeset_t nodeset) +{ + hwloc_cpuset_from_nodeset(topology, _cpuset, nodeset); +} + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_DEPRECATED_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/diff.h b/src/3rdparty/hwloc/include/hwloc/diff.h new file mode 100644 index 00000000..79f2df3d --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/diff.h @@ -0,0 +1,289 @@ +/* + * Copyright © 2013-2018 Inria. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Topology differences. 
+ */ + +#ifndef HWLOC_DIFF_H +#define HWLOC_DIFF_H + +#ifndef HWLOC_H +#error Please include the main hwloc.h instead +#endif + + +#ifdef __cplusplus +extern "C" { +#elif 0 +} +#endif + + +/** \defgroup hwlocality_diff Topology differences + * + * Applications that manipulate many similar topologies, for instance + * one for each node of a homogeneous cluster, may want to compress + * topologies to reduce the memory footprint. + * + * This file offers a way to manipulate the difference between topologies + * and export/import it to/from XML. + * Compression may therefore be achieved by storing one topology + * entirely while the others are only described by their differences + * with the former. + * The actual topology can be reconstructed when actually needed by + * applying the precomputed difference to the reference topology. + * + * This interface targets very similar nodes. + * Only very simple differences between topologies are actually + * supported, for instance a change in the memory size, the name + * of the object, or some info attribute. + * More complex differences such as adding or removing objects cannot + * be represented in the difference structures and therefore return + * errors. + * Differences between object sets or topology-wide allowed sets, + * cannot be represented either. + * + * It means that there is no need to apply the difference when + * looking at the tree organization (how many levels, how many + * objects per level, what kind of objects, CPU and node sets, etc) + * and when binding to objects. + * However the difference must be applied when looking at object + * attributes such as the name, the memory size or info attributes. + * + * @{ + */ + + +/** \brief Type of one object attribute difference. + */ +typedef enum hwloc_topology_diff_obj_attr_type_e { + /** \brief The object local memory is modified. + * The union is a hwloc_topology_diff_obj_attr_u::hwloc_topology_diff_obj_attr_uint64_s + * (and the index field is ignored). 
+ */ + HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_SIZE, + + /** \brief The object name is modified. + * The union is a hwloc_topology_diff_obj_attr_u::hwloc_topology_diff_obj_attr_string_s + * (and the name field is ignored). + */ + + HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_NAME, + /** \brief the value of an info attribute is modified. + * The union is a hwloc_topology_diff_obj_attr_u::hwloc_topology_diff_obj_attr_string_s. + */ + HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO +} hwloc_topology_diff_obj_attr_type_t; + +/** \brief One object attribute difference. + */ +union hwloc_topology_diff_obj_attr_u { + struct hwloc_topology_diff_obj_attr_generic_s { + /* each part of the union must start with these */ + hwloc_topology_diff_obj_attr_type_t type; + } generic; + + /** \brief Integer attribute modification with an optional index. */ + struct hwloc_topology_diff_obj_attr_uint64_s { + /* used for storing integer attributes */ + hwloc_topology_diff_obj_attr_type_t type; + hwloc_uint64_t index; /* not used for SIZE */ + hwloc_uint64_t oldvalue; + hwloc_uint64_t newvalue; + } uint64; + + /** \brief String attribute modification with an optional name */ + struct hwloc_topology_diff_obj_attr_string_s { + /* used for storing name and info pairs */ + hwloc_topology_diff_obj_attr_type_t type; + char *name; /* not used for NAME */ + char *oldvalue; + char *newvalue; + } string; +}; + + +/** \brief Type of one element of a difference list. + */ +typedef enum hwloc_topology_diff_type_e { + /** \brief An object attribute was changed. + * The union is a hwloc_topology_diff_obj_attr_u::hwloc_topology_diff_obj_attr_s. + */ + HWLOC_TOPOLOGY_DIFF_OBJ_ATTR, + + /** \brief The difference is too complex, + * it cannot be represented. The difference below + * this object has not been checked. + * hwloc_topology_diff_build() will return 1. + * + * The union is a hwloc_topology_diff_obj_attr_u::hwloc_topology_diff_too_complex_s. 
+ */ + HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX +} hwloc_topology_diff_type_t; + +/** \brief One element of a difference list between two topologies. + */ +typedef union hwloc_topology_diff_u { + struct hwloc_topology_diff_generic_s { + /* each part of the union must start with these */ + hwloc_topology_diff_type_t type; + union hwloc_topology_diff_u * next; /* pointer to the next element of the list, or NULL */ + } generic; + + /* A difference in an object attribute. */ + struct hwloc_topology_diff_obj_attr_s { + hwloc_topology_diff_type_t type; /* must be ::HWLOC_TOPOLOGY_DIFF_OBJ_ATTR */ + union hwloc_topology_diff_u * next; + /* List of attribute differences for a single object */ + int obj_depth; + unsigned obj_index; + union hwloc_topology_diff_obj_attr_u diff; + } obj_attr; + + /* A difference that is too complex. */ + struct hwloc_topology_diff_too_complex_s { + hwloc_topology_diff_type_t type; /* must be ::HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX */ + union hwloc_topology_diff_u * next; + /* Where we had to stop computing the diff in the first topology */ + int obj_depth; + unsigned obj_index; + } too_complex; +} * hwloc_topology_diff_t; + + +/** \brief Compute the difference between 2 topologies. + * + * The difference is stored as a list of ::hwloc_topology_diff_t entries + * starting at \p diff. + * It is computed by doing a depth-first traversal of both topology trees + * simultaneously. + * + * If the difference between 2 objects is too complex to be represented + * (for instance if some objects have different types, or different numbers + * of children), a special diff entry of type ::HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX + * is queued. + * The computation of the diff does not continue below these objects. + * So each such diff entry means that the difference between two subtrees + * could not be computed. + * + * \return 0 if the difference can be represented properly. + * + * \return 0 with \p diff pointing to NULL if there is no difference + * between the topologies. 
+ * + * \return 1 if the difference is too complex (see above). Some entries in + * the list will be of type ::HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX. + * + * \return -1 on any other error. + * + * \note \p flags is currently not used. It should be 0. + * + * \note The output diff has to be freed with hwloc_topology_diff_destroy(). + * + * \note The output diff can only be exported to XML or passed to + * hwloc_topology_diff_apply() if 0 was returned, i.e. if no entry of type + * ::HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX is listed. + * + * \note The output diff may be modified by removing some entries from + * the list. The removed entries should be freed by passing them to + * to hwloc_topology_diff_destroy() (possible as another list). +*/ +HWLOC_DECLSPEC int hwloc_topology_diff_build(hwloc_topology_t topology, hwloc_topology_t newtopology, unsigned long flags, hwloc_topology_diff_t *diff); + +/** \brief Flags to be given to hwloc_topology_diff_apply(). + */ +enum hwloc_topology_diff_apply_flags_e { + /** \brief Apply topology diff in reverse direction. + * \hideinitializer + */ + HWLOC_TOPOLOGY_DIFF_APPLY_REVERSE = (1UL<<0) +}; + +/** \brief Apply a topology diff to an existing topology. + * + * \p flags is an OR'ed set of ::hwloc_topology_diff_apply_flags_e. + * + * The new topology is modified in place. hwloc_topology_dup() + * may be used to duplicate it before patching. + * + * If the difference cannot be applied entirely, all previous applied + * elements are unapplied before returning. + * + * \return 0 on success. + * + * \return -N if applying the difference failed while trying + * to apply the N-th part of the difference. For instance -1 + * is returned if the very first difference element could not + * be applied. + */ +HWLOC_DECLSPEC int hwloc_topology_diff_apply(hwloc_topology_t topology, hwloc_topology_diff_t diff, unsigned long flags); + +/** \brief Destroy a list of topology differences. 
+ */ +HWLOC_DECLSPEC int hwloc_topology_diff_destroy(hwloc_topology_diff_t diff); + +/** \brief Load a list of topology differences from a XML file. + * + * If not \c NULL, \p refname will be filled with the identifier + * string of the reference topology for the difference file, + * if any was specified in the XML file. + * This identifier is usually the name of the other XML file + * that contains the reference topology. + * + * \note the pointer returned in refname should later be freed + * by the caller. + */ +HWLOC_DECLSPEC int hwloc_topology_diff_load_xml(const char *xmlpath, hwloc_topology_diff_t *diff, char **refname); + +/** \brief Export a list of topology differences to a XML file. + * + * If not \c NULL, \p refname defines an identifier string + * for the reference topology which was used as a base when + * computing this difference. + * This identifier is usually the name of the other XML file + * that contains the reference topology. + * This attribute is given back when reading the diff from XML. + */ +HWLOC_DECLSPEC int hwloc_topology_diff_export_xml(hwloc_topology_diff_t diff, const char *refname, const char *xmlpath); + +/** \brief Load a list of topology differences from a XML buffer. + * + * If not \c NULL, \p refname will be filled with the identifier + * string of the reference topology for the difference file, + * if any was specified in the XML file. + * This identifier is usually the name of the other XML file + * that contains the reference topology. + * + * \note the pointer returned in refname should later be freed + * by the caller. + */ +HWLOC_DECLSPEC int hwloc_topology_diff_load_xmlbuffer(const char *xmlbuffer, int buflen, hwloc_topology_diff_t *diff, char **refname); + +/** \brief Export a list of topology differences to a XML buffer. + * + * If not \c NULL, \p refname defines an identifier string + * for the reference topology which was used as a base when + * computing this difference. 
+ * This identifier is usually the name of the other XML file + * that contains the reference topology. + * This attribute is given back when reading the diff from XML. + * + * The returned buffer ends with a \0 that is included in the returned + * length. + * + * \note The XML buffer should later be freed with hwloc_free_xmlbuffer(). + */ +HWLOC_DECLSPEC int hwloc_topology_diff_export_xmlbuffer(hwloc_topology_diff_t diff, const char *refname, char **xmlbuffer, int *buflen); + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_DIFF_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/distances.h b/src/3rdparty/hwloc/include/hwloc/distances.h new file mode 100644 index 00000000..d523f29f --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/distances.h @@ -0,0 +1,271 @@ +/* + * Copyright © 2010-2019 Inria. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Object distances. + */ + +#ifndef HWLOC_DISTANCES_H +#define HWLOC_DISTANCES_H + +#ifndef HWLOC_H +#error Please include the main hwloc.h instead +#endif + + +#ifdef __cplusplus +extern "C" { +#elif 0 +} +#endif + + +/** \defgroup hwlocality_distances_get Retrieve distances between objects + * @{ + */ + +/** \brief Matrix of distances between a set of objects. + * + * This matrix often contains latencies between NUMA nodes + * (as reported in the System Locality Distance Information Table (SLIT) + * in the ACPI specification), which may or may not be physically accurate. + * It corresponds to the latency for accessing the memory of one node + * from a core in another node. + * The corresponding kind is ::HWLOC_DISTANCES_KIND_FROM_OS | ::HWLOC_DISTANCES_KIND_FROM_USER. + * + * The matrix may also contain bandwidths between random sets of objects, + * possibly provided by the user, as specified in the \p kind attribute. + */ +struct hwloc_distances_s { + unsigned nbobjs; /**< \brief Number of objects described by the distance matrix. 
*/ + hwloc_obj_t *objs; /**< \brief Array of objects described by the distance matrix. + * These objects are not in any particular order, + * see hwloc_distances_obj_index() and hwloc_distances_obj_pair_values() + * for easy ways to find objects in this array and their corresponding values. + */ + unsigned long kind; /**< \brief OR'ed set of ::hwloc_distances_kind_e. */ + hwloc_uint64_t *values; /**< \brief Matrix of distances between objects, stored as a one-dimension array. + * + * Distance from i-th to j-th object is stored in slot i*nbobjs+j. + * The meaning of the value depends on the \p kind attribute. + */ +}; + +/** \brief Kinds of distance matrices. + * + * The \p kind attribute of struct hwloc_distances_s is a OR'ed set + * of kinds. + * + * A kind of format HWLOC_DISTANCES_KIND_FROM_* specifies where the + * distance information comes from, if known. + * + * A kind of format HWLOC_DISTANCES_KIND_MEANS_* specifies whether + * values are latencies or bandwidths, if applicable. + */ +enum hwloc_distances_kind_e { + /** \brief These distances were obtained from the operating system or hardware. + * \hideinitializer + */ + HWLOC_DISTANCES_KIND_FROM_OS = (1UL<<0), + /** \brief These distances were provided by the user. + * \hideinitializer + */ + HWLOC_DISTANCES_KIND_FROM_USER = (1UL<<1), + + /** \brief Distance values are similar to latencies between objects. + * Values are smaller for closer objects, hence minimal on the diagonal + * of the matrix (distance between an object and itself). + * It could also be the number of network hops between objects, etc. + * \hideinitializer + */ + HWLOC_DISTANCES_KIND_MEANS_LATENCY = (1UL<<2), + /** \brief Distance values are similar to bandwidths between objects. + * Values are higher for closer objects, hence maximal on the diagonal + * of the matrix (distance between an object and itself). + * Such values are currently ignored for distance-based grouping. 
+ * \hideinitializer + */ + HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH = (1UL<<3) +}; + +/** \brief Retrieve distance matrices. + * + * Retrieve distance matrices from the topology into the \p distances array. + * + * \p flags is currently unused, should be \c 0. + * + * \p kind serves as a filter. If \c 0, all distance matrices are returned. + * If it contains some HWLOC_DISTANCES_KIND_FROM_*, only distance matrices + * whose kind matches one of these are returned. + * If it contains some HWLOC_DISTANCES_KIND_MEANS_*, only distance matrices + * whose kind matches one of these are returned. + * + * On input, \p nr points to the number of distance matrices that may be stored + * in \p distances. + * On output, \p nr points to the number of distance matrices that were actually + * found, even if some of them couldn't be stored in \p distances. + * Distance matrices that couldn't be stored are ignored, but the function still + * returns success (\c 0). The caller may find out by comparing the value pointed + * by \p nr before and after the function call. + * + * Each distance matrix returned in the \p distances array should be released + * by the caller using hwloc_distances_release(). + */ +HWLOC_DECLSPEC int +hwloc_distances_get(hwloc_topology_t topology, + unsigned *nr, struct hwloc_distances_s **distances, + unsigned long kind, unsigned long flags); + +/** \brief Retrieve distance matrices for object at a specific depth in the topology. + * + * Identical to hwloc_distances_get() with the additional \p depth filter. + */ +HWLOC_DECLSPEC int +hwloc_distances_get_by_depth(hwloc_topology_t topology, int depth, + unsigned *nr, struct hwloc_distances_s **distances, + unsigned long kind, unsigned long flags); + +/** \brief Retrieve distance matrices for object of a specific type. + * + * Identical to hwloc_distances_get() with the additional \p type filter. 
+ */
+static __hwloc_inline int
+hwloc_distances_get_by_type(hwloc_topology_t topology, hwloc_obj_type_t type,
+                            unsigned *nr, struct hwloc_distances_s **distances,
+                            unsigned long kind, unsigned long flags)
+{
+  int depth = hwloc_get_type_depth(topology, type);
+  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) {
+    *nr = 0;
+    return 0;
+  }
+  return hwloc_distances_get_by_depth(topology, depth, nr, distances, kind, flags);
+}
+
+/** \brief Release a distance matrix structure previously returned by hwloc_distances_get(). */
+HWLOC_DECLSPEC void
+hwloc_distances_release(hwloc_topology_t topology, struct hwloc_distances_s *distances);
+
+/** @} */
+
+
+
+/** \defgroup hwlocality_distances_consult Helpers for consulting distance matrices
+ * @{
+ */
+
+/** \brief Find the index of an object in a distances structure.
+ *
+ * \return -1 if object \p obj is not involved in structure \p distances.
+ */
+static __hwloc_inline int
+hwloc_distances_obj_index(struct hwloc_distances_s *distances, hwloc_obj_t obj)
+{
+  unsigned i;
+  for(i=0; i<distances->nbobjs; i++)
+    if (distances->objs[i] == obj)
+      return (int)i;
+  return -1;
+}
+
+/** \brief Find the values between two objects in a distance matrix.
+ *
+ * The distance from \p obj1 to \p obj2 is stored in the value pointed by
+ * \p value1to2 and reciprocally.
+ *
+ * \return -1 if object \p obj1 or \p obj2 is not involved in structure \p distances.
+ */ +static __hwloc_inline int +hwloc_distances_obj_pair_values(struct hwloc_distances_s *distances, + hwloc_obj_t obj1, hwloc_obj_t obj2, + hwloc_uint64_t *value1to2, hwloc_uint64_t *value2to1) +{ + int i1 = hwloc_distances_obj_index(distances, obj1); + int i2 = hwloc_distances_obj_index(distances, obj2); + if (i1 < 0 || i2 < 0) + return -1; + *value1to2 = distances->values[i1 * distances->nbobjs + i2]; + *value2to1 = distances->values[i2 * distances->nbobjs + i1]; + return 0; +} + +/** @} */ + + + +/** \defgroup hwlocality_distances_add Add or remove distances between objects + * @{ + */ + +/** \brief Flags for adding a new distances to a topology. */ +enum hwloc_distances_add_flag_e { + /** \brief Try to group objects based on the newly provided distance information. + * \hideinitializer + */ + HWLOC_DISTANCES_ADD_FLAG_GROUP = (1UL<<0), + /** \brief If grouping, consider the distance values as inaccurate and relax the + * comparisons during the grouping algorithms. The actual accuracy may be modified + * through the HWLOC_GROUPING_ACCURACY environment variable (see \ref envvar). + * \hideinitializer + */ + HWLOC_DISTANCES_ADD_FLAG_GROUP_INACCURATE = (1UL<<1) +}; + +/** \brief Provide a new distance matrix. + * + * Provide the matrix of distances between a set of objects given by \p nbobjs + * and the \p objs array. \p nbobjs must be at least 2. + * The distances are stored as a one-dimension array in \p values. + * The distance from object i to object j is in slot i*nbobjs+j. + * + * \p kind specifies the kind of distance as a OR'ed set of ::hwloc_distances_kind_e. + * + * \p flags configures the behavior of the function using an optional OR'ed set of + * ::hwloc_distances_add_flag_e. + * + * Objects must be of the same type. They cannot be of type Group. 
+ */ +HWLOC_DECLSPEC int hwloc_distances_add(hwloc_topology_t topology, + unsigned nbobjs, hwloc_obj_t *objs, hwloc_uint64_t *values, + unsigned long kind, unsigned long flags); + +/** \brief Remove all distance matrices from a topology. + * + * Remove all distance matrices, either provided by the user or + * gathered through the OS. + * + * If these distances were used to group objects, these additional + *Group objects are not removed from the topology. + */ +HWLOC_DECLSPEC int hwloc_distances_remove(hwloc_topology_t topology); + +/** \brief Remove distance matrices for objects at a specific depth in the topology. + * + * Identical to hwloc_distances_remove() but only applies to one level of the topology. + */ +HWLOC_DECLSPEC int hwloc_distances_remove_by_depth(hwloc_topology_t topology, int depth); + +/** \brief Remove distance matrices for objects of a specific type in the topology. + * + * Identical to hwloc_distances_remove() but only applies to one level of the topology. + */ +static __hwloc_inline int +hwloc_distances_remove_by_type(hwloc_topology_t topology, hwloc_obj_type_t type) +{ + int depth = hwloc_get_type_depth(topology, type); + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) + return 0; + return hwloc_distances_remove_by_depth(topology, depth); +} + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_DISTANCES_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/export.h b/src/3rdparty/hwloc/include/hwloc/export.h new file mode 100644 index 00000000..b178b77e --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/export.h @@ -0,0 +1,278 @@ +/* + * Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Exporting Topologies to XML or to Synthetic strings. 
+ */ + +#ifndef HWLOC_EXPORT_H +#define HWLOC_EXPORT_H + +#ifndef HWLOC_H +#error Please include the main hwloc.h instead +#endif + + +#ifdef __cplusplus +extern "C" { +#elif 0 +} +#endif + + +/** \defgroup hwlocality_xmlexport Exporting Topologies to XML + * @{ + */ + +/** \brief Flags for exporting XML topologies. + * + * Flags to be given as a OR'ed set to hwloc_topology_export_xml(). + */ +enum hwloc_topology_export_xml_flags_e { + /** \brief Export XML that is loadable by hwloc v1.x. + * However, the export may miss some details about the topology. + * \hideinitializer + */ + HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1 = (1UL<<0) +}; + +/** \brief Export the topology into an XML file. + * + * This file may be loaded later through hwloc_topology_set_xml(). + * + * By default, the latest export format is used, which means older hwloc + * releases (e.g. v1.x) will not be able to import it. + * Exporting to v1.x specific XML format is possible using flag + * ::HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1 but it may miss some details + * about the topology. + * If there is any chance that the exported file may ever be imported + * back by a process using hwloc 1.x, one should consider detecting + * it at runtime and using the corresponding export format. + * + * \p flags is a OR'ed set of ::hwloc_topology_export_xml_flags_e. + * + * \return -1 if a failure occured. + * + * \note See also hwloc_topology_set_userdata_export_callback() + * for exporting application-specific object userdata. + * + * \note The topology-specific userdata pointer is ignored when exporting to XML. + * + * \note Only printable characters may be exported to XML string attributes. + * Any other character, especially any non-ASCII character, will be silently + * dropped. + * + * \note If \p name is "-", the XML output is sent to the standard output. 
+ */ +HWLOC_DECLSPEC int hwloc_topology_export_xml(hwloc_topology_t topology, const char *xmlpath, unsigned long flags); + +/** \brief Export the topology into a newly-allocated XML memory buffer. + * + * \p xmlbuffer is allocated by the callee and should be freed with + * hwloc_free_xmlbuffer() later in the caller. + * + * This memory buffer may be loaded later through hwloc_topology_set_xmlbuffer(). + * + * By default, the latest export format is used, which means older hwloc + * releases (e.g. v1.x) will not be able to import it. + * Exporting to v1.x specific XML format is possible using flag + * ::HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1 but it may miss some details + * about the topology. + * If there is any chance that the exported buffer may ever be imported + * back by a process using hwloc 1.x, one should consider detecting + * it at runtime and using the corresponding export format. + * + * The returned buffer ends with a \0 that is included in the returned + * length. + * + * \p flags is a OR'ed set of ::hwloc_topology_export_xml_flags_e. + * + * \return -1 if a failure occured. + * + * \note See also hwloc_topology_set_userdata_export_callback() + * for exporting application-specific object userdata. + * + * \note The topology-specific userdata pointer is ignored when exporting to XML. + * + * \note Only printable characters may be exported to XML string attributes. + * Any other character, especially any non-ASCII character, will be silently + * dropped. + */ +HWLOC_DECLSPEC int hwloc_topology_export_xmlbuffer(hwloc_topology_t topology, char **xmlbuffer, int *buflen, unsigned long flags); + +/** \brief Free a buffer allocated by hwloc_topology_export_xmlbuffer() */ +HWLOC_DECLSPEC void hwloc_free_xmlbuffer(hwloc_topology_t topology, char *xmlbuffer); + +/** \brief Set the application-specific callback for exporting object userdata + * + * The object userdata pointer is not exported to XML by default because hwloc + * does not know what it contains. 
+ * + * This function lets applications set \p export_cb to a callback function + * that converts this opaque userdata into an exportable string. + * + * \p export_cb is invoked during XML export for each object whose + * \p userdata pointer is not \c NULL. + * The callback should use hwloc_export_obj_userdata() or + * hwloc_export_obj_userdata_base64() to actually export + * something to XML (possibly multiple times per object). + * + * \p export_cb may be set to \c NULL if userdata should not be exported to XML. + * + * \note The topology-specific userdata pointer is ignored when exporting to XML. + */ +HWLOC_DECLSPEC void hwloc_topology_set_userdata_export_callback(hwloc_topology_t topology, + void (*export_cb)(void *reserved, hwloc_topology_t topology, hwloc_obj_t obj)); + +/** \brief Export some object userdata to XML + * + * This function may only be called from within the export() callback passed + * to hwloc_topology_set_userdata_export_callback(). + * It may be invoked one of multiple times to export some userdata to XML. + * The \p buffer content of length \p length is stored with optional name + * \p name. + * + * When importing this XML file, the import() callback (if set) will be + * called exactly as many times as hwloc_export_obj_userdata() was called + * during export(). It will receive the corresponding \p name, \p buffer + * and \p length arguments. + * + * \p reserved, \p topology and \p obj must be the first three parameters + * that were given to the export callback. + * + * Only printable characters may be exported to XML string attributes. + * If a non-printable character is passed in \p name or \p buffer, + * the function returns -1 with errno set to EINVAL. + * + * If exporting binary data, the application should first encode into + * printable characters only (or use hwloc_export_obj_userdata_base64()). + * It should also take care of portability issues if the export may + * be reimported on a different architecture. 
+ */ +HWLOC_DECLSPEC int hwloc_export_obj_userdata(void *reserved, hwloc_topology_t topology, hwloc_obj_t obj, const char *name, const void *buffer, size_t length); + +/** \brief Encode and export some object userdata to XML + * + * This function is similar to hwloc_export_obj_userdata() but it encodes + * the input buffer into printable characters before exporting. + * On import, decoding is automatically performed before the data is given + * to the import() callback if any. + * + * This function may only be called from within the export() callback passed + * to hwloc_topology_set_userdata_export_callback(). + * + * The function does not take care of portability issues if the export + * may be reimported on a different architecture. + */ +HWLOC_DECLSPEC int hwloc_export_obj_userdata_base64(void *reserved, hwloc_topology_t topology, hwloc_obj_t obj, const char *name, const void *buffer, size_t length); + +/** \brief Set the application-specific callback for importing userdata + * + * On XML import, userdata is ignored by default because hwloc does not know + * how to store it in memory. + * + * This function lets applications set \p import_cb to a callback function + * that will get the XML-stored userdata and store it in the object as expected + * by the application. + * + * \p import_cb is called during hwloc_topology_load() as many times as + * hwloc_export_obj_userdata() was called during export. The topology + * is not entirely setup yet. Object attributes are ready to consult, + * but links between objects are not. + * + * \p import_cb may be \c NULL if userdata should be ignored during import. + * + * \note \p buffer contains \p length characters followed by a null byte ('\0'). + * + * \note This function should be called before hwloc_topology_load(). + * + * \note The topology-specific userdata pointer is ignored when importing from XML. 
+ */ +HWLOC_DECLSPEC void hwloc_topology_set_userdata_import_callback(hwloc_topology_t topology, + void (*import_cb)(hwloc_topology_t topology, hwloc_obj_t obj, const char *name, const void *buffer, size_t length)); + +/** @} */ + + +/** \defgroup hwlocality_syntheticexport Exporting Topologies to Synthetic + * @{ + */ + +/** \brief Flags for exporting synthetic topologies. + * + * Flags to be given as a OR'ed set to hwloc_topology_export_synthetic(). + */ +enum hwloc_topology_export_synthetic_flags_e { + /** \brief Export extended types such as L2dcache as basic types such as Cache. + * + * This is required if loading the synthetic description with hwloc < 1.9. + * \hideinitializer + */ + HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES = (1UL<<0), + + /** \brief Do not export level attributes. + * + * Ignore level attributes such as memory/cache sizes or PU indexes. + * This is required if loading the synthetic description with hwloc < 1.10. + * \hideinitializer + */ + HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS = (1UL<<1), + + /** \brief Export the memory hierarchy as expected in hwloc 1.x. + * + * Instead of attaching memory children to levels, export single NUMA node child + * as normal intermediate levels, when possible. + * This is required if loading the synthetic description with hwloc 1.x. + * However this may fail if some objects have multiple local NUMA nodes. + * \hideinitializer + */ + HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1 = (1UL<<2), + + /** \brief Do not export memory information. + * + * Only export the actual hierarchy of normal CPU-side objects and ignore + * where memory is attached. + * This is useful for when the hierarchy of CPUs is what really matters, + * but it behaves as if there was a single machine-wide NUMA node. + * \hideinitializer + */ + HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_IGNORE_MEMORY = (1UL<<3) +}; + +/** \brief Export the topology as a synthetic string. 
+ *
+ * At most \p buflen characters will be written in \p buffer,
+ * including the terminating \0.
+ *
+ * This exported string may be given back to hwloc_topology_set_synthetic().
+ *
+ * \p flags is a OR'ed set of ::hwloc_topology_export_synthetic_flags_e.
+ *
+ * \return The number of characters that were written,
+ * not including the terminating \0.
+ *
+ * \return -1 if the topology could not be exported,
+ * for instance if it is not symmetric.
+ *
+ * \note I/O and Misc children are ignored, the synthetic string only
+ * describes normal children.
+ *
+ * \note A 1024-byte buffer should be large enough for exporting
+ * topologies in the vast majority of cases.
+ */
+ HWLOC_DECLSPEC int hwloc_topology_export_synthetic(hwloc_topology_t topology, char *buffer, size_t buflen, unsigned long flags);
+
+/** @} */
+
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+
+#endif /* HWLOC_EXPORT_H */
diff --git a/src/3rdparty/hwloc/include/hwloc/gl.h b/src/3rdparty/hwloc/include/hwloc/gl.h
new file mode 100644
index 00000000..3e643fa9
--- /dev/null
+++ b/src/3rdparty/hwloc/include/hwloc/gl.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright © 2012 Blue Brain Project, EPFL. All rights reserved.
+ * Copyright © 2012-2013 Inria. All rights reserved.
+ * See COPYING in top-level directory.
+ */
+
+/** \file
+ * \brief Macros to help interaction between hwloc and OpenGL displays.
+ *
+ * Applications that use both hwloc and OpenGL may want to include
+ * this file so as to get topology information for OpenGL displays.
+ */
+
+#ifndef HWLOC_GL_H
+#define HWLOC_GL_H
+
+#include <hwloc.h>
+
+#include <stdio.h>
+#include <string.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** \defgroup hwlocality_gl Interoperability with OpenGL displays
+ *
+ * This interface offers ways to retrieve topology information about
+ * OpenGL displays.
+ *
+ * Only the NVIDIA display locality information is currently available,
+ * using the NV-CONTROL X11 extension and the NVCtrl library.
+ * + * @{ + */ + +/** \brief Get the hwloc OS device object corresponding to the + * OpenGL display given by port and device index. + * + * Return the OS device object describing the OpenGL display + * whose port (server) is \p port and device (screen) is \p device. + * Return NULL if there is none. + * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. + * I/O devices detection and the GL component must be enabled in the topology. + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object (unless PCI devices are filtered out). + */ +static __hwloc_inline hwloc_obj_t +hwloc_gl_get_display_osdev_by_port_device(hwloc_topology_t topology, + unsigned port, unsigned device) +{ + unsigned x = (unsigned) -1, y = (unsigned) -1; + hwloc_obj_t osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + if (HWLOC_OBJ_OSDEV_GPU == osdev->attr->osdev.type + && osdev->name + && sscanf(osdev->name, ":%u.%u", &x, &y) == 2 + && port == x && device == y) + return osdev; + } + errno = EINVAL; + return NULL; +} + +/** \brief Get the hwloc OS device object corresponding to the + * OpenGL display given by name. + * + * Return the OS device object describing the OpenGL display + * whose name is \p name, built as ":port.device" such as ":0.0" . + * Return NULL if there is none. + * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. + * I/O devices detection and the GL component must be enabled in the topology. + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object (unless PCI devices are filtered out). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_gl_get_display_osdev_by_name(hwloc_topology_t topology, + const char *name) +{ + hwloc_obj_t osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + if (HWLOC_OBJ_OSDEV_GPU == osdev->attr->osdev.type + && osdev->name + && !strcmp(name, osdev->name)) + return osdev; + } + errno = EINVAL; + return NULL; +} + +/** \brief Get the OpenGL display port and device corresponding + * to the given hwloc OS object. + * + * Return the OpenGL display port (server) in \p port and device (screen) + * in \p screen that correspond to the given hwloc OS device object. + * Return \c -1 if there is none. + * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. + * I/O devices detection and the GL component must be enabled in the topology. + */ +static __hwloc_inline int +hwloc_gl_get_display_by_osdev(hwloc_topology_t topology __hwloc_attribute_unused, + hwloc_obj_t osdev, + unsigned *port, unsigned *device) +{ + unsigned x = -1, y = -1; + if (HWLOC_OBJ_OSDEV_GPU == osdev->attr->osdev.type + && sscanf(osdev->name, ":%u.%u", &x, &y) == 2) { + *port = x; + *device = y; + return 0; + } + errno = EINVAL; + return -1; +} + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_GL_H */ + diff --git a/src/3rdparty/hwloc/include/hwloc/glibc-sched.h b/src/3rdparty/hwloc/include/hwloc/glibc-sched.h new file mode 100644 index 00000000..1f9ba7cd --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/glibc-sched.h @@ -0,0 +1,125 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2013 inria. All rights reserved. + * Copyright © 2009-2011 Université Bordeaux + * Copyright © 2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Macros to help interaction between hwloc and glibc scheduling routines. 
+ *
+ * Applications that use both hwloc and glibc scheduling routines such as
+ * sched_getaffinity() or pthread_attr_setaffinity_np() may want to include
+ * this file so as to ease conversion between their respective types.
+ */
+
+#ifndef HWLOC_GLIBC_SCHED_H
+#define HWLOC_GLIBC_SCHED_H
+
+#include <hwloc.h>
+#include <hwloc/helper.h>
+#include <assert.h>
+
+#if !defined _GNU_SOURCE || !defined _SCHED_H || (!defined CPU_SETSIZE && !defined sched_priority)
+#error Please make sure to include sched.h before including glibc-sched.h, and define _GNU_SOURCE before any inclusion of sched.h
+#endif
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifdef HWLOC_HAVE_CPU_SET
+
+
+/** \defgroup hwlocality_glibc_sched Interoperability with glibc sched affinity
+ *
+ * This interface offers ways to convert between hwloc cpusets and glibc cpusets
+ * such as those manipulated by sched_getaffinity() or pthread_attr_setaffinity_np().
+ *
+ * \note Topology \p topology must match the current machine.
+ *
+ * @{
+ */
+
+
+/** \brief Convert hwloc CPU set \p toposet into glibc sched affinity CPU set \p schedset
+ *
+ * This function may be used before calling sched_setaffinity or any other function
+ * that takes a cpu_set_t as input parameter.
+ * + * \p schedsetsize should be sizeof(cpu_set_t) unless \p schedset was dynamically allocated with CPU_ALLOC + */ +static __hwloc_inline int +hwloc_cpuset_to_glibc_sched_affinity(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t hwlocset, + cpu_set_t *schedset, size_t schedsetsize) +{ +#ifdef CPU_ZERO_S + unsigned cpu; + CPU_ZERO_S(schedsetsize, schedset); + hwloc_bitmap_foreach_begin(cpu, hwlocset) + CPU_SET_S(cpu, schedsetsize, schedset); + hwloc_bitmap_foreach_end(); +#else /* !CPU_ZERO_S */ + unsigned cpu; + CPU_ZERO(schedset); + assert(schedsetsize == sizeof(cpu_set_t)); + hwloc_bitmap_foreach_begin(cpu, hwlocset) + CPU_SET(cpu, schedset); + hwloc_bitmap_foreach_end(); +#endif /* !CPU_ZERO_S */ + return 0; +} + +/** \brief Convert glibc sched affinity CPU set \p schedset into hwloc CPU set + * + * This function may be used before calling sched_setaffinity or any other function + * that takes a cpu_set_t as input parameter. + * + * \p schedsetsize should be sizeof(cpu_set_t) unless \p schedset was dynamically allocated with CPU_ALLOC + */ +static __hwloc_inline int +hwloc_cpuset_from_glibc_sched_affinity(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_cpuset_t hwlocset, + const cpu_set_t *schedset, size_t schedsetsize) +{ + int cpu; +#ifdef CPU_ZERO_S + int count; +#endif + hwloc_bitmap_zero(hwlocset); +#ifdef CPU_ZERO_S + count = CPU_COUNT_S(schedsetsize, schedset); + cpu = 0; + while (count) { + if (CPU_ISSET_S(cpu, schedsetsize, schedset)) { + hwloc_bitmap_set(hwlocset, cpu); + count--; + } + cpu++; + } +#else /* !CPU_ZERO_S */ + /* sched.h does not support dynamic cpu_set_t (introduced in glibc 2.7), + * assume we have a very old interface without CPU_COUNT (added in 2.6) + */ + assert(schedsetsize == sizeof(cpu_set_t)); + for(cpu=0; cpu +#include + + +#ifdef __cplusplus +extern "C" { +#endif + + +/** \defgroup hwlocality_helper_find_inside Finding Objects inside a CPU set + * @{ + */ + +/** \brief Get the first largest 
object included in the given cpuset \p set. + * + * \return the first object that is included in \p set and whose parent is not. + * + * This is convenient for iterating over all largest objects within a CPU set + * by doing a loop getting the first largest object and clearing its CPU set + * from the remaining CPU set. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_first_largest_obj_inside_cpuset(hwloc_topology_t topology, hwloc_const_cpuset_t set) +{ + hwloc_obj_t obj = hwloc_get_root_obj(topology); + if (!hwloc_bitmap_intersects(obj->cpuset, set)) + return NULL; + while (!hwloc_bitmap_isincluded(obj->cpuset, set)) { + /* while the object intersects without being included, look at its children */ + hwloc_obj_t child = obj->first_child; + while (child) { + if (hwloc_bitmap_intersects(child->cpuset, set)) + break; + child = child->next_sibling; + } + if (!child) + /* no child intersects, return their father */ + return obj; + /* found one intersecting child, look at its children */ + obj = child; + } + /* obj is included, return it */ + return obj; +} + +/** \brief Get the set of largest objects covering exactly a given cpuset \p set + * + * \return the number of objects returned in \p objs. + */ +HWLOC_DECLSPEC int hwloc_get_largest_objs_inside_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set, + hwloc_obj_t * __hwloc_restrict objs, int max); + +/** \brief Return the next object at depth \p depth included in CPU set \p set. + * + * If \p prev is \c NULL, return the first object at depth \p depth + * included in \p set. The next invokation should pass the previous + * return value in \p prev so as to obtain the next object in \p set. + * + * \note Objects with empty CPU sets are ignored + * (otherwise they would be considered included in any given set). + * + * \note This function cannot work if objects at the given depth do + * not have CPU sets (I/O or Misc objects). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, + int depth, hwloc_obj_t prev) +{ + hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev); + if (!next) + return NULL; + while (next && (hwloc_bitmap_iszero(next->cpuset) || !hwloc_bitmap_isincluded(next->cpuset, set))) + next = next->next_cousin; + return next; +} + +/** \brief Return the next object of type \p type included in CPU set \p set. + * + * If there are multiple or no depth for given type, return \c NULL + * and let the caller fallback to + * hwloc_get_next_obj_inside_cpuset_by_depth(). + * + * \note Objects with empty CPU sets are ignored + * (otherwise they would be considered included in any given set). + * + * \note This function cannot work if objects of the given type do + * not have CPU sets (I/O or Misc objects). + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, + hwloc_obj_type_t type, hwloc_obj_t prev) +{ + int depth = hwloc_get_type_depth(topology, type); + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) + return NULL; + return hwloc_get_next_obj_inside_cpuset_by_depth(topology, set, depth, prev); +} + +/** \brief Return the (logically) \p idx -th object at depth \p depth included in CPU set \p set. + * + * \note Objects with empty CPU sets are ignored + * (otherwise they would be considered included in any given set). + * + * \note This function cannot work if objects at the given depth do + * not have CPU sets (I/O or Misc objects). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, + int depth, unsigned idx) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, + int depth, unsigned idx) +{ + hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0); + unsigned count = 0; + if (!obj) + return NULL; + while (obj) { + if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) { + if (count == idx) + return obj; + count++; + } + obj = obj->next_cousin; + } + return NULL; +} + +/** \brief Return the \p idx -th object of type \p type included in CPU set \p set. + * + * If there are multiple or no depth for given type, return \c NULL + * and let the caller fallback to + * hwloc_get_obj_inside_cpuset_by_depth(). + * + * \note Objects with empty CPU sets are ignored + * (otherwise they would be considered included in any given set). + * + * \note This function cannot work if objects of the given type do + * not have CPU sets (I/O or Misc objects). + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, + hwloc_obj_type_t type, unsigned idx) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, + hwloc_obj_type_t type, unsigned idx) +{ + int depth = hwloc_get_type_depth(topology, type); + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) + return NULL; + return hwloc_get_obj_inside_cpuset_by_depth(topology, set, depth, idx); +} + +/** \brief Return the number of objects at depth \p depth included in CPU set \p set. + * + * \note Objects with empty CPU sets are ignored + * (otherwise they would be considered included in any given set). 
+ * + * \note This function cannot work if objects at the given depth do + * not have CPU sets (I/O or Misc objects). + */ +static __hwloc_inline unsigned +hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, + int depth) __hwloc_attribute_pure; +static __hwloc_inline unsigned +hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, + int depth) +{ + hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0); + unsigned count = 0; + if (!obj) + return 0; + while (obj) { + if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) + count++; + obj = obj->next_cousin; + } + return count; +} + +/** \brief Return the number of objects of type \p type included in CPU set \p set. + * + * If no object for that type exists inside CPU set \p set, 0 is + * returned. If there are several levels with objects of that type + * inside CPU set \p set, -1 is returned. + * + * \note Objects with empty CPU sets are ignored + * (otherwise they would be considered included in any given set). + * + * \note This function cannot work if objects of the given type do + * not have CPU sets (I/O objects). + */ +static __hwloc_inline int +hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, + hwloc_obj_type_t type) __hwloc_attribute_pure; +static __hwloc_inline int +hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, + hwloc_obj_type_t type) +{ + int depth = hwloc_get_type_depth(topology, type); + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) + return 0; + if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) + return -1; /* FIXME: agregate nbobjs from different levels? */ + return (int) hwloc_get_nbobjs_inside_cpuset_by_depth(topology, set, depth); +} + +/** \brief Return the logical index among the objects included in CPU set \p set. 
+ * + * Consult all objects in the same level as \p obj and inside CPU set \p set + * in the logical order, and return the index of \p obj within them. + * If \p set covers the entire topology, this is the logical index of \p obj. + * Otherwise, this is similar to a logical index within the part of the topology + * defined by CPU set \p set. + * + * \note Objects with empty CPU sets are ignored + * (otherwise they would be considered included in any given set). + * + * \note This function cannot work if obj does not have CPU sets (I/O objects). + */ +static __hwloc_inline int +hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set, + hwloc_obj_t obj) __hwloc_attribute_pure; +static __hwloc_inline int +hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set, + hwloc_obj_t obj) +{ + int idx = 0; + if (!hwloc_bitmap_isincluded(obj->cpuset, set)) + return -1; + /* count how many objects are inside the cpuset on the way from us to the beginning of the level */ + while ((obj = obj->prev_cousin) != NULL) + if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) + idx++; + return idx; +} + +/** @} */ + + + +/** \defgroup hwlocality_helper_find_covering Finding Objects covering at least CPU set + * @{ + */ + +/** \brief Get the child covering at least CPU set \p set. + * + * \return \c NULL if no child matches or if \p set is empty. + * + * \note This function cannot work if parent does not have a CPU set (I/O or Misc objects). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set, + hwloc_obj_t parent) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set, + hwloc_obj_t parent) +{ + hwloc_obj_t child; + if (hwloc_bitmap_iszero(set)) + return NULL; + child = parent->first_child; + while (child) { + if (child->cpuset && hwloc_bitmap_isincluded(set, child->cpuset)) + return child; + child = child->next_sibling; + } + return NULL; +} + +/** \brief Get the lowest object covering at least CPU set \p set + * + * \return \c NULL if no object matches or if \p set is empty. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) +{ + struct hwloc_obj *current = hwloc_get_root_obj(topology); + if (hwloc_bitmap_iszero(set) || !hwloc_bitmap_isincluded(set, current->cpuset)) + return NULL; + while (1) { + hwloc_obj_t child = hwloc_get_child_covering_cpuset(topology, set, current); + if (!child) + return current; + current = child; + } +} + +/** \brief Iterate through same-depth objects covering at least CPU set \p set + * + * If object \p prev is \c NULL, return the first object at depth \p + * depth covering at least part of CPU set \p set. The next + * invokation should pass the previous return value in \p prev so as + * to obtain the next object covering at least another part of \p set. + * + * \note This function cannot work if objects at the given depth do + * not have CPU sets (I/O or Misc objects). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_obj_covering_cpuset_by_depth(hwloc_topology_t topology, hwloc_const_cpuset_t set, + int depth, hwloc_obj_t prev) +{ + hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev); + if (!next) + return NULL; + while (next && !hwloc_bitmap_intersects(set, next->cpuset)) + next = next->next_cousin; + return next; +} + +/** \brief Iterate through same-type objects covering at least CPU set \p set + * + * If object \p prev is \c NULL, return the first object of type \p + * type covering at least part of CPU set \p set. The next invokation + * should pass the previous return value in \p prev so as to obtain + * the next object of type \p type covering at least another part of + * \p set. + * + * If there are no or multiple depths for type \p type, \c NULL is returned. + * The caller may fallback to hwloc_get_next_obj_covering_cpuset_by_depth() + * for each depth. + * + * \note This function cannot work if objects of the given type do + * not have CPU sets (I/O or Misc objects). + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_obj_covering_cpuset_by_type(hwloc_topology_t topology, hwloc_const_cpuset_t set, + hwloc_obj_type_t type, hwloc_obj_t prev) +{ + int depth = hwloc_get_type_depth(topology, type); + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) + return NULL; + return hwloc_get_next_obj_covering_cpuset_by_depth(topology, set, depth, prev); +} + +/** @} */ + + + +/** \defgroup hwlocality_helper_ancestors Looking at Ancestor and Child Objects + * @{ + * + * Be sure to see the figure in \ref termsanddefs that shows a + * complete topology tree, including depths, child/sibling/cousin + * relationships, and an example of an asymmetric topology where one + * package has fewer caches than its peers. + */ + +/** \brief Returns the ancestor object of \p obj at depth \p depth. 
+ * + * \note \p depth should not be the depth of PU or NUMA objects + * since they are ancestors of no objects (except Misc or I/O). + * This function rather expects an intermediate level depth, + * such as the depth of Packages, Cores, or Caches. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj) +{ + hwloc_obj_t ancestor = obj; + if (obj->depth < depth) + return NULL; + while (ancestor && ancestor->depth > depth) + ancestor = ancestor->parent; + return ancestor; +} + +/** \brief Returns the ancestor object of \p obj with type \p type. + * + * \note \p type should not be ::HWLOC_OBJ_PU or ::HWLOC_OBJ_NUMANODE + * since these objects are ancestors of no objects (except Misc or I/O). + * This function rather expects an intermediate object type, + * such as ::HWLOC_OBJ_PACKAGE, ::HWLOC_OBJ_CORE, etc. 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj) +{ + hwloc_obj_t ancestor = obj->parent; + while (ancestor && ancestor->type != type) + ancestor = ancestor->parent; + return ancestor; +} + +/** \brief Returns the common parent object to objects \p obj1 and \p obj2 */ +static __hwloc_inline hwloc_obj_t +hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2) +{ + /* the loop isn't so easy since intermediate ancestors may have + * different depth, causing us to alternate between using obj1->parent + * and obj2->parent. Also, even if at some point we find ancestors of + * of the same depth, their ancestors may have different depth again. + */ + while (obj1 != obj2) { + while (obj1->depth > obj2->depth) + obj1 = obj1->parent; + while (obj2->depth > obj1->depth) + obj2 = obj2->parent; + if (obj1 != obj2 && obj1->depth == obj2->depth) { + obj1 = obj1->parent; + obj2 = obj2->parent; + } + } + return obj1; +} + +/** \brief Returns true if \p obj is inside the subtree beginning with ancestor object \p subtree_root. + * + * \note This function cannot work if \p obj and \p subtree_root objects do + * not have CPU sets (I/O or Misc objects). 
+ */ +static __hwloc_inline int +hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root) __hwloc_attribute_pure; +static __hwloc_inline int +hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root) +{ + return obj->cpuset && subtree_root->cpuset && hwloc_bitmap_isincluded(obj->cpuset, subtree_root->cpuset); +} + +/** \brief Return the next child. + * + * Return the next child among the normal children list, + * then among the memory children list, then among the I/O + * children list, then among the Misc children list. + * + * If \p prev is \c NULL, return the first child. + * + * Return \c NULL when there is no next child. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_child (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t parent, hwloc_obj_t prev) +{ + hwloc_obj_t obj; + int state = 0; + if (prev) { + if (prev->type == HWLOC_OBJ_MISC) + state = 3; + else if (prev->type == HWLOC_OBJ_BRIDGE || prev->type == HWLOC_OBJ_PCI_DEVICE || prev->type == HWLOC_OBJ_OS_DEVICE) + state = 2; + else if (prev->type == HWLOC_OBJ_NUMANODE) + state = 1; + obj = prev->next_sibling; + } else { + obj = parent->first_child; + } + if (!obj && state == 0) { + obj = parent->memory_first_child; + state = 1; + } + if (!obj && state == 1) { + obj = parent->io_first_child; + state = 2; + } + if (!obj && state == 2) { + obj = parent->misc_first_child; + state = 3; + } + return obj; +} + +/** @} */ + + + +/** \defgroup hwlocality_helper_types Kinds of object Type + * @{ + * + * Each object type is + * either Normal (i.e. hwloc_obj_type_is_normal() returns 1), + * or Memory (i.e. hwloc_obj_type_is_memory() returns 1) + * or I/O (i.e. hwloc_obj_type_is_io() returns 1) + * or Misc (i.e. equal to ::HWLOC_OBJ_MISC). + * It cannot be of more than one of these kinds. + */ + +/** \brief Check whether an object type is Normal. 
+ * + * Normal objects are objects of the main CPU hierarchy + * (Machine, Package, Core, PU, CPU caches, etc.), + * but they are not NUMA nodes, I/O devices or Misc objects. + * + * They are attached to parent as Normal children, + * not as Memory, I/O or Misc children. + * + * \return 1 if an object of type \p type is a Normal object, 0 otherwise. + */ +HWLOC_DECLSPEC int +hwloc_obj_type_is_normal(hwloc_obj_type_t type); + +/** \brief Check whether an object type is I/O. + * + * I/O objects are objects attached to their parents + * in the I/O children list. + * This current includes Bridges, PCI and OS devices. + * + * \return 1 if an object of type \p type is a I/O object, 0 otherwise. + */ +HWLOC_DECLSPEC int +hwloc_obj_type_is_io(hwloc_obj_type_t type); + +/** \brief Check whether an object type is Memory. + * + * Memory objects are objects attached to their parents + * in the Memory children list. + * This current only includes NUMA nodes. + * + * \return 1 if an object of type \p type is a Memory object, 0 otherwise. + */ +HWLOC_DECLSPEC int +hwloc_obj_type_is_memory(hwloc_obj_type_t type); + +/** \brief Check whether an object type is a Cache (Data, Unified or Instruction). + * + * \return 1 if an object of type \p type is a Cache, 0 otherwise. + */ +HWLOC_DECLSPEC int +hwloc_obj_type_is_cache(hwloc_obj_type_t type); + +/** \brief Check whether an object type is a Data or Unified Cache. + * + * \return 1 if an object of type \p type is a Data or Unified Cache, 0 otherwise. + */ +HWLOC_DECLSPEC int +hwloc_obj_type_is_dcache(hwloc_obj_type_t type); + +/** \brief Check whether an object type is a Instruction Cache, + * + * \return 1 if an object of type \p type is a Instruction Cache, 0 otherwise. + */ +HWLOC_DECLSPEC int +hwloc_obj_type_is_icache(hwloc_obj_type_t type); + +/** @} */ + + + +/** \defgroup hwlocality_helper_find_cache Looking at Cache Objects + * @{ + */ + +/** \brief Find the depth of cache objects matching cache level and type. 
+ * + * Return the depth of the topology level that contains cache objects + * whose attributes match \p cachelevel and \p cachetype. + + * This function is identical to calling hwloc_get_type_depth() with the + * corresponding type such as ::HWLOC_OBJ_L1ICACHE, except that it may + * also return a Unified cache when looking for an instruction cache. + * + * If no cache level matches, ::HWLOC_TYPE_DEPTH_UNKNOWN is returned. + * + * If \p cachetype is ::HWLOC_OBJ_CACHE_UNIFIED, the depth of the + * unique matching unified cache level is returned. + * + * If \p cachetype is ::HWLOC_OBJ_CACHE_DATA or ::HWLOC_OBJ_CACHE_INSTRUCTION, + * either a matching cache, or a unified cache is returned. + * + * If \p cachetype is \c -1, it is ignored and multiple levels may + * match. The function returns either the depth of a uniquely matching + * level or ::HWLOC_TYPE_DEPTH_MULTIPLE. + */ +static __hwloc_inline int +hwloc_get_cache_type_depth (hwloc_topology_t topology, + unsigned cachelevel, hwloc_obj_cache_type_t cachetype) +{ + int depth; + int found = HWLOC_TYPE_DEPTH_UNKNOWN; + for (depth=0; ; depth++) { + hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, 0); + if (!obj) + break; + if (!hwloc_obj_type_is_dcache(obj->type) || obj->attr->cache.depth != cachelevel) + /* doesn't match, try next depth */ + continue; + if (cachetype == (hwloc_obj_cache_type_t) -1) { + if (found != HWLOC_TYPE_DEPTH_UNKNOWN) { + /* second match, return MULTIPLE */ + return HWLOC_TYPE_DEPTH_MULTIPLE; + } + /* first match, mark it as found */ + found = depth; + continue; + } + if (obj->attr->cache.type == cachetype || obj->attr->cache.type == HWLOC_OBJ_CACHE_UNIFIED) + /* exact match (either unified is alone, or we match instruction or data), return immediately */ + return depth; + } + /* went to the bottom, return what we found */ + return found; +} + +/** \brief Get the first data (or unified) cache covering a cpuset \p set + * + * \return \c NULL if no cache matches. 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) +{ + hwloc_obj_t current = hwloc_get_obj_covering_cpuset(topology, set); + while (current) { + if (hwloc_obj_type_is_dcache(current->type)) + return current; + current = current->parent; + } + return NULL; +} + +/** \brief Get the first data (or unified) cache shared between an object and somebody else. + * + * \return \c NULL if no cache matches or if an invalid object is given. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj) +{ + hwloc_obj_t current = obj->parent; + if (!obj->cpuset) + return NULL; + while (current) { + if (!hwloc_bitmap_isequal(current->cpuset, obj->cpuset) + && hwloc_obj_type_is_dcache(current->type)) + return current; + current = current->parent; + } + return NULL; +} + +/** @} */ + + + +/** \defgroup hwlocality_helper_find_misc Finding objects, miscellaneous helpers + * @{ + * + * Be sure to see the figure in \ref termsanddefs that shows a + * complete topology tree, including depths, child/sibling/cousin + * relationships, and an example of an asymmetric topology where one + * package has fewer caches than its peers. + */ + +/** \brief Returns the object of type ::HWLOC_OBJ_PU with \p os_index. + * + * This function is useful for converting a CPU set into the PU + * objects it contains. + * When retrieving the current binding (e.g. with hwloc_get_cpubind()), + * one may iterate over the bits of the resulting CPU set with + * hwloc_bitmap_foreach_begin(), and find the corresponding PUs + * with this function. 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) +{ + hwloc_obj_t obj = NULL; + while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, obj)) != NULL) + if (obj->os_index == os_index) + return obj; + return NULL; +} + +/** \brief Returns the object of type ::HWLOC_OBJ_NUMANODE with \p os_index. + * + * This function is useful for converting a nodeset into the NUMA node + * objects it contains. + * When retrieving the current binding (e.g. with hwloc_get_membind() with HWLOC_MEMBIND_BYNODESET), + * one may iterate over the bits of the resulting nodeset with + * hwloc_bitmap_foreach_begin(), and find the corresponding NUMA nodes + * with this function. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) +{ + hwloc_obj_t obj = NULL; + while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NUMANODE, obj)) != NULL) + if (obj->os_index == os_index) + return obj; + return NULL; +} + +/** \brief Do a depth-first traversal of the topology to find and sort + * + * all objects that are at the same depth than \p src. + * Report in \p objs up to \p max physically closest ones to \p src. + * + * \return the number of objects returned in \p objs. + * + * \return 0 if \p src is an I/O object. + * + * \note This function requires the \p src object to have a CPU set. + */ +/* TODO: rather provide an iterator? Provide a way to know how much should be allocated? By returning the total number of objects instead? 
*/ +HWLOC_DECLSPEC unsigned hwloc_get_closest_objs (hwloc_topology_t topology, hwloc_obj_t src, hwloc_obj_t * __hwloc_restrict objs, unsigned max); + +/** \brief Find an object below another object, both specified by types and indexes. + * + * Start from the top system object and find object of type \p type1 + * and logical index \p idx1. Then look below this object and find another + * object of type \p type2 and logical index \p idx2. Indexes are specified + * within the parent, not withing the entire system. + * + * For instance, if type1 is PACKAGE, idx1 is 2, type2 is CORE and idx2 + * is 3, return the fourth core object below the third package. + * + * \note This function requires these objects to have a CPU set. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_below_by_type (hwloc_topology_t topology, + hwloc_obj_type_t type1, unsigned idx1, + hwloc_obj_type_t type2, unsigned idx2) __hwloc_attribute_pure; +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_below_by_type (hwloc_topology_t topology, + hwloc_obj_type_t type1, unsigned idx1, + hwloc_obj_type_t type2, unsigned idx2) +{ + hwloc_obj_t obj; + obj = hwloc_get_obj_by_type (topology, type1, idx1); + if (!obj) + return NULL; + return hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, type2, idx2); +} + +/** \brief Find an object below a chain of objects specified by types and indexes. + * + * This is a generalized version of hwloc_get_obj_below_by_type(). + * + * Arrays \p typev and \p idxv must contain \p nr types and indexes. + * + * Start from the top system object and walk the arrays \p typev and \p idxv. + * For each type and logical index couple in the arrays, look under the previously found + * object to find the index-th object of the given type. + * Indexes are specified within the parent, not withing the entire system. 
+ *
+ * For instance, if nr is 3, typev contains NODE, PACKAGE and CORE,
+ * and idxv contains 0, 1 and 2, return the third core object below
+ * the second package below the first NUMA node.
+ *
+ * \note This function requires all these objects and the root object
+ * to have a CPU set.
+ */
+static __hwloc_inline hwloc_obj_t
+hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv) __hwloc_attribute_pure;
+static __hwloc_inline hwloc_obj_t
+hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv)
+{
+  hwloc_obj_t obj = hwloc_get_root_obj(topology);
+  int i;
+  for(i=0; i<nr; i++) {
+    if (!obj)
+      return NULL;
+    obj = hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, typev[i], idxv[i]);
+  }
+  return obj;
+}
+
+/** @} */
+
+
+
+/** \defgroup hwlocality_helper_distribute Distributing items over a topology
+ * @{
+ */
+
+/** \brief Flags to be given to hwloc_distrib().
+ */
+enum hwloc_distrib_flags_e {
+  /** \brief Distrib in reverse order, starting from the last objects.
+   * \hideinitializer
+   */
+  HWLOC_DISTRIB_FLAG_REVERSE = (1UL<<0)
+};
+
+/** \brief Distribute \p n items over the topology under \p roots
+ *
+ * Array \p set will be filled with \p n cpusets recursively distributed
+ * linearly over the topology under objects \p roots, down to depth \p until
+ * (which can be INT_MAX to distribute down to the finest level).
+ *
+ * \p n_roots is usually 1 and \p roots only contains the topology root object
+ * so as to distribute over the entire topology.
+ *
+ * This is typically useful when an application wants to distribute \p n
+ * threads over a machine, giving each of them as much private cache as
+ * possible and keeping them locally in number order.
+ *
+ * The caller may typically want to also call hwloc_bitmap_singlify()
+ * before binding a thread so that it does not move at all.
+ *
+ * \p flags should be 0 or a OR'ed set of ::hwloc_distrib_flags_e.
+ *
+ * \note This function requires the \p roots objects to have a CPU set.
+ * + * \note This function replaces the now deprecated hwloc_distribute() + * and hwloc_distributev() functions. + */ +static __hwloc_inline int +hwloc_distrib(hwloc_topology_t topology, + hwloc_obj_t *roots, unsigned n_roots, + hwloc_cpuset_t *set, + unsigned n, + int until, unsigned long flags) +{ + unsigned i; + unsigned tot_weight; + unsigned given, givenweight; + hwloc_cpuset_t *cpusetp = set; + + if (flags & ~HWLOC_DISTRIB_FLAG_REVERSE) { + errno = EINVAL; + return -1; + } + + tot_weight = 0; + for (i = 0; i < n_roots; i++) + tot_weight += (unsigned) hwloc_bitmap_weight(roots[i]->cpuset); + + for (i = 0, given = 0, givenweight = 0; i < n_roots; i++) { + unsigned chunk, weight; + hwloc_obj_t root = roots[flags & HWLOC_DISTRIB_FLAG_REVERSE ? n_roots-1-i : i]; + hwloc_cpuset_t cpuset = root->cpuset; + if (root->type == HWLOC_OBJ_NUMANODE) + /* NUMANodes have same cpuset as their parent, but we need normal objects below */ + root = root->parent; + weight = (unsigned) hwloc_bitmap_weight(cpuset); + if (!weight) + continue; + /* Give to root a chunk proportional to its weight. + * If previous chunks got rounded-up, we may get a bit less. */ + chunk = (( (givenweight+weight) * n + tot_weight-1) / tot_weight) + - (( givenweight * n + tot_weight-1) / tot_weight); + if (!root->arity || chunk <= 1 || root->depth >= until) { + /* We can't split any more, put everything there. */ + if (chunk) { + /* Fill cpusets with ours */ + unsigned j; + for (j=0; j < chunk; j++) + cpusetp[j] = hwloc_bitmap_dup(cpuset); + } else { + /* We got no chunk, just merge our cpuset to a previous one + * (the first chunk cannot be empty) + * so that this root doesn't get ignored. 
+ */ + assert(given); + hwloc_bitmap_or(cpusetp[-1], cpusetp[-1], cpuset); + } + } else { + /* Still more to distribute, recurse into children */ + hwloc_distrib(topology, root->children, root->arity, cpusetp, chunk, until, flags); + } + cpusetp += chunk; + given += chunk; + givenweight += weight; + } + + return 0; +} + +/** @} */ + + + +/** \defgroup hwlocality_helper_topology_sets CPU and node sets of entire topologies + * @{ + */ + +/** \brief Get complete CPU set + * + * \return the complete CPU set of logical processors of the system. + * + * \note The returned cpuset is not newly allocated and should thus not be + * changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy. + * + * \note This is equivalent to retrieving the root object complete CPU-set. + */ +HWLOC_DECLSPEC hwloc_const_cpuset_t +hwloc_topology_get_complete_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure; + +/** \brief Get topology CPU set + * + * \return the CPU set of logical processors of the system for which hwloc + * provides topology information. This is equivalent to the cpuset of the + * system object. + * + * \note The returned cpuset is not newly allocated and should thus not be + * changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy. + * + * \note This is equivalent to retrieving the root object complete CPU-set. + */ +HWLOC_DECLSPEC hwloc_const_cpuset_t +hwloc_topology_get_topology_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure; + +/** \brief Get allowed CPU set + * + * \return the CPU set of allowed logical processors of the system. + * + * \note If the topology flag ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM was not set, + * this is identical to hwloc_topology_get_topology_cpuset(), which means + * all PUs are allowed. + * + * \note If ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM was set, applying + * hwloc_bitmap_intersects() on the result of this function and on an object + * cpuset checks whether there are allowed PUs inside that object. 
+ * Applying hwloc_bitmap_and() returns the list of these allowed PUs. + * + * \note The returned cpuset is not newly allocated and should thus not be + * changed or freed, hwloc_bitmap_dup() must be used to obtain a local copy. + */ +HWLOC_DECLSPEC hwloc_const_cpuset_t +hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure; + +/** \brief Get complete node set + * + * \return the complete node set of memory of the system. + * + * \note The returned nodeset is not newly allocated and should thus not be + * changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy. + * + * \note This is equivalent to retrieving the root object complete CPU-set. + */ +HWLOC_DECLSPEC hwloc_const_nodeset_t +hwloc_topology_get_complete_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure; + +/** \brief Get topology node set + * + * \return the node set of memory of the system for which hwloc + * provides topology information. This is equivalent to the nodeset of the + * system object. + * + * \note The returned nodeset is not newly allocated and should thus not be + * changed or freed; hwloc_bitmap_dup() must be used to obtain a local copy. + * + * \note This is equivalent to retrieving the root object complete CPU-set. + */ +HWLOC_DECLSPEC hwloc_const_nodeset_t +hwloc_topology_get_topology_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure; + +/** \brief Get allowed node set + * + * \return the node set of allowed memory of the system. + * + * \note If the topology flag ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM was not set, + * this is identical to hwloc_topology_get_topology_nodeset(), which means + * all NUMA nodes are allowed. + * + * \note If ::HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM was set, applying + * hwloc_bitmap_intersects() on the result of this function and on an object + * nodeset checks whether there are allowed NUMA nodes inside that object. + * Applying hwloc_bitmap_and() returns the list of these allowed NUMA nodes. 
+ * + * \note The returned nodeset is not newly allocated and should thus not be + * changed or freed, hwloc_bitmap_dup() must be used to obtain a local copy. + */ +HWLOC_DECLSPEC hwloc_const_nodeset_t +hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure; + +/** @} */ + + + +/** \defgroup hwlocality_helper_nodeset_convert Converting between CPU sets and node sets + * + * @{ + */ + +/** \brief Convert a CPU set into a NUMA node set and handle non-NUMA cases + * + * If some NUMA nodes have no CPUs at all, this function never sets their + * indexes in the output node set, even if a full CPU set is given in input. + * + * If the topology contains no NUMA nodes, the machine is considered + * as a single memory node, and the following behavior is used: + * If \p cpuset is empty, \p nodeset will be emptied as well. + * Otherwise \p nodeset will be entirely filled. + */ +static __hwloc_inline int +hwloc_cpuset_to_nodeset(hwloc_topology_t topology, hwloc_const_cpuset_t _cpuset, hwloc_nodeset_t nodeset) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + hwloc_obj_t obj = NULL; + assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN); + hwloc_bitmap_zero(nodeset); + while ((obj = hwloc_get_next_obj_covering_cpuset_by_depth(topology, _cpuset, depth, obj)) != NULL) + if (hwloc_bitmap_set(nodeset, obj->os_index) < 0) + return -1; + return 0; +} + +/** \brief Convert a NUMA node set into a CPU set and handle non-NUMA cases + * + * If the topology contains no NUMA nodes, the machine is considered + * as a single memory node, and the following behavior is used: + * If \p nodeset is empty, \p cpuset will be emptied as well. + * Otherwise \p cpuset will be entirely filled. + * This is useful for manipulating memory binding sets. 
+ */ +static __hwloc_inline int +hwloc_cpuset_from_nodeset(hwloc_topology_t topology, hwloc_cpuset_t _cpuset, hwloc_const_nodeset_t nodeset) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + hwloc_obj_t obj = NULL; + assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN); + hwloc_bitmap_zero(_cpuset); + while ((obj = hwloc_get_next_obj_by_depth(topology, depth, obj)) != NULL) { + if (hwloc_bitmap_isset(nodeset, obj->os_index)) + /* no need to check obj->cpuset because objects in levels always have a cpuset */ + if (hwloc_bitmap_or(_cpuset, _cpuset, obj->cpuset) < 0) + return -1; + } + return 0; +} + +/** @} */ + + + +/** \defgroup hwlocality_advanced_io Finding I/O objects + * @{ + */ + +/** \brief Get the first non-I/O ancestor object. + * + * Given the I/O object \p ioobj, find the smallest non-I/O ancestor + * object. This object (normal or memory) may then be used for binding + * because it has non-NULL CPU and node sets + * and because its locality is the same as \p ioobj. + * + * \note The resulting object is usually a normal object but it could also + * be a memory object (e.g. NUMA node) in future platforms if I/O objects + * ever get attached to memory instead of CPUs. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_non_io_ancestor_obj(hwloc_topology_t topology __hwloc_attribute_unused, + hwloc_obj_t ioobj) +{ + hwloc_obj_t obj = ioobj; + while (obj && !obj->cpuset) { + obj = obj->parent; + } + return obj; +} + +/** \brief Get the next PCI device in the system. + * + * \return the first PCI device if \p prev is \c NULL. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_pcidev(hwloc_topology_t topology, hwloc_obj_t prev) +{ + return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PCI_DEVICE, prev); +} + +/** \brief Find the PCI device object matching the PCI bus id + * given domain, bus device and function PCI bus id. 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_get_pcidev_by_busid(hwloc_topology_t topology, + unsigned domain, unsigned bus, unsigned dev, unsigned func) +{ + hwloc_obj_t obj = NULL; + while ((obj = hwloc_get_next_pcidev(topology, obj)) != NULL) { + if (obj->attr->pcidev.domain == domain + && obj->attr->pcidev.bus == bus + && obj->attr->pcidev.dev == dev + && obj->attr->pcidev.func == func) + return obj; + } + return NULL; +} + +/** \brief Find the PCI device object matching the PCI bus id + * given as a string xxxx:yy:zz.t or yy:zz.t. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_pcidev_by_busidstring(hwloc_topology_t topology, const char *busid) +{ + unsigned domain = 0; /* default */ + unsigned bus, dev, func; + + if (sscanf(busid, "%x:%x.%x", &bus, &dev, &func) != 3 + && sscanf(busid, "%x:%x:%x.%x", &domain, &bus, &dev, &func) != 4) { + errno = EINVAL; + return NULL; + } + + return hwloc_get_pcidev_by_busid(topology, domain, bus, dev, func); +} + +/** \brief Get the next OS device in the system. + * + * \return the first OS device if \p prev is \c NULL. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_osdev(hwloc_topology_t topology, hwloc_obj_t prev) +{ + return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_OS_DEVICE, prev); +} + +/** \brief Get the next bridge in the system. + * + * \return the first bridge if \p prev is \c NULL. + */ +static __hwloc_inline hwloc_obj_t +hwloc_get_next_bridge(hwloc_topology_t topology, hwloc_obj_t prev) +{ + return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_BRIDGE, prev); +} + +/* \brief Checks whether a given bridge covers a given PCI bus. 
+ */
+static __hwloc_inline int
+hwloc_bridge_covers_pcibus(hwloc_obj_t bridge,
+			   unsigned domain, unsigned bus)
+{
+  return bridge->type == HWLOC_OBJ_BRIDGE
+    && bridge->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI
+    && bridge->attr->bridge.downstream.pci.domain == domain
+    && bridge->attr->bridge.downstream.pci.secondary_bus <= bus
+    && bridge->attr->bridge.downstream.pci.subordinate_bus >= bus;
+}
+
+/** @} */
+
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+
+#endif /* HWLOC_HELPER_H */
diff --git a/src/3rdparty/hwloc/include/hwloc/inlines.h b/src/3rdparty/hwloc/include/hwloc/inlines.h
new file mode 100644
index 00000000..494209ea
--- /dev/null
+++ b/src/3rdparty/hwloc/include/hwloc/inlines.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright © 2009 CNRS
+ * Copyright © 2009-2018 Inria. All rights reserved.
+ * Copyright © 2009-2012 Université Bordeaux
+ * Copyright © 2009-2010 Cisco Systems, Inc. All rights reserved.
+ * See COPYING in top-level directory.
+ */
+
+/**
+ * This file contains the inline code of functions declared in hwloc.h
+ */
+
+#ifndef HWLOC_INLINES_H
+#define HWLOC_INLINES_H
+
+#ifndef HWLOC_H
+#error Please include the main hwloc.h instead
+#endif
+
+#include <stdlib.h>
+#include <errno.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static __hwloc_inline int
+hwloc_get_type_or_below_depth (hwloc_topology_t topology, hwloc_obj_type_t type)
+{
+  int depth = hwloc_get_type_depth(topology, type);
+
+  if (depth != HWLOC_TYPE_DEPTH_UNKNOWN)
+    return depth;
+
+  /* find the highest existing level with type order >= */
+  for(depth = hwloc_get_type_depth(topology, HWLOC_OBJ_PU); ; depth--)
+    if (hwloc_compare_types(hwloc_get_depth_type(topology, depth), type) < 0)
+      return depth+1;
+
+  /* Shouldn't ever happen, as there is always a Machine level with lower order and known depth.
*/ + /* abort(); */ +} + +static __hwloc_inline int +hwloc_get_type_or_above_depth (hwloc_topology_t topology, hwloc_obj_type_t type) +{ + int depth = hwloc_get_type_depth(topology, type); + + if (depth != HWLOC_TYPE_DEPTH_UNKNOWN) + return depth; + + /* find the lowest existing level with type order <= */ + for(depth = 0; ; depth++) + if (hwloc_compare_types(hwloc_get_depth_type(topology, depth), type) > 0) + return depth-1; + + /* Shouldn't ever happen, as there is always a PU level with higher order and known depth. */ + /* abort(); */ +} + +static __hwloc_inline int +hwloc_get_nbobjs_by_type (hwloc_topology_t topology, hwloc_obj_type_t type) +{ + int depth = hwloc_get_type_depth(topology, type); + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) + return 0; + if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) + return -1; /* FIXME: agregate nbobjs from different levels? */ + return (int) hwloc_get_nbobjs_by_depth(topology, depth); +} + +static __hwloc_inline hwloc_obj_t +hwloc_get_obj_by_type (hwloc_topology_t topology, hwloc_obj_type_t type, unsigned idx) +{ + int depth = hwloc_get_type_depth(topology, type); + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) + return NULL; + if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) + return NULL; + return hwloc_get_obj_by_depth(topology, depth, idx); +} + +static __hwloc_inline hwloc_obj_t +hwloc_get_next_obj_by_depth (hwloc_topology_t topology, int depth, hwloc_obj_t prev) +{ + if (!prev) + return hwloc_get_obj_by_depth (topology, depth, 0); + if (prev->depth != depth) + return NULL; + return prev->next_cousin; +} + +static __hwloc_inline hwloc_obj_t +hwloc_get_next_obj_by_type (hwloc_topology_t topology, hwloc_obj_type_t type, + hwloc_obj_t prev) +{ + int depth = hwloc_get_type_depth(topology, type); + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) + return NULL; + return hwloc_get_next_obj_by_depth (topology, depth, prev); +} + +static __hwloc_inline hwloc_obj_t +hwloc_get_root_obj (hwloc_topology_t topology) +{ + return 
hwloc_get_obj_by_depth (topology, 0, 0);
+}
+
+static __hwloc_inline const char *
+hwloc_obj_get_info_by_name(hwloc_obj_t obj, const char *name)
+{
+  unsigned i;
+  for(i=0; i<obj->infos_count; i++) {
+    struct hwloc_info_s *info = &obj->infos[i];
+    if (!strcmp(info->name, name))
+      return info->value;
+  }
+  return NULL;
+}
+
+static __hwloc_inline void *
+hwloc_alloc_membind_policy(hwloc_topology_t topology, size_t len, hwloc_const_cpuset_t set, hwloc_membind_policy_t policy, int flags)
+{
+  void *p = hwloc_alloc_membind(topology, len, set, policy, flags);
+  if (p)
+    return p;
+
+  if (hwloc_set_membind(topology, set, policy, flags) < 0)
+    /* hwloc_set_membind() takes care of ignoring errors if non-STRICT */
+    return NULL;
+
+  p = hwloc_alloc(topology, len);
+  if (p && policy != HWLOC_MEMBIND_FIRSTTOUCH)
+    /* Enforce the binding by touching the data */
+    memset(p, 0, len);
+  return p;
+}
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+
+#endif /* HWLOC_INLINES_H */
diff --git a/src/3rdparty/hwloc/include/hwloc/intel-mic.h b/src/3rdparty/hwloc/include/hwloc/intel-mic.h
new file mode 100644
index 00000000..6f6f9d1b
--- /dev/null
+++ b/src/3rdparty/hwloc/include/hwloc/intel-mic.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright © 2013-2016 Inria. All rights reserved.
+ * See COPYING in top-level directory.
+ */
+
+/** \file
+ * \brief Macros to help interaction between hwloc and Intel Xeon Phi (MIC).
+ *
+ * Applications that use both hwloc and Intel Xeon Phi (MIC) may want to
+ * include this file so as to get topology information for MIC devices.
+ */
+
+#ifndef HWLOC_INTEL_MIC_H
+#define HWLOC_INTEL_MIC_H
+
+#include <hwloc.h>
+#include <hwloc/autogen/config.h>
+#include <hwloc/helper.h>
+#ifdef HWLOC_LINUX_SYS
+#include <hwloc/linux.h>
+#include <dirent.h>
+#include <string.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** \defgroup hwlocality_intel_mic Interoperability with Intel Xeon Phi (MIC)
+ *
+ * This interface offers ways to retrieve topology information about
+ * Intel Xeon Phi (MIC) devices.
+ * + * @{ + */ + +/** \brief Get the CPU set of logical processors that are physically + * close to MIC device whose index is \p idx. + * + * Return the CPU set describing the locality of the MIC device whose index is \p idx. + * + * Topology \p topology and device index \p idx must match the local machine. + * I/O devices detection is not needed in the topology. + * + * The function only returns the locality of the device. + * If more information about the device is needed, OS objects should + * be used instead, see hwloc_intel_mic_get_device_osdev_by_index(). + * + * This function is currently only implemented in a meaningful way for + * Linux; other systems will simply get a full cpuset. + */ +static __hwloc_inline int +hwloc_intel_mic_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused, + int idx __hwloc_attribute_unused, + hwloc_cpuset_t set) +{ +#ifdef HWLOC_LINUX_SYS + /* If we're on Linux, use the sysfs mechanism to get the local cpus */ +#define HWLOC_INTEL_MIC_DEVICE_SYSFS_PATH_MAX 128 + char path[HWLOC_INTEL_MIC_DEVICE_SYSFS_PATH_MAX]; + DIR *sysdir = NULL; + struct dirent *dirent; + unsigned pcibus, pcidev, pcifunc; + + if (!hwloc_topology_is_thissystem(topology)) { + errno = EINVAL; + return -1; + } + + sprintf(path, "/sys/class/mic/mic%d", idx); + sysdir = opendir(path); + if (!sysdir) + return -1; + + while ((dirent = readdir(sysdir)) != NULL) { + if (sscanf(dirent->d_name, "pci_%02x:%02x.%02x", &pcibus, &pcidev, &pcifunc) == 3) { + sprintf(path, "/sys/class/mic/mic%d/pci_%02x:%02x.%02x/local_cpus", idx, pcibus, pcidev, pcifunc); + if (hwloc_linux_read_path_as_cpumask(path, set) < 0 + || hwloc_bitmap_iszero(set)) + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); + break; + } + } + + closedir(sysdir); +#else + /* Non-Linux systems simply get a full cpuset */ + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#endif + return 0; +} + +/** \brief Get the hwloc OS device object corresponding to 
the + * MIC device for the given index. + * + * Return the OS device object describing the MIC device whose index is \p idx. + * Return NULL if there is none. + * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. + * I/O devices detection must be enabled in the topology. + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object. + */ +static __hwloc_inline hwloc_obj_t +hwloc_intel_mic_get_device_osdev_by_index(hwloc_topology_t topology, + unsigned idx) +{ + hwloc_obj_t osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + if (HWLOC_OBJ_OSDEV_COPROC == osdev->attr->osdev.type + && osdev->name + && !strncmp("mic", osdev->name, 3) + && atoi(osdev->name + 3) == (int) idx) + return osdev; + } + return NULL; +} + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_INTEL_MIC_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/linux-libnuma.h b/src/3rdparty/hwloc/include/hwloc/linux-libnuma.h new file mode 100644 index 00000000..7cea4166 --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/linux-libnuma.h @@ -0,0 +1,273 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2017 Inria. All rights reserved. + * Copyright © 2009-2010, 2012 Université Bordeaux + * See COPYING in top-level directory. + */ + +/** \file + * \brief Macros to help interaction between hwloc and Linux libnuma. + * + * Applications that use both Linux libnuma and hwloc may want to + * include this file so as to ease conversion between their respective types. 
+*/
+
+#ifndef HWLOC_LINUX_LIBNUMA_H
+#define HWLOC_LINUX_LIBNUMA_H
+
+#include <hwloc.h>
+#include <numa.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** \defgroup hwlocality_linux_libnuma_ulongs Interoperability with Linux libnuma unsigned long masks
+ *
+ * This interface helps converting between Linux libnuma unsigned long masks
+ * and hwloc cpusets and nodesets.
+ *
+ * \note Topology \p topology must match the current machine.
+ *
+ * \note The behavior of libnuma is undefined if the kernel is not NUMA-aware.
+ * (when CONFIG_NUMA is not set in the kernel configuration).
+ * This helper and libnuma may thus not be strictly compatible in this case,
+ * which may be detected by checking whether numa_available() returns -1.
+ *
+ * @{
+ */
+
+
+/** \brief Convert hwloc CPU set \p cpuset into the array of unsigned long \p mask
+ *
+ * \p mask is the array of unsigned long that will be filled.
+ * \p maxnode contains the maximal node number that may be stored in \p mask.
+ * \p maxnode will be set to the maximal node number that was found, plus one.
+ *
+ * This function may be used before calling set_mempolicy, mbind, migrate_pages
+ * or any other function that takes an array of unsigned long and a maximal
+ * node number as input parameter.
+ */ +static __hwloc_inline int +hwloc_cpuset_to_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_const_cpuset_t cpuset, + unsigned long *mask, unsigned long *maxnode) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + unsigned long outmaxnode = -1; + hwloc_obj_t node = NULL; + + /* round-up to the next ulong and clear all bytes */ + *maxnode = (*maxnode + 8*sizeof(*mask) - 1) & ~(8*sizeof(*mask) - 1); + memset(mask, 0, *maxnode/8); + + while ((node = hwloc_get_next_obj_covering_cpuset_by_depth(topology, cpuset, depth, node)) != NULL) { + if (node->os_index >= *maxnode) + continue; + mask[node->os_index/sizeof(*mask)/8] |= 1UL << (node->os_index % (sizeof(*mask)*8)); + if (outmaxnode == (unsigned long) -1 || outmaxnode < node->os_index) + outmaxnode = node->os_index; + } + + *maxnode = outmaxnode+1; + return 0; +} + +/** \brief Convert hwloc NUMA node set \p nodeset into the array of unsigned long \p mask + * + * \p mask is the array of unsigned long that will be filled. + * \p maxnode contains the maximal node number that may be stored in \p mask. + * \p maxnode will be set to the maximal node number that was found, plus one. + * + * This function may be used before calling set_mempolicy, mbind, migrate_pages + * or any other function that takes an array of unsigned long and a maximal + * node number as input parameter. 
+ */ +static __hwloc_inline int +hwloc_nodeset_to_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, + unsigned long *mask, unsigned long *maxnode) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + unsigned long outmaxnode = -1; + hwloc_obj_t node = NULL; + + /* round-up to the next ulong and clear all bytes */ + *maxnode = (*maxnode + 8*sizeof(*mask) - 1) & ~(8*sizeof(*mask) - 1); + memset(mask, 0, *maxnode/8); + + while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL) { + if (node->os_index >= *maxnode) + continue; + if (!hwloc_bitmap_isset(nodeset, node->os_index)) + continue; + mask[node->os_index/sizeof(*mask)/8] |= 1UL << (node->os_index % (sizeof(*mask)*8)); + if (outmaxnode == (unsigned long) -1 || outmaxnode < node->os_index) + outmaxnode = node->os_index; + } + + *maxnode = outmaxnode+1; + return 0; +} + +/** \brief Convert the array of unsigned long \p mask into hwloc CPU set + * + * \p mask is a array of unsigned long that will be read. + * \p maxnode contains the maximal node number that may be read in \p mask. + * + * This function may be used after calling get_mempolicy or any other function + * that takes an array of unsigned long as output parameter (and possibly + * a maximal node number as input parameter). 
+ */ +static __hwloc_inline int +hwloc_cpuset_from_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_cpuset_t cpuset, + const unsigned long *mask, unsigned long maxnode) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + hwloc_obj_t node = NULL; + hwloc_bitmap_zero(cpuset); + while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL) + if (node->os_index < maxnode + && (mask[node->os_index/sizeof(*mask)/8] & (1UL << (node->os_index % (sizeof(*mask)*8))))) + hwloc_bitmap_or(cpuset, cpuset, node->cpuset); + return 0; +} + +/** \brief Convert the array of unsigned long \p mask into hwloc NUMA node set + * + * \p mask is a array of unsigned long that will be read. + * \p maxnode contains the maximal node number that may be read in \p mask. + * + * This function may be used after calling get_mempolicy or any other function + * that takes an array of unsigned long as output parameter (and possibly + * a maximal node number as input parameter). + */ +static __hwloc_inline int +hwloc_nodeset_from_linux_libnuma_ulongs(hwloc_topology_t topology, hwloc_nodeset_t nodeset, + const unsigned long *mask, unsigned long maxnode) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + hwloc_obj_t node = NULL; + hwloc_bitmap_zero(nodeset); + while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL) + if (node->os_index < maxnode + && (mask[node->os_index/sizeof(*mask)/8] & (1UL << (node->os_index % (sizeof(*mask)*8))))) + hwloc_bitmap_set(nodeset, node->os_index); + return 0; +} + +/** @} */ + + + +/** \defgroup hwlocality_linux_libnuma_bitmask Interoperability with Linux libnuma bitmask + * + * This interface helps converting between Linux libnuma bitmasks + * and hwloc cpusets and nodesets. + * + * \note Topology \p topology must match the current machine. + * + * \note The behavior of libnuma is undefined if the kernel is not NUMA-aware. + * (when CONFIG_NUMA is not set in the kernel configuration). 
+ * This helper and libnuma may thus not be strictly compatible in this case, + * which may be detected by checking whether numa_available() returns -1. + * + * @{ + */ + + +/** \brief Convert hwloc CPU set \p cpuset into the returned libnuma bitmask + * + * The returned bitmask should later be freed with numa_bitmask_free. + * + * This function may be used before calling many numa_ functions + * that use a struct bitmask as an input parameter. + * + * \return newly allocated struct bitmask. + */ +static __hwloc_inline struct bitmask * +hwloc_cpuset_to_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_const_cpuset_t cpuset) __hwloc_attribute_malloc; +static __hwloc_inline struct bitmask * +hwloc_cpuset_to_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_const_cpuset_t cpuset) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + hwloc_obj_t node = NULL; + struct bitmask *bitmask = numa_allocate_cpumask(); + if (!bitmask) + return NULL; + while ((node = hwloc_get_next_obj_covering_cpuset_by_depth(topology, cpuset, depth, node)) != NULL) + if (node->attr->numanode.local_memory) + numa_bitmask_setbit(bitmask, node->os_index); + return bitmask; +} + +/** \brief Convert hwloc NUMA node set \p nodeset into the returned libnuma bitmask + * + * The returned bitmask should later be freed with numa_bitmask_free. + * + * This function may be used before calling many numa_ functions + * that use a struct bitmask as an input parameter. + * + * \return newly allocated struct bitmask. 
+ */ +static __hwloc_inline struct bitmask * +hwloc_nodeset_to_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset) __hwloc_attribute_malloc; +static __hwloc_inline struct bitmask * +hwloc_nodeset_to_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + hwloc_obj_t node = NULL; + struct bitmask *bitmask = numa_allocate_cpumask(); + if (!bitmask) + return NULL; + while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL) + if (hwloc_bitmap_isset(nodeset, node->os_index) && node->attr->numanode.local_memory) + numa_bitmask_setbit(bitmask, node->os_index); + return bitmask; +} + +/** \brief Convert libnuma bitmask \p bitmask into hwloc CPU set \p cpuset + * + * This function may be used after calling many numa_ functions + * that use a struct bitmask as an output parameter. + */ +static __hwloc_inline int +hwloc_cpuset_from_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_cpuset_t cpuset, + const struct bitmask *bitmask) +{ + int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); + hwloc_obj_t node = NULL; + hwloc_bitmap_zero(cpuset); + while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL) + if (numa_bitmask_isbitset(bitmask, node->os_index)) + hwloc_bitmap_or(cpuset, cpuset, node->cpuset); + return 0; +} + +/** \brief Convert libnuma bitmask \p bitmask into hwloc NUMA node set \p nodeset + * + * This function may be used after calling many numa_ functions + * that use a struct bitmask as an output parameter. 
+ */
+static __hwloc_inline int
+hwloc_nodeset_from_linux_libnuma_bitmask(hwloc_topology_t topology, hwloc_nodeset_t nodeset,
+					 const struct bitmask *bitmask)
+{
+  int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
+  hwloc_obj_t node = NULL;
+  hwloc_bitmap_zero(nodeset);
+  while ((node = hwloc_get_next_obj_by_depth(topology, depth, node)) != NULL)
+    if (numa_bitmask_isbitset(bitmask, node->os_index))
+      hwloc_bitmap_set(nodeset, node->os_index);
+  return 0;
+}
+
+/** @} */
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+
+#endif /* HWLOC_LINUX_NUMA_H */
diff --git a/src/3rdparty/hwloc/include/hwloc/linux.h b/src/3rdparty/hwloc/include/hwloc/linux.h
new file mode 100644
index 00000000..c409e1c2
--- /dev/null
+++ b/src/3rdparty/hwloc/include/hwloc/linux.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright © 2009 CNRS
+ * Copyright © 2009-2016 Inria. All rights reserved.
+ * Copyright © 2009-2011 Université Bordeaux
+ * See COPYING in top-level directory.
+ */
+
+/** \file
+ * \brief Macros to help interaction between hwloc and Linux.
+ *
+ * Applications that use hwloc on Linux may want to include this file
+ * if using some low-level Linux features.
+ */
+
+#ifndef HWLOC_LINUX_H
+#define HWLOC_LINUX_H
+
+#include <hwloc.h>
+#include <unistd.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** \defgroup hwlocality_linux Linux-specific helpers
+ *
+ * This includes helpers for manipulating Linux kernel cpumap files, and hwloc
+ * equivalents of the Linux sched_setaffinity and sched_getaffinity system calls.
+ *
+ * @{
+ */
+
+/** \brief Bind a thread \p tid on cpus given in cpuset \p set
+ *
+ * The behavior is exactly the same as the Linux sched_setaffinity system call,
+ * but uses a hwloc cpuset.
+ *
+ * \note This is equivalent to calling hwloc_set_proc_cpubind() with
+ * HWLOC_CPUBIND_THREAD as flags.
+ */ +HWLOC_DECLSPEC int hwloc_linux_set_tid_cpubind(hwloc_topology_t topology, pid_t tid, hwloc_const_cpuset_t set); + +/** \brief Get the current binding of thread \p tid + * + * The behavior is exactly the same as the Linux sched_getaffinity system call, + * but uses a hwloc cpuset. + * + * \note This is equivalent to calling hwloc_get_proc_cpubind() with + * ::HWLOC_CPUBIND_THREAD as flags. + */ +HWLOC_DECLSPEC int hwloc_linux_get_tid_cpubind(hwloc_topology_t topology, pid_t tid, hwloc_cpuset_t set); + +/** \brief Get the last physical CPU where thread \p tid ran. + * + * \note This is equivalent to calling hwloc_get_proc_last_cpu_location() with + * ::HWLOC_CPUBIND_THREAD as flags. + */ +HWLOC_DECLSPEC int hwloc_linux_get_tid_last_cpu_location(hwloc_topology_t topology, pid_t tid, hwloc_bitmap_t set); + +/** \brief Convert a linux kernel cpumask file \p path into a hwloc bitmap \p set. + * + * Might be used when reading CPU set from sysfs attributes such as topology + * and caches for processors, or local_cpus for devices. + * + * \note This function ignores the HWLOC_FSROOT environment variable. + */ +HWLOC_DECLSPEC int hwloc_linux_read_path_as_cpumask(const char *path, hwloc_bitmap_t set); + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_LINUX_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/nvml.h b/src/3rdparty/hwloc/include/hwloc/nvml.h new file mode 100644 index 00000000..19710866 --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/nvml.h @@ -0,0 +1,181 @@ +/* + * Copyright © 2012-2016 Inria. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Macros to help interaction between hwloc and the NVIDIA Management Library. + * + * Applications that use both hwloc and the NVIDIA Management Library may want to + * include this file so as to get topology information for NVML devices. 
+ */
+
+#ifndef HWLOC_NVML_H
+#define HWLOC_NVML_H
+
+#include <hwloc.h>
+#include <hwloc/autogen/config.h>
+#include <hwloc/helper.h>
+#ifdef HWLOC_LINUX_SYS
+#include <hwloc/linux.h>
+#endif
+
+#include <nvml.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** \defgroup hwlocality_nvml Interoperability with the NVIDIA Management Library
+ *
+ * This interface offers ways to retrieve topology information about
+ * devices managed by the NVIDIA Management Library (NVML).
+ *
+ * @{
+ */
+
+/** \brief Get the CPU set of logical processors that are physically
+ * close to NVML device \p device.
+ *
+ * Return the CPU set describing the locality of the NVML device \p device.
+ *
+ * Topology \p topology and device \p device must match the local machine.
+ * I/O devices detection and the NVML component are not needed in the topology.
+ *
+ * The function only returns the locality of the device.
+ * If more information about the device is needed, OS objects should
+ * be used instead, see hwloc_nvml_get_device_osdev()
+ * and hwloc_nvml_get_device_osdev_by_index().
+ *
+ * This function is currently only implemented in a meaningful way for
+ * Linux; other systems will simply get a full cpuset.
+ */ +static __hwloc_inline int +hwloc_nvml_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused, + nvmlDevice_t device, hwloc_cpuset_t set) +{ +#ifdef HWLOC_LINUX_SYS + /* If we're on Linux, use the sysfs mechanism to get the local cpus */ +#define HWLOC_NVML_DEVICE_SYSFS_PATH_MAX 128 + char path[HWLOC_NVML_DEVICE_SYSFS_PATH_MAX]; + nvmlReturn_t nvres; + nvmlPciInfo_t pci; + + if (!hwloc_topology_is_thissystem(topology)) { + errno = EINVAL; + return -1; + } + + nvres = nvmlDeviceGetPciInfo(device, &pci); + if (NVML_SUCCESS != nvres) { + errno = EINVAL; + return -1; + } + + sprintf(path, "/sys/bus/pci/devices/%04x:%02x:%02x.0/local_cpus", pci.domain, pci.bus, pci.device); + if (hwloc_linux_read_path_as_cpumask(path, set) < 0 + || hwloc_bitmap_iszero(set)) + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#else + /* Non-Linux systems simply get a full cpuset */ + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#endif + return 0; +} + +/** \brief Get the hwloc OS device object corresponding to the + * NVML device whose index is \p idx. + * + * Return the OS device object describing the NVML device whose + * index is \p idx. Returns NULL if there is none. + * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. + * I/O devices detection and the NVML component must be enabled in the topology. + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object (unless PCI devices are filtered out). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_nvml_get_device_osdev_by_index(hwloc_topology_t topology, unsigned idx) +{ + hwloc_obj_t osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + if (HWLOC_OBJ_OSDEV_GPU == osdev->attr->osdev.type + && osdev->name + && !strncmp("nvml", osdev->name, 4) + && atoi(osdev->name + 4) == (int) idx) + return osdev; + } + return NULL; +} + +/** \brief Get the hwloc OS device object corresponding to NVML device \p device. + * + * Return the hwloc OS device object that describes the given + * NVML device \p device. Return NULL if there is none. + * + * Topology \p topology and device \p device must match the local machine. + * I/O devices detection and the NVML component must be enabled in the topology. + * If not, the locality of the object may still be found using + * hwloc_nvml_get_device_cpuset(). + * + * \note The corresponding hwloc PCI device may be found by looking + * at the result parent pointer (unless PCI devices are filtered out). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_nvml_get_device_osdev(hwloc_topology_t topology, nvmlDevice_t device) +{ + hwloc_obj_t osdev; + nvmlReturn_t nvres; + nvmlPciInfo_t pci; + char uuid[64]; + + if (!hwloc_topology_is_thissystem(topology)) { + errno = EINVAL; + return NULL; + } + + nvres = nvmlDeviceGetPciInfo(device, &pci); + if (NVML_SUCCESS != nvres) + return NULL; + + nvres = nvmlDeviceGetUUID(device, uuid, sizeof(uuid)); + if (NVML_SUCCESS != nvres) + uuid[0] = '\0'; + + osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + hwloc_obj_t pcidev = osdev->parent; + const char *info; + + if (strncmp(osdev->name, "nvml", 4)) + continue; + + if (pcidev + && pcidev->type == HWLOC_OBJ_PCI_DEVICE + && pcidev->attr->pcidev.domain == pci.domain + && pcidev->attr->pcidev.bus == pci.bus + && pcidev->attr->pcidev.dev == pci.device + && pcidev->attr->pcidev.func == 0) + return osdev; + + info = hwloc_obj_get_info_by_name(osdev, "NVIDIAUUID"); + if (info && !strcmp(info, uuid)) + return osdev; + } + + return NULL; +} + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_NVML_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/opencl.h b/src/3rdparty/hwloc/include/hwloc/opencl.h new file mode 100644 index 00000000..058968d7 --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/opencl.h @@ -0,0 +1,206 @@ +/* + * Copyright © 2012-2018 Inria. All rights reserved. + * Copyright © 2013, 2018 Université Bordeaux. All right reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Macros to help interaction between hwloc and the OpenCL interface. + * + * Applications that use both hwloc and OpenCL may want to + * include this file so as to get topology information for OpenCL devices. 
+ */
+
+#ifndef HWLOC_OPENCL_H
+#define HWLOC_OPENCL_H
+
+#include <hwloc.h>
+#include <hwloc/autogen/config.h>
+#include <hwloc/helper.h>
+#ifdef HWLOC_LINUX_SYS
+#include <hwloc/linux.h>
+#endif
+
+#ifdef __APPLE__
+#include <OpenCL/cl.h>
+#include <OpenCL/cl_ext.h>
+#else
+#include <CL/cl.h>
+#include <CL/cl_ext.h>
+#endif
+
+#include <stdio.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/** \defgroup hwlocality_opencl Interoperability with OpenCL
+ *
+ * This interface offers ways to retrieve topology information about
+ * OpenCL devices.
+ *
+ * Only the AMD OpenCL interface currently offers useful locality information
+ * about its devices.
+ *
+ * @{
+ */
+
+/** \brief Get the CPU set of logical processors that are physically
+ * close to OpenCL device \p device.
+ *
+ * Return the CPU set describing the locality of the OpenCL device \p device.
+ *
+ * Topology \p topology and device \p device must match the local machine.
+ * I/O devices detection and the OpenCL component are not needed in the topology.
+ *
+ * The function only returns the locality of the device.
+ * If more information about the device is needed, OS objects should
+ * be used instead, see hwloc_opencl_get_device_osdev()
+ * and hwloc_opencl_get_device_osdev_by_index().
+ *
+ * This function is currently only implemented in a meaningful way for
+ * Linux with the AMD OpenCL implementation; other systems will simply
+ * get a full cpuset.
+ */ +static __hwloc_inline int +hwloc_opencl_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused, + cl_device_id device __hwloc_attribute_unused, + hwloc_cpuset_t set) +{ +#if (defined HWLOC_LINUX_SYS) && (defined CL_DEVICE_TOPOLOGY_AMD) + /* If we're on Linux + AMD OpenCL, use the AMD extension + the sysfs mechanism to get the local cpus */ +#define HWLOC_OPENCL_DEVICE_SYSFS_PATH_MAX 128 + char path[HWLOC_OPENCL_DEVICE_SYSFS_PATH_MAX]; + cl_device_topology_amd amdtopo; + cl_int clret; + + if (!hwloc_topology_is_thissystem(topology)) { + errno = EINVAL; + return -1; + } + + clret = clGetDeviceInfo(device, CL_DEVICE_TOPOLOGY_AMD, sizeof(amdtopo), &amdtopo, NULL); + if (CL_SUCCESS != clret) { + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); + return 0; + } + if (CL_DEVICE_TOPOLOGY_TYPE_PCIE_AMD != amdtopo.raw.type) { + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); + return 0; + } + + sprintf(path, "/sys/bus/pci/devices/0000:%02x:%02x.%01x/local_cpus", + (unsigned) amdtopo.pcie.bus, (unsigned) amdtopo.pcie.device, (unsigned) amdtopo.pcie.function); + if (hwloc_linux_read_path_as_cpumask(path, set) < 0 + || hwloc_bitmap_iszero(set)) + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#else + /* Non-Linux + AMD OpenCL systems simply get a full cpuset */ + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#endif + return 0; +} + +/** \brief Get the hwloc OS device object corresponding to the + * OpenCL device for the given indexes. + * + * Return the OS device object describing the OpenCL device + * whose platform index is \p platform_index, + * and whose device index within this platform if \p device_index. + * Return NULL if there is none. + * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. 
+ * I/O devices detection and the OpenCL component must be enabled in the topology. + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object (unless PCI devices are filtered out). + */ +static __hwloc_inline hwloc_obj_t +hwloc_opencl_get_device_osdev_by_index(hwloc_topology_t topology, + unsigned platform_index, unsigned device_index) +{ + unsigned x = (unsigned) -1, y = (unsigned) -1; + hwloc_obj_t osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + if (HWLOC_OBJ_OSDEV_COPROC == osdev->attr->osdev.type + && osdev->name + && sscanf(osdev->name, "opencl%ud%u", &x, &y) == 2 + && platform_index == x && device_index == y) + return osdev; + } + return NULL; +} + +/** \brief Get the hwloc OS device object corresponding to OpenCL device \p deviceX. + * + * Use OpenCL device attributes to find the corresponding hwloc OS device object. + * Return NULL if there is none or if useful attributes are not available. + * + * This function currently only works on AMD OpenCL devices that support + * the CL_DEVICE_TOPOLOGY_AMD extension. hwloc_opencl_get_device_osdev_by_index() + * should be preferred whenever possible, i.e. when platform and device index + * are known. + * + * Topology \p topology and device \p device must match the local machine. + * I/O devices detection and the OpenCL component must be enabled in the topology. + * If not, the locality of the object may still be found using + * hwloc_opencl_get_device_cpuset(). + * + * \note This function cannot work if PCI devices are filtered out. + * + * \note The corresponding hwloc PCI device may be found by looking + * at the result parent pointer (unless PCI devices are filtered out). 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_opencl_get_device_osdev(hwloc_topology_t topology __hwloc_attribute_unused, + cl_device_id device __hwloc_attribute_unused) +{ +#ifdef CL_DEVICE_TOPOLOGY_AMD + hwloc_obj_t osdev; + cl_device_topology_amd amdtopo; + cl_int clret; + + clret = clGetDeviceInfo(device, CL_DEVICE_TOPOLOGY_AMD, sizeof(amdtopo), &amdtopo, NULL); + if (CL_SUCCESS != clret) { + errno = EINVAL; + return NULL; + } + if (CL_DEVICE_TOPOLOGY_TYPE_PCIE_AMD != amdtopo.raw.type) { + errno = EINVAL; + return NULL; + } + + osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + hwloc_obj_t pcidev = osdev->parent; + if (strncmp(osdev->name, "opencl", 6)) + continue; + if (pcidev + && pcidev->type == HWLOC_OBJ_PCI_DEVICE + && pcidev->attr->pcidev.domain == 0 + && pcidev->attr->pcidev.bus == amdtopo.pcie.bus + && pcidev->attr->pcidev.dev == amdtopo.pcie.device + && pcidev->attr->pcidev.func == amdtopo.pcie.function) + return osdev; + /* if PCI are filtered out, we need a info attr to match on */ + } + + return NULL; +#else + return NULL; +#endif +} + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_OPENCL_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/openfabrics-verbs.h b/src/3rdparty/hwloc/include/hwloc/openfabrics-verbs.h new file mode 100644 index 00000000..174ab4a5 --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/openfabrics-verbs.h @@ -0,0 +1,150 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2016 Inria. All rights reserved. + * Copyright © 2009-2010 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Macros to help interaction between hwloc and OpenFabrics + * verbs. + * + * Applications that use both hwloc and OpenFabrics verbs may want to + * include this file so as to get topology information for OpenFabrics + * hardware (InfiniBand, etc). 
+ * + */ + +#ifndef HWLOC_OPENFABRICS_VERBS_H +#define HWLOC_OPENFABRICS_VERBS_H + +#include +#include +#ifdef HWLOC_LINUX_SYS +#include +#endif + +#include + + +#ifdef __cplusplus +extern "C" { +#endif + + +/** \defgroup hwlocality_openfabrics Interoperability with OpenFabrics + * + * This interface offers ways to retrieve topology information about + * OpenFabrics devices (InfiniBand, Omni-Path, usNIC, etc). + * + * @{ + */ + +/** \brief Get the CPU set of logical processors that are physically + * close to device \p ibdev. + * + * Return the CPU set describing the locality of the OpenFabrics + * device \p ibdev (InfiniBand, etc). + * + * Topology \p topology and device \p ibdev must match the local machine. + * I/O devices detection is not needed in the topology. + * + * The function only returns the locality of the device. + * If more information about the device is needed, OS objects should + * be used instead, see hwloc_ibv_get_device_osdev() + * and hwloc_ibv_get_device_osdev_by_name(). + * + * This function is currently only implemented in a meaningful way for + * Linux; other systems will simply get a full cpuset. 
+ */ +static __hwloc_inline int +hwloc_ibv_get_device_cpuset(hwloc_topology_t topology __hwloc_attribute_unused, + struct ibv_device *ibdev, hwloc_cpuset_t set) +{ +#ifdef HWLOC_LINUX_SYS + /* If we're on Linux, use the verbs-provided sysfs mechanism to + get the local cpus */ +#define HWLOC_OPENFABRICS_VERBS_SYSFS_PATH_MAX 128 + char path[HWLOC_OPENFABRICS_VERBS_SYSFS_PATH_MAX]; + + if (!hwloc_topology_is_thissystem(topology)) { + errno = EINVAL; + return -1; + } + + sprintf(path, "/sys/class/infiniband/%s/device/local_cpus", + ibv_get_device_name(ibdev)); + if (hwloc_linux_read_path_as_cpumask(path, set) < 0 + || hwloc_bitmap_iszero(set)) + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#else + /* Non-Linux systems simply get a full cpuset */ + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); +#endif + return 0; +} + +/** \brief Get the hwloc OS device object corresponding to the OpenFabrics + * device named \p ibname. + * + * Return the OS device object describing the OpenFabrics device + * (InfiniBand, Omni-Path, usNIC, etc) whose name is \p ibname + * (mlx5_0, hfi1_0, usnic_0, qib0, etc). + * Returns NULL if there is none. + * The name \p ibname is usually obtained from ibv_get_device_name(). + * + * The topology \p topology does not necessarily have to match the current + * machine. For instance the topology may be an XML import of a remote host. + * I/O devices detection must be enabled in the topology. + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object. 
+ */ +static __hwloc_inline hwloc_obj_t +hwloc_ibv_get_device_osdev_by_name(hwloc_topology_t topology, + const char *ibname) +{ + hwloc_obj_t osdev = NULL; + while ((osdev = hwloc_get_next_osdev(topology, osdev)) != NULL) { + if (HWLOC_OBJ_OSDEV_OPENFABRICS == osdev->attr->osdev.type + && osdev->name && !strcmp(ibname, osdev->name)) + return osdev; + } + return NULL; +} + +/** \brief Get the hwloc OS device object corresponding to the OpenFabrics + * device \p ibdev. + * + * Return the OS device object describing the OpenFabrics device \p ibdev + * (InfiniBand, etc). Returns NULL if there is none. + * + * Topology \p topology and device \p ibdev must match the local machine. + * I/O devices detection must be enabled in the topology. + * If not, the locality of the object may still be found using + * hwloc_ibv_get_device_cpuset(). + * + * \note The corresponding PCI device object can be obtained by looking + * at the OS device parent object. + */ +static __hwloc_inline hwloc_obj_t +hwloc_ibv_get_device_osdev(hwloc_topology_t topology, + struct ibv_device *ibdev) +{ + if (!hwloc_topology_is_thissystem(topology)) { + errno = EINVAL; + return NULL; + } + return hwloc_ibv_get_device_osdev_by_name(topology, ibv_get_device_name(ibdev)); +} + +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_OPENFABRICS_VERBS_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/plugins.h b/src/3rdparty/hwloc/include/hwloc/plugins.h new file mode 100644 index 00000000..cb22000d --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/plugins.h @@ -0,0 +1,542 @@ +/* + * Copyright © 2013-2017 Inria. All rights reserved. + * Copyright © 2016 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +#ifndef HWLOC_PLUGINS_H +#define HWLOC_PLUGINS_H + +/** \file + * \brief Public interface for building hwloc plugins. 
+ */
+
+struct hwloc_backend;
+
+#include <hwloc.h>
+#ifdef HWLOC_INSIDE_PLUGIN
+/* needed for hwloc_plugin_check_namespace() */
+#include <ltdl.h>
+#endif
+
+
+
+/** \defgroup hwlocality_disc_components Components and Plugins: Discovery components
+ * @{
+ */
+
+/** \brief Discovery component type */
+typedef enum hwloc_disc_component_type_e {
+  /** \brief CPU-only discovery through the OS, or generic no-OS support.
+   * \hideinitializer */
+  HWLOC_DISC_COMPONENT_TYPE_CPU = (1<<0),
+
+  /** \brief xml or synthetic,
+   * platform-specific components such as bgq.
+   * Anything that discovers CPUs and everything else.
+   * No misc backend is expected to complement a global component.
+   * \hideinitializer */
+  HWLOC_DISC_COMPONENT_TYPE_GLOBAL = (1<<1),
+
+  /** \brief OpenCL, Cuda, etc.
+   * \hideinitializer */
+  HWLOC_DISC_COMPONENT_TYPE_MISC = (1<<2)
+} hwloc_disc_component_type_t;
+
+/** \brief Discovery component structure
+ *
+ * This is the major kind of components, taking care of the discovery.
+ * They are registered by generic components, either statically-built or as plugins.
+ */
+struct hwloc_disc_component {
+  /** \brief Discovery component type */
+  hwloc_disc_component_type_t type;
+
+  /** \brief Name.
+   * If this component is built as a plugin, this name does not have to match the plugin filename.
+   */
+  const char *name;
+
+  /** \brief Component types to exclude, as an OR'ed set of ::hwloc_disc_component_type_e.
+   *
+   * For a GLOBAL component, this usually includes all other types (~0).
+   *
+   * Other components only exclude types that may bring conflicting
+   * topology information. MISC components should likely not be excluded
+   * since they usually bring non-primary additional information.
+   */
+  unsigned excludes;
+
+  /** \brief Instantiate callback to create a backend from the component.
+   * Parameters data1, data2, data3 are NULL except for components
+   * that have special enabling routines such as hwloc_topology_set_xml(). 
*/
+  struct hwloc_backend * (*instantiate)(struct hwloc_disc_component *component, const void *data1, const void *data2, const void *data3);
+
+  /** \brief Component priority.
+   * Used to sort topology->components, higher priority first.
+   * Also used to decide between two components with the same name.
+   *
+   * Usual values are
+   * 50 for native OS (or platform) components,
+   * 45 for x86,
+   * 40 for no-OS fallback,
+   * 30 for global components (xml, synthetic),
+   * 20 for pci,
+   * 10 for other misc components (opencl etc.).
+   */
+  unsigned priority;
+
+  /** \brief Enabled by default.
+   * If unset, it will be disabled unless explicitly requested.
+   */
+  unsigned enabled_by_default;
+
+  /** \private Used internally to list components by priority on topology->components
+   * (the component structure is usually read-only,
+   * the core copies it before using this field for queueing)
+   */
+  struct hwloc_disc_component * next;
+};
+
+/** @} */
+
+
+
+
+/** \defgroup hwlocality_disc_backends Components and Plugins: Discovery backends
+ * @{
+ */
+
+/** \brief Discovery backend structure
+ *
+ * A backend is the instantiation of a discovery component.
+ * When a component gets enabled for a topology,
+ * its instantiate() callback creates a backend.
+ *
+ * hwloc_backend_alloc() initializes all fields to default values
+ * that the component may change (except "component" and "next")
+ * before enabling the backend with hwloc_backend_enable().
+ */
+struct hwloc_backend {
+  /** \private Reserved for the core, set by hwloc_backend_alloc() */
+  struct hwloc_disc_component * component;
+  /** \private Reserved for the core, set by hwloc_backend_enable() */
+  struct hwloc_topology * topology;
+  /** \private Reserved for the core. Set to 1 if forced through envvar, 0 otherwise. */
+  int envvar_forced;
+  /** \private Reserved for the core. Used internally to list backends topology->backends. */
+  struct hwloc_backend * next;
+
+  /** \brief Backend flags, currently always 0. 
*/
+  unsigned long flags;
+
+  /** \brief Backend-specific 'is_thissystem' property.
+   * Set to 0 or 1 if the backend should enforce the thissystem flag when it gets enabled.
+   * Set to -1 if the backend doesn't care (default). */
+  int is_thissystem;
+
+  /** \brief Backend private data, or NULL if none. */
+  void * private_data;
+  /** \brief Callback for freeing the private_data.
+   * May be NULL.
+   */
+  void (*disable)(struct hwloc_backend *backend);
+
+  /** \brief Main discovery callback.
+   * returns -1 on error, either because it couldn't add its objects to the existing topology,
+   * or because of an actual discovery/gathering failure.
+   * May be NULL.
+   */
+  int (*discover)(struct hwloc_backend *backend);
+
+  /** \brief Callback used by the PCI backend to retrieve the locality of a PCI object from the OS/cpu backend.
+   * May be NULL. */
+  int (*get_pci_busid_cpuset)(struct hwloc_backend *backend, struct hwloc_pcidev_attr_s *busid, hwloc_bitmap_t cpuset);
+};
+
+/** \brief Allocate a backend structure, set good default values, initialize backend->component and topology, etc.
+ * The caller will then modify whatever needed, and call hwloc_backend_enable().
+ */
+HWLOC_DECLSPEC struct hwloc_backend * hwloc_backend_alloc(struct hwloc_disc_component *component);
+
+/** \brief Enable a previously allocated and setup backend. */
+HWLOC_DECLSPEC int hwloc_backend_enable(struct hwloc_topology *topology, struct hwloc_backend *backend);
+
+/** @} */
+
+
+
+
+/** \defgroup hwlocality_generic_components Components and Plugins: Generic components
+ * @{
+ */
+
+/** \brief Generic component type */
+typedef enum hwloc_component_type_e {
+  /** \brief The data field must point to a struct hwloc_disc_component. */
+  HWLOC_COMPONENT_TYPE_DISC,
+
+  /** \brief The data field must point to a struct hwloc_xml_component. 
*/ + HWLOC_COMPONENT_TYPE_XML +} hwloc_component_type_t; + +/** \brief Generic component structure + * + * Generic components structure, either statically listed by configure in static-components.h + * or dynamically loaded as a plugin. + */ +struct hwloc_component { + /** \brief Component ABI version, set to ::HWLOC_COMPONENT_ABI */ + unsigned abi; + + /** \brief Process-wide component initialization callback. + * + * This optional callback is called when the component is registered + * to the hwloc core (after loading the plugin). + * + * When the component is built as a plugin, this callback + * should call hwloc_check_plugin_namespace() + * and return an negative error code on error. + * + * \p flags is always 0 for now. + * + * \return 0 on success, or a negative code on error. + * + * \note If the component uses ltdl for loading its own plugins, + * it should load/unload them only in init() and finalize(), + * to avoid race conditions with hwloc's use of ltdl. + */ + int (*init)(unsigned long flags); + + /** \brief Process-wide component termination callback. + * + * This optional callback is called after unregistering the component + * from the hwloc core (before unloading the plugin). + * + * \p flags is always 0 for now. + * + * \note If the component uses ltdl for loading its own plugins, + * it should load/unload them only in init() and finalize(), + * to avoid race conditions with hwloc's use of ltdl. + */ + void (*finalize)(unsigned long flags); + + /** \brief Component type */ + hwloc_component_type_t type; + + /** \brief Component flags, unused for now */ + unsigned long flags; + + /** \brief Component data, pointing to a struct hwloc_disc_component or struct hwloc_xml_component. */ + void * data; +}; + +/** @} */ + + + + +/** \defgroup hwlocality_components_core_funcs Components and Plugins: Core functions to be used by components + * @{ + */ + +/** \brief Add an object to the topology. 
+ * + * It is sorted along the tree of other objects according to the inclusion of + * cpusets, to eventually be added as a child of the smallest object including + * this object. + * + * If the cpuset is empty, the type of the object (and maybe some attributes) + * must be enough to find where to insert the object. This is especially true + * for NUMA nodes with memory and no CPUs. + * + * The given object should not have children. + * + * This shall only be called before levels are built. + * + * In case of error, hwloc_report_os_error() is called. + * + * The caller should check whether the object type is filtered-out before calling this function. + * + * The topology cpuset/nodesets will be enlarged to include the object sets. + * + * Returns the object on success. + * Returns NULL and frees obj on error. + * Returns another object and frees obj if it was merged with an identical pre-existing object. + */ +HWLOC_DECLSPEC struct hwloc_obj *hwloc_insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t obj); + +/** \brief Type of error callbacks during object insertion */ +typedef void (*hwloc_report_error_t)(const char * msg, int line); +/** \brief Report an insertion error from a backend */ +HWLOC_DECLSPEC void hwloc_report_os_error(const char * msg, int line); +/** \brief Check whether insertion errors are hidden */ +HWLOC_DECLSPEC int hwloc_hide_errors(void); + +/** \brief Add an object to the topology and specify which error callback to use. + * + * This function is similar to hwloc_insert_object_by_cpuset() but it allows specifying + * where to start insertion from (if \p root is NULL, the topology root object is used), + * and specifying the error callback. + */ +HWLOC_DECLSPEC struct hwloc_obj *hwloc__insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t root, hwloc_obj_t obj, hwloc_report_error_t report_error); + +/** \brief Insert an object somewhere in the topology. + * + * It is added as the last child of the given parent. 
+ * The cpuset is completely ignored, so strange objects such as I/O devices should
+ * preferably be inserted with this.
+ *
+ * When used for "normal" children with cpusets (when importing from XML
+ * or when duplicating a topology), the caller should make sure that:
+ * - children are inserted in order,
+ * - children cpusets do not intersect.
+ *
+ * The given object may have normal, I/O or Misc children, as long as they are in order as well.
+ * These children must have valid parent and next_sibling pointers.
+ *
+ * The caller should check whether the object type is filtered-out before calling this function.
+ */
+HWLOC_DECLSPEC void hwloc_insert_object_by_parent(struct hwloc_topology *topology, hwloc_obj_t parent, hwloc_obj_t obj);
+
+/** \brief Allocate and initialize an object of the given type and physical index.
+ *
+ * If \p os_index is unknown or irrelevant, use \c HWLOC_UNKNOWN_INDEX.
+ */
+HWLOC_DECLSPEC hwloc_obj_t hwloc_alloc_setup_object(hwloc_topology_t topology, hwloc_obj_type_t type, unsigned os_index);
+
+/** \brief Setup object cpusets/nodesets by OR'ing its children.
+ *
+ * Used when adding an object late in the topology.
+ * Will update the new object by OR'ing all its new children sets.
+ *
+ * Used when PCI backend adds a hostbridge parent, when distances
+ * add a new Group, etc.
+ */
+HWLOC_DECLSPEC int hwloc_obj_add_children_sets(hwloc_obj_t obj);
+
+/** \brief Request a reconnection of children and levels in the topology.
+ *
+ * May be used by backends during discovery if they need arrays or lists
+ * of objects within levels or children to be fully connected.
+ *
+ * \p flags is currently unused, must be 0.
+ */
+HWLOC_DECLSPEC int hwloc_topology_reconnect(hwloc_topology_t topology, unsigned long flags __hwloc_attribute_unused);
+
+/** \brief Make sure that plugins can lookup core symbols.
+ *
+ * This is a sanity check to avoid lazy-lookup failures when libhwloc
+ * is loaded within a plugin, and later tries to load its own plugins. 
+ * This may fail (and abort the program) if libhwloc symbols are in a + * private namespace. + * + * \return 0 on success. + * \return -1 if the plugin cannot be successfully loaded. The caller + * plugin init() callback should return a negative error code as well. + * + * Plugins should call this function in their init() callback to avoid + * later crashes if lazy symbol resolution is used by the upper layer that + * loaded hwloc (e.g. OpenCL implementations using dlopen with RTLD_LAZY). + * + * \note The build system must define HWLOC_INSIDE_PLUGIN if and only if + * building the caller as a plugin. + * + * \note This function should remain inline so plugins can call it even + * when they cannot find libhwloc symbols. + */ +static __hwloc_inline int +hwloc_plugin_check_namespace(const char *pluginname __hwloc_attribute_unused, const char *symbol __hwloc_attribute_unused) +{ +#ifdef HWLOC_INSIDE_PLUGIN + lt_dlhandle handle; + void *sym; + handle = lt_dlopen(NULL); + if (!handle) + /* cannot check, assume things will work */ + return 0; + sym = lt_dlsym(handle, symbol); + lt_dlclose(handle); + if (!sym) { + static int verboseenv_checked = 0; + static int verboseenv_value = 0; + if (!verboseenv_checked) { + const char *verboseenv = getenv("HWLOC_PLUGINS_VERBOSE"); + verboseenv_value = verboseenv ? atoi(verboseenv) : 0; + verboseenv_checked = 1; + } + if (verboseenv_value) + fprintf(stderr, "Plugin `%s' disabling itself because it cannot find the `%s' core symbol.\n", + pluginname, symbol); + return -1; + } +#endif /* HWLOC_INSIDE_PLUGIN */ + return 0; +} + +/** @} */ + + + + +/** \defgroup hwlocality_components_filtering Components and Plugins: Filtering objects + * @{ + */ + +/** \brief Check whether the given PCI device classid is important. + * + * \return 1 if important, 0 otherwise. 
+ */ +static __hwloc_inline int +hwloc_filter_check_pcidev_subtype_important(unsigned classid) +{ + unsigned baseclass = classid >> 8; + return (baseclass == 0x03 /* PCI_BASE_CLASS_DISPLAY */ + || baseclass == 0x02 /* PCI_BASE_CLASS_NETWORK */ + || baseclass == 0x01 /* PCI_BASE_CLASS_STORAGE */ + || baseclass == 0x0b /* PCI_BASE_CLASS_PROCESSOR */ + || classid == 0x0c04 /* PCI_CLASS_SERIAL_FIBER */ + || classid == 0x0c06 /* PCI_CLASS_SERIAL_INFINIBAND */ + || baseclass == 0x12 /* Processing Accelerators */); +} + +/** \brief Check whether the given OS device subtype is important. + * + * \return 1 if important, 0 otherwise. + */ +static __hwloc_inline int +hwloc_filter_check_osdev_subtype_important(hwloc_obj_osdev_type_t subtype) +{ + return (subtype != HWLOC_OBJ_OSDEV_DMA); +} + +/** \brief Check whether a non-I/O object type should be filtered-out. + * + * Cannot be used for I/O objects. + * + * \return 1 if the object type should be kept, 0 otherwise. + */ +static __hwloc_inline int +hwloc_filter_check_keep_object_type(hwloc_topology_t topology, hwloc_obj_type_t type) +{ + enum hwloc_type_filter_e filter = HWLOC_TYPE_FILTER_KEEP_NONE; + hwloc_topology_get_type_filter(topology, type, &filter); + assert(filter != HWLOC_TYPE_FILTER_KEEP_IMPORTANT); /* IMPORTANT only used for I/O */ + return filter == HWLOC_TYPE_FILTER_KEEP_NONE ? 0 : 1; +} + +/** \brief Check whether the given object should be filtered-out. + * + * \return 1 if the object type should be kept, 0 otherwise. 
+ */ +static __hwloc_inline int +hwloc_filter_check_keep_object(hwloc_topology_t topology, hwloc_obj_t obj) +{ + hwloc_obj_type_t type = obj->type; + enum hwloc_type_filter_e filter = HWLOC_TYPE_FILTER_KEEP_NONE; + hwloc_topology_get_type_filter(topology, type, &filter); + if (filter == HWLOC_TYPE_FILTER_KEEP_NONE) + return 0; + if (filter == HWLOC_TYPE_FILTER_KEEP_IMPORTANT) { + if (type == HWLOC_OBJ_PCI_DEVICE) + return hwloc_filter_check_pcidev_subtype_important(obj->attr->pcidev.class_id); + if (type == HWLOC_OBJ_OS_DEVICE) + return hwloc_filter_check_osdev_subtype_important(obj->attr->osdev.type); + } + return 1; +} + +/** @} */ + + + + +/** \defgroup hwlocality_components_pcidisc Components and Plugins: helpers for PCI discovery + * @{ + */ + +/** \brief Return the offset of the given capability in the PCI config space buffer + * + * This function requires a 256-bytes config space. Unknown/unavailable bytes should be set to 0xff. + */ +HWLOC_DECLSPEC unsigned hwloc_pcidisc_find_cap(const unsigned char *config, unsigned cap); + +/** \brief Fill linkspeed by reading the PCI config space where PCI_CAP_ID_EXP is at position offset. + * + * Needs 20 bytes of EXP capability block starting at offset in the config space + * for registers up to link status. + */ +HWLOC_DECLSPEC int hwloc_pcidisc_find_linkspeed(const unsigned char *config, unsigned offset, float *linkspeed); + +/** \brief Return the hwloc object type (PCI device or Bridge) for the given class and configuration space. + * + * This function requires 16 bytes of common configuration header at the beginning of config. + */ +HWLOC_DECLSPEC hwloc_obj_type_t hwloc_pcidisc_check_bridge_type(unsigned device_class, const unsigned char *config); + +/** \brief Fills the attributes of the given PCI bridge using the given PCI config space. + * + * This function requires 32 bytes of common configuration header at the beginning of config. + * + * Returns -1 and destroys /p obj if bridge fields are invalid. 
+ */ +HWLOC_DECLSPEC int hwloc_pcidisc_setup_bridge_attr(hwloc_obj_t obj, const unsigned char *config); + +/** \brief Insert a PCI object in the given PCI tree by looking at PCI bus IDs. + * + * If \p treep points to \c NULL, the new object is inserted there. + */ +HWLOC_DECLSPEC void hwloc_pcidisc_tree_insert_by_busid(struct hwloc_obj **treep, struct hwloc_obj *obj); + +/** \brief Add some hostbridges on top of the given tree of PCI objects and attach them to the topology. + * + * For now, they will be attached to the root object. The core will move them to their actual PCI + * locality using hwloc_pci_belowroot_apply_locality() at the end of the discovery. + * + * In the meantime, other backends lookup PCI objects or localities (for instance to attach OS devices) + * by using hwloc_pcidisc_find_by_busid() or hwloc_pcidisc_find_busid_parent(). + */ +HWLOC_DECLSPEC int hwloc_pcidisc_tree_attach(struct hwloc_topology *topology, struct hwloc_obj *tree); + +/** @} */ + + + + +/** \defgroup hwlocality_components_pcifind Components and Plugins: finding PCI objects during other discoveries + * @{ + */ + +/** \brief Find the PCI object that matches the bus ID. + * + * To be used after a PCI backend added PCI devices with hwloc_pcidisc_tree_attach() + * and before the core moves them to their actual location with hwloc_pci_belowroot_apply_locality(). + * + * If no exactly matching object is found, return the container bridge if any, or NULL. + * + * On failure, it may be possible to find the PCI locality (instead of the PCI device) + * by calling hwloc_pcidisc_find_busid_parent(). + * + * \note This is semantically identical to hwloc_get_pcidev_by_busid() which only works + * after the topology is fully loaded. + */ +HWLOC_DECLSPEC struct hwloc_obj * hwloc_pcidisc_find_by_busid(struct hwloc_topology *topology, unsigned domain, unsigned bus, unsigned dev, unsigned func); + +/** \brief Find the normal parent of a PCI bus ID. 
+ * + * Look at PCI affinity to find out where the given PCI bus ID should be attached. + * + * This function should be used to attach an I/O device directly under a normal + * (non-I/O) object, instead of below a PCI object. + * It is usually used by backends when hwloc_pcidisc_find_by_busid() failed + * to find the hwloc object corresponding to this bus ID, for instance because + * PCI discovery is not supported on this platform. + */ +HWLOC_DECLSPEC struct hwloc_obj * hwloc_pcidisc_find_busid_parent(struct hwloc_topology *topology, unsigned domain, unsigned bus, unsigned dev, unsigned func); + +/** @} */ + + + + +#endif /* HWLOC_PLUGINS_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/rename.h b/src/3rdparty/hwloc/include/hwloc/rename.h new file mode 100644 index 00000000..7cef1b2e --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/rename.h @@ -0,0 +1,765 @@ +/* + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * Copyright © 2010-2018 Inria. All rights reserved. + * See COPYING in top-level directory. + */ + +#ifndef HWLOC_RENAME_H +#define HWLOC_RENAME_H + +#include + + +#ifdef __cplusplus +extern "C" { +#endif + + +/* Only enact these defines if we're actually renaming the symbols + (i.e., avoid trying to have no-op defines if we're *not* + renaming). */ + +#if HWLOC_SYM_TRANSFORM + +/* Use a preprocessor two-step in order to get the prefixing right. + Make 2 macros: HWLOC_NAME and HWLOC_NAME_CAPS for renaming + things. */ + +#define HWLOC_MUNGE_NAME(a, b) HWLOC_MUNGE_NAME2(a, b) +#define HWLOC_MUNGE_NAME2(a, b) a ## b +#define HWLOC_NAME(name) HWLOC_MUNGE_NAME(HWLOC_SYM_PREFIX, hwloc_ ## name) +#define HWLOC_NAME_CAPS(name) HWLOC_MUNGE_NAME(HWLOC_SYM_PREFIX_CAPS, hwloc_ ## name) + +/* Now define all the "real" names to be the prefixed names. This + allows us to use the real names throughout the code base (i.e., + "hwloc_"); the preprocessor will adjust to have the prefixed + name under the covers. 
*/ + +/* Names from hwloc.h */ + +#define hwloc_get_api_version HWLOC_NAME(get_api_version) + +#define hwloc_topology HWLOC_NAME(topology) +#define hwloc_topology_t HWLOC_NAME(topology_t) + +#define hwloc_cpuset_t HWLOC_NAME(cpuset_t) +#define hwloc_const_cpuset_t HWLOC_NAME(const_cpuset_t) +#define hwloc_nodeset_t HWLOC_NAME(nodeset_t) +#define hwloc_const_nodeset_t HWLOC_NAME(const_nodeset_t) + +#define HWLOC_OBJ_MACHINE HWLOC_NAME_CAPS(OBJ_MACHINE) +#define HWLOC_OBJ_NUMANODE HWLOC_NAME_CAPS(OBJ_NUMANODE) +#define HWLOC_OBJ_PACKAGE HWLOC_NAME_CAPS(OBJ_PACKAGE) +#define HWLOC_OBJ_CORE HWLOC_NAME_CAPS(OBJ_CORE) +#define HWLOC_OBJ_PU HWLOC_NAME_CAPS(OBJ_PU) +#define HWLOC_OBJ_L1CACHE HWLOC_NAME_CAPS(OBJ_L1CACHE) +#define HWLOC_OBJ_L2CACHE HWLOC_NAME_CAPS(OBJ_L2CACHE) +#define HWLOC_OBJ_L3CACHE HWLOC_NAME_CAPS(OBJ_L3CACHE) +#define HWLOC_OBJ_L4CACHE HWLOC_NAME_CAPS(OBJ_L4CACHE) +#define HWLOC_OBJ_L5CACHE HWLOC_NAME_CAPS(OBJ_L5CACHE) +#define HWLOC_OBJ_L1ICACHE HWLOC_NAME_CAPS(OBJ_L1ICACHE) +#define HWLOC_OBJ_L2ICACHE HWLOC_NAME_CAPS(OBJ_L2ICACHE) +#define HWLOC_OBJ_L3ICACHE HWLOC_NAME_CAPS(OBJ_L3ICACHE) +#define HWLOC_OBJ_MISC HWLOC_NAME_CAPS(OBJ_MISC) +#define HWLOC_OBJ_GROUP HWLOC_NAME_CAPS(OBJ_GROUP) +#define HWLOC_OBJ_BRIDGE HWLOC_NAME_CAPS(OBJ_BRIDGE) +#define HWLOC_OBJ_PCI_DEVICE HWLOC_NAME_CAPS(OBJ_PCI_DEVICE) +#define HWLOC_OBJ_OS_DEVICE HWLOC_NAME_CAPS(OBJ_OS_DEVICE) +#define HWLOC_OBJ_TYPE_MAX HWLOC_NAME_CAPS(OBJ_TYPE_MAX) +#define hwloc_obj_type_t HWLOC_NAME(obj_type_t) + +#define hwloc_obj_cache_type_e HWLOC_NAME(obj_cache_type_e) +#define hwloc_obj_cache_type_t HWLOC_NAME(obj_cache_type_t) +#define HWLOC_OBJ_CACHE_UNIFIED HWLOC_NAME_CAPS(OBJ_CACHE_UNIFIED) +#define HWLOC_OBJ_CACHE_DATA HWLOC_NAME_CAPS(OBJ_CACHE_DATA) +#define HWLOC_OBJ_CACHE_INSTRUCTION HWLOC_NAME_CAPS(OBJ_CACHE_INSTRUCTION) + +#define hwloc_obj_bridge_type_e HWLOC_NAME(obj_bridge_type_e) +#define hwloc_obj_bridge_type_t HWLOC_NAME(obj_bridge_type_t) +#define HWLOC_OBJ_BRIDGE_HOST 
HWLOC_NAME_CAPS(OBJ_BRIDGE_HOST) +#define HWLOC_OBJ_BRIDGE_PCI HWLOC_NAME_CAPS(OBJ_BRIDGE_PCI) + +#define hwloc_obj_osdev_type_e HWLOC_NAME(obj_osdev_type_e) +#define hwloc_obj_osdev_type_t HWLOC_NAME(obj_osdev_type_t) +#define HWLOC_OBJ_OSDEV_BLOCK HWLOC_NAME_CAPS(OBJ_OSDEV_BLOCK) +#define HWLOC_OBJ_OSDEV_GPU HWLOC_NAME_CAPS(OBJ_OSDEV_GPU) +#define HWLOC_OBJ_OSDEV_NETWORK HWLOC_NAME_CAPS(OBJ_OSDEV_NETWORK) +#define HWLOC_OBJ_OSDEV_OPENFABRICS HWLOC_NAME_CAPS(OBJ_OSDEV_OPENFABRICS) +#define HWLOC_OBJ_OSDEV_DMA HWLOC_NAME_CAPS(OBJ_OSDEV_DMA) +#define HWLOC_OBJ_OSDEV_COPROC HWLOC_NAME_CAPS(OBJ_OSDEV_COPROC) + +#define hwloc_compare_types HWLOC_NAME(compare_types) + +#define hwloc_compare_types_e HWLOC_NAME(compare_types_e) +#define HWLOC_TYPE_UNORDERED HWLOC_NAME_CAPS(TYPE_UNORDERED) + +#define hwloc_obj HWLOC_NAME(obj) +#define hwloc_obj_t HWLOC_NAME(obj_t) + +#define hwloc_info_s HWLOC_NAME(info_s) + +#define hwloc_obj_attr_u HWLOC_NAME(obj_attr_u) +#define hwloc_numanode_attr_s HWLOC_NAME(numanode_attr_s) +#define hwloc_memory_page_type_s HWLOC_NAME(memory_page_type_s) +#define hwloc_cache_attr_s HWLOC_NAME(cache_attr_s) +#define hwloc_group_attr_s HWLOC_NAME(group_attr_s) +#define hwloc_pcidev_attr_s HWLOC_NAME(pcidev_attr_s) +#define hwloc_bridge_attr_s HWLOC_NAME(bridge_attr_s) +#define hwloc_osdev_attr_s HWLOC_NAME(osdev_attr_s) + +#define hwloc_topology_init HWLOC_NAME(topology_init) +#define hwloc_topology_load HWLOC_NAME(topology_load) +#define hwloc_topology_destroy HWLOC_NAME(topology_destroy) +#define hwloc_topology_dup HWLOC_NAME(topology_dup) +#define hwloc_topology_abi_check HWLOC_NAME(topology_abi_check) +#define hwloc_topology_check HWLOC_NAME(topology_check) + +#define hwloc_topology_flags_e HWLOC_NAME(topology_flags_e) + +#define HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM HWLOC_NAME_CAPS(TOPOLOGY_FLAG_WHOLE_SYSTEM) +#define HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM HWLOC_NAME_CAPS(TOPOLOGY_FLAG_IS_THISSYSTEM) +#define 
HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES HWLOC_NAME_CAPS(TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES) + +#define hwloc_topology_set_pid HWLOC_NAME(topology_set_pid) +#define hwloc_topology_set_synthetic HWLOC_NAME(topology_set_synthetic) +#define hwloc_topology_set_xml HWLOC_NAME(topology_set_xml) +#define hwloc_topology_set_xmlbuffer HWLOC_NAME(topology_set_xmlbuffer) + +#define hwloc_topology_set_flags HWLOC_NAME(topology_set_flags) +#define hwloc_topology_is_thissystem HWLOC_NAME(topology_is_thissystem) +#define hwloc_topology_get_flags HWLOC_NAME(topology_get_flags) +#define hwloc_topology_discovery_support HWLOC_NAME(topology_discovery_support) +#define hwloc_topology_cpubind_support HWLOC_NAME(topology_cpubind_support) +#define hwloc_topology_membind_support HWLOC_NAME(topology_membind_support) +#define hwloc_topology_support HWLOC_NAME(topology_support) +#define hwloc_topology_get_support HWLOC_NAME(topology_get_support) + +#define hwloc_type_filter_e HWLOC_NAME(type_filter_e) +#define HWLOC_TYPE_FILTER_KEEP_ALL HWLOC_NAME_CAPS(TYPE_FILTER_KEEP_ALL) +#define HWLOC_TYPE_FILTER_KEEP_NONE HWLOC_NAME_CAPS(TYPE_FILTER_KEEP_NONE) +#define HWLOC_TYPE_FILTER_KEEP_STRUCTURE HWLOC_NAME_CAPS(TYPE_FILTER_KEEP_STRUCTURE) +#define HWLOC_TYPE_FILTER_KEEP_IMPORTANT HWLOC_NAME_CAPS(TYPE_FILTER_KEEP_IMPORTANT) +#define hwloc_topology_set_type_filter HWLOC_NAME(topology_set_type_filter) +#define hwloc_topology_get_type_filter HWLOC_NAME(topology_get_type_filter) +#define hwloc_topology_set_all_types_filter HWLOC_NAME(topology_set_all_types_filter) +#define hwloc_topology_set_cache_types_filter HWLOC_NAME(topology_set_cache_types_filter) +#define hwloc_topology_set_icache_types_filter HWLOC_NAME(topology_set_icache_types_filter) +#define hwloc_topology_set_io_types_filter HWLOC_NAME(topology_set_io_types_filter) + +#define hwloc_topology_set_userdata HWLOC_NAME(topology_set_userdata) +#define hwloc_topology_get_userdata HWLOC_NAME(topology_get_userdata) + +#define 
hwloc_restrict_flags_e HWLOC_NAME(restrict_flags_e) +#define HWLOC_RESTRICT_FLAG_REMOVE_CPULESS HWLOC_NAME_CAPS(RESTRICT_FLAG_REMOVE_CPULESS) +#define HWLOC_RESTRICT_FLAG_ADAPT_MISC HWLOC_NAME_CAPS(RESTRICT_FLAG_ADAPT_MISC) +#define HWLOC_RESTRICT_FLAG_ADAPT_IO HWLOC_NAME_CAPS(RESTRICT_FLAG_ADAPT_IO) +#define hwloc_topology_restrict HWLOC_NAME(topology_restrict) + +#define hwloc_topology_insert_misc_object HWLOC_NAME(topology_insert_misc_object) +#define hwloc_topology_alloc_group_object HWLOC_NAME(topology_alloc_group_object) +#define hwloc_topology_insert_group_object HWLOC_NAME(topology_insert_group_object) +#define hwloc_obj_add_other_obj_sets HWLOC_NAME(obj_add_other_obj_sets) + +#define hwloc_topology_get_depth HWLOC_NAME(topology_get_depth) +#define hwloc_get_type_depth HWLOC_NAME(get_type_depth) +#define hwloc_get_memory_parents_depth HWLOC_NAME(get_memory_parents_depth) + +#define hwloc_get_type_depth_e HWLOC_NAME(get_type_depth_e) +#define HWLOC_TYPE_DEPTH_UNKNOWN HWLOC_NAME_CAPS(TYPE_DEPTH_UNKNOWN) +#define HWLOC_TYPE_DEPTH_MULTIPLE HWLOC_NAME_CAPS(TYPE_DEPTH_MULTIPLE) +#define HWLOC_TYPE_DEPTH_BRIDGE HWLOC_NAME_CAPS(TYPE_DEPTH_BRIDGE) +#define HWLOC_TYPE_DEPTH_PCI_DEVICE HWLOC_NAME_CAPS(TYPE_DEPTH_PCI_DEVICE) +#define HWLOC_TYPE_DEPTH_OS_DEVICE HWLOC_NAME_CAPS(TYPE_DEPTH_OS_DEVICE) +#define HWLOC_TYPE_DEPTH_MISC HWLOC_NAME_CAPS(TYPE_DEPTH_MISC) +#define HWLOC_TYPE_DEPTH_NUMANODE HWLOC_NAME_CAPS(TYPE_DEPTH_NUMANODE) + +#define hwloc_get_depth_type HWLOC_NAME(get_depth_type) +#define hwloc_get_nbobjs_by_depth HWLOC_NAME(get_nbobjs_by_depth) +#define hwloc_get_nbobjs_by_type HWLOC_NAME(get_nbobjs_by_type) + +#define hwloc_get_obj_by_depth HWLOC_NAME(get_obj_by_depth ) +#define hwloc_get_obj_by_type HWLOC_NAME(get_obj_by_type ) + +#define hwloc_obj_type_string HWLOC_NAME(obj_type_string ) +#define hwloc_obj_type_snprintf HWLOC_NAME(obj_type_snprintf ) +#define hwloc_obj_attr_snprintf HWLOC_NAME(obj_attr_snprintf ) +#define hwloc_type_sscanf 
HWLOC_NAME(type_sscanf) +#define hwloc_type_sscanf_as_depth HWLOC_NAME(type_sscanf_as_depth) + +#define hwloc_obj_get_info_by_name HWLOC_NAME(obj_get_info_by_name) +#define hwloc_obj_add_info HWLOC_NAME(obj_add_info) + +#define HWLOC_CPUBIND_PROCESS HWLOC_NAME_CAPS(CPUBIND_PROCESS) +#define HWLOC_CPUBIND_THREAD HWLOC_NAME_CAPS(CPUBIND_THREAD) +#define HWLOC_CPUBIND_STRICT HWLOC_NAME_CAPS(CPUBIND_STRICT) +#define HWLOC_CPUBIND_NOMEMBIND HWLOC_NAME_CAPS(CPUBIND_NOMEMBIND) + +#define hwloc_cpubind_flags_t HWLOC_NAME(cpubind_flags_t) + +#define hwloc_set_cpubind HWLOC_NAME(set_cpubind) +#define hwloc_get_cpubind HWLOC_NAME(get_cpubind) +#define hwloc_set_proc_cpubind HWLOC_NAME(set_proc_cpubind) +#define hwloc_get_proc_cpubind HWLOC_NAME(get_proc_cpubind) +#define hwloc_set_thread_cpubind HWLOC_NAME(set_thread_cpubind) +#define hwloc_get_thread_cpubind HWLOC_NAME(get_thread_cpubind) + +#define hwloc_get_last_cpu_location HWLOC_NAME(get_last_cpu_location) +#define hwloc_get_proc_last_cpu_location HWLOC_NAME(get_proc_last_cpu_location) + +#define HWLOC_MEMBIND_DEFAULT HWLOC_NAME_CAPS(MEMBIND_DEFAULT) +#define HWLOC_MEMBIND_FIRSTTOUCH HWLOC_NAME_CAPS(MEMBIND_FIRSTTOUCH) +#define HWLOC_MEMBIND_BIND HWLOC_NAME_CAPS(MEMBIND_BIND) +#define HWLOC_MEMBIND_INTERLEAVE HWLOC_NAME_CAPS(MEMBIND_INTERLEAVE) +#define HWLOC_MEMBIND_NEXTTOUCH HWLOC_NAME_CAPS(MEMBIND_NEXTTOUCH) +#define HWLOC_MEMBIND_MIXED HWLOC_NAME_CAPS(MEMBIND_MIXED) + +#define hwloc_membind_policy_t HWLOC_NAME(membind_policy_t) + +#define HWLOC_MEMBIND_PROCESS HWLOC_NAME_CAPS(MEMBIND_PROCESS) +#define HWLOC_MEMBIND_THREAD HWLOC_NAME_CAPS(MEMBIND_THREAD) +#define HWLOC_MEMBIND_STRICT HWLOC_NAME_CAPS(MEMBIND_STRICT) +#define HWLOC_MEMBIND_MIGRATE HWLOC_NAME_CAPS(MEMBIND_MIGRATE) +#define HWLOC_MEMBIND_NOCPUBIND HWLOC_NAME_CAPS(MEMBIND_NOCPUBIND) +#define HWLOC_MEMBIND_BYNODESET HWLOC_NAME_CAPS(MEMBIND_BYNODESET) + +#define hwloc_membind_flags_t HWLOC_NAME(membind_flags_t) + +#define hwloc_set_membind 
HWLOC_NAME(set_membind) +#define hwloc_get_membind HWLOC_NAME(get_membind) +#define hwloc_set_proc_membind HWLOC_NAME(set_proc_membind) +#define hwloc_get_proc_membind HWLOC_NAME(get_proc_membind) +#define hwloc_set_area_membind HWLOC_NAME(set_area_membind) +#define hwloc_get_area_membind HWLOC_NAME(get_area_membind) +#define hwloc_get_area_memlocation HWLOC_NAME(get_area_memlocation) +#define hwloc_alloc_membind HWLOC_NAME(alloc_membind) +#define hwloc_alloc HWLOC_NAME(alloc) +#define hwloc_free HWLOC_NAME(free) + +#define hwloc_get_non_io_ancestor_obj HWLOC_NAME(get_non_io_ancestor_obj) +#define hwloc_get_next_pcidev HWLOC_NAME(get_next_pcidev) +#define hwloc_get_pcidev_by_busid HWLOC_NAME(get_pcidev_by_busid) +#define hwloc_get_pcidev_by_busidstring HWLOC_NAME(get_pcidev_by_busidstring) +#define hwloc_get_next_osdev HWLOC_NAME(get_next_osdev) +#define hwloc_get_next_bridge HWLOC_NAME(get_next_bridge) +#define hwloc_bridge_covers_pcibus HWLOC_NAME(bridge_covers_pcibus) + +/* hwloc/bitmap.h */ + +#define hwloc_bitmap_s HWLOC_NAME(bitmap_s) +#define hwloc_bitmap_t HWLOC_NAME(bitmap_t) +#define hwloc_const_bitmap_t HWLOC_NAME(const_bitmap_t) + +#define hwloc_bitmap_alloc HWLOC_NAME(bitmap_alloc) +#define hwloc_bitmap_alloc_full HWLOC_NAME(bitmap_alloc_full) +#define hwloc_bitmap_free HWLOC_NAME(bitmap_free) +#define hwloc_bitmap_dup HWLOC_NAME(bitmap_dup) +#define hwloc_bitmap_copy HWLOC_NAME(bitmap_copy) +#define hwloc_bitmap_snprintf HWLOC_NAME(bitmap_snprintf) +#define hwloc_bitmap_asprintf HWLOC_NAME(bitmap_asprintf) +#define hwloc_bitmap_sscanf HWLOC_NAME(bitmap_sscanf) +#define hwloc_bitmap_list_snprintf HWLOC_NAME(bitmap_list_snprintf) +#define hwloc_bitmap_list_asprintf HWLOC_NAME(bitmap_list_asprintf) +#define hwloc_bitmap_list_sscanf HWLOC_NAME(bitmap_list_sscanf) +#define hwloc_bitmap_taskset_snprintf HWLOC_NAME(bitmap_taskset_snprintf) +#define hwloc_bitmap_taskset_asprintf HWLOC_NAME(bitmap_taskset_asprintf) +#define hwloc_bitmap_taskset_sscanf 
HWLOC_NAME(bitmap_taskset_sscanf) +#define hwloc_bitmap_zero HWLOC_NAME(bitmap_zero) +#define hwloc_bitmap_fill HWLOC_NAME(bitmap_fill) +#define hwloc_bitmap_from_ulong HWLOC_NAME(bitmap_from_ulong) + +#define hwloc_bitmap_from_ith_ulong HWLOC_NAME(bitmap_from_ith_ulong) +#define hwloc_bitmap_to_ulong HWLOC_NAME(bitmap_to_ulong) +#define hwloc_bitmap_to_ith_ulong HWLOC_NAME(bitmap_to_ith_ulong) +#define hwloc_bitmap_only HWLOC_NAME(bitmap_only) +#define hwloc_bitmap_allbut HWLOC_NAME(bitmap_allbut) +#define hwloc_bitmap_set HWLOC_NAME(bitmap_set) +#define hwloc_bitmap_set_range HWLOC_NAME(bitmap_set_range) +#define hwloc_bitmap_set_ith_ulong HWLOC_NAME(bitmap_set_ith_ulong) +#define hwloc_bitmap_clr HWLOC_NAME(bitmap_clr) +#define hwloc_bitmap_clr_range HWLOC_NAME(bitmap_clr_range) +#define hwloc_bitmap_isset HWLOC_NAME(bitmap_isset) +#define hwloc_bitmap_iszero HWLOC_NAME(bitmap_iszero) +#define hwloc_bitmap_isfull HWLOC_NAME(bitmap_isfull) +#define hwloc_bitmap_isequal HWLOC_NAME(bitmap_isequal) +#define hwloc_bitmap_intersects HWLOC_NAME(bitmap_intersects) +#define hwloc_bitmap_isincluded HWLOC_NAME(bitmap_isincluded) +#define hwloc_bitmap_or HWLOC_NAME(bitmap_or) +#define hwloc_bitmap_and HWLOC_NAME(bitmap_and) +#define hwloc_bitmap_andnot HWLOC_NAME(bitmap_andnot) +#define hwloc_bitmap_xor HWLOC_NAME(bitmap_xor) +#define hwloc_bitmap_not HWLOC_NAME(bitmap_not) +#define hwloc_bitmap_first HWLOC_NAME(bitmap_first) +#define hwloc_bitmap_last HWLOC_NAME(bitmap_last) +#define hwloc_bitmap_next HWLOC_NAME(bitmap_next) +#define hwloc_bitmap_first_unset HWLOC_NAME(bitmap_first_unset) +#define hwloc_bitmap_last_unset HWLOC_NAME(bitmap_last_unset) +#define hwloc_bitmap_next_unset HWLOC_NAME(bitmap_next_unset) +#define hwloc_bitmap_singlify HWLOC_NAME(bitmap_singlify) +#define hwloc_bitmap_compare_first HWLOC_NAME(bitmap_compare_first) +#define hwloc_bitmap_compare HWLOC_NAME(bitmap_compare) +#define hwloc_bitmap_weight HWLOC_NAME(bitmap_weight) + +/* hwloc/helper.h */ + 
+#define hwloc_get_type_or_below_depth HWLOC_NAME(get_type_or_below_depth) +#define hwloc_get_type_or_above_depth HWLOC_NAME(get_type_or_above_depth) +#define hwloc_get_root_obj HWLOC_NAME(get_root_obj) +#define hwloc_get_ancestor_obj_by_depth HWLOC_NAME(get_ancestor_obj_by_depth) +#define hwloc_get_ancestor_obj_by_type HWLOC_NAME(get_ancestor_obj_by_type) +#define hwloc_get_next_obj_by_depth HWLOC_NAME(get_next_obj_by_depth) +#define hwloc_get_next_obj_by_type HWLOC_NAME(get_next_obj_by_type) +#define hwloc_get_pu_obj_by_os_index HWLOC_NAME(get_pu_obj_by_os_index) +#define hwloc_get_numanode_obj_by_os_index HWLOC_NAME(get_numanode_obj_by_os_index) +#define hwloc_get_next_child HWLOC_NAME(get_next_child) +#define hwloc_get_common_ancestor_obj HWLOC_NAME(get_common_ancestor_obj) +#define hwloc_obj_is_in_subtree HWLOC_NAME(obj_is_in_subtree) +#define hwloc_get_first_largest_obj_inside_cpuset HWLOC_NAME(get_first_largest_obj_inside_cpuset) +#define hwloc_get_largest_objs_inside_cpuset HWLOC_NAME(get_largest_objs_inside_cpuset) +#define hwloc_get_next_obj_inside_cpuset_by_depth HWLOC_NAME(get_next_obj_inside_cpuset_by_depth) +#define hwloc_get_next_obj_inside_cpuset_by_type HWLOC_NAME(get_next_obj_inside_cpuset_by_type) +#define hwloc_get_obj_inside_cpuset_by_depth HWLOC_NAME(get_obj_inside_cpuset_by_depth) +#define hwloc_get_obj_inside_cpuset_by_type HWLOC_NAME(get_obj_inside_cpuset_by_type) +#define hwloc_get_nbobjs_inside_cpuset_by_depth HWLOC_NAME(get_nbobjs_inside_cpuset_by_depth) +#define hwloc_get_nbobjs_inside_cpuset_by_type HWLOC_NAME(get_nbobjs_inside_cpuset_by_type) +#define hwloc_get_obj_index_inside_cpuset HWLOC_NAME(get_obj_index_inside_cpuset) +#define hwloc_get_child_covering_cpuset HWLOC_NAME(get_child_covering_cpuset) +#define hwloc_get_obj_covering_cpuset HWLOC_NAME(get_obj_covering_cpuset) +#define hwloc_get_next_obj_covering_cpuset_by_depth HWLOC_NAME(get_next_obj_covering_cpuset_by_depth) +#define hwloc_get_next_obj_covering_cpuset_by_type 
HWLOC_NAME(get_next_obj_covering_cpuset_by_type) +#define hwloc_obj_type_is_normal HWLOC_NAME(obj_type_is_normal) +#define hwloc_obj_type_is_memory HWLOC_NAME(obj_type_is_memory) +#define hwloc_obj_type_is_io HWLOC_NAME(obj_type_is_io) +#define hwloc_obj_type_is_cache HWLOC_NAME(obj_type_is_cache) +#define hwloc_obj_type_is_dcache HWLOC_NAME(obj_type_is_dcache) +#define hwloc_obj_type_is_icache HWLOC_NAME(obj_type_is_icache) +#define hwloc_get_cache_type_depth HWLOC_NAME(get_cache_type_depth) +#define hwloc_get_cache_covering_cpuset HWLOC_NAME(get_cache_covering_cpuset) +#define hwloc_get_shared_cache_covering_obj HWLOC_NAME(get_shared_cache_covering_obj) +#define hwloc_get_closest_objs HWLOC_NAME(get_closest_objs) +#define hwloc_get_obj_below_by_type HWLOC_NAME(get_obj_below_by_type) +#define hwloc_get_obj_below_array_by_type HWLOC_NAME(get_obj_below_array_by_type) +#define hwloc_distrib_flags_e HWLOC_NAME(distrib_flags_e) +#define HWLOC_DISTRIB_FLAG_REVERSE HWLOC_NAME_CAPS(DISTRIB_FLAG_REVERSE) +#define hwloc_distrib HWLOC_NAME(distrib) +#define hwloc_alloc_membind_policy HWLOC_NAME(alloc_membind_policy) +#define hwloc_alloc_membind_policy_nodeset HWLOC_NAME(alloc_membind_policy_nodeset) +#define hwloc_topology_get_complete_cpuset HWLOC_NAME(topology_get_complete_cpuset) +#define hwloc_topology_get_topology_cpuset HWLOC_NAME(topology_get_topology_cpuset) +#define hwloc_topology_get_allowed_cpuset HWLOC_NAME(topology_get_allowed_cpuset) +#define hwloc_topology_get_complete_nodeset HWLOC_NAME(topology_get_complete_nodeset) +#define hwloc_topology_get_topology_nodeset HWLOC_NAME(topology_get_topology_nodeset) +#define hwloc_topology_get_allowed_nodeset HWLOC_NAME(topology_get_allowed_nodeset) +#define hwloc_cpuset_to_nodeset HWLOC_NAME(cpuset_to_nodeset) +#define hwloc_cpuset_from_nodeset HWLOC_NAME(cpuset_from_nodeset) + +/* export.h */ + +#define hwloc_topology_export_xml_flags_e HWLOC_NAME(topology_export_xml_flags_e) +#define HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1 
HWLOC_NAME_CAPS(TOPOLOGY_EXPORT_XML_FLAG_V1) +#define hwloc_topology_export_xml HWLOC_NAME(topology_export_xml) +#define hwloc_topology_export_xmlbuffer HWLOC_NAME(topology_export_xmlbuffer) +#define hwloc_free_xmlbuffer HWLOC_NAME(free_xmlbuffer) +#define hwloc_topology_set_userdata_export_callback HWLOC_NAME(topology_set_userdata_export_callback) +#define hwloc_export_obj_userdata HWLOC_NAME(export_obj_userdata) +#define hwloc_export_obj_userdata_base64 HWLOC_NAME(export_obj_userdata_base64) +#define hwloc_topology_set_userdata_import_callback HWLOC_NAME(topology_set_userdata_import_callback) + +#define hwloc_topology_export_synthetic_flags_e HWLOC_NAME(topology_export_synthetic_flags_e) +#define HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES HWLOC_NAME_CAPS(TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES) +#define HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS HWLOC_NAME_CAPS(TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS) +#define HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1 HWLOC_NAME_CAPS(TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1) +#define HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_IGNORE_MEMORY HWLOC_NAME_CAPS(TOPOLOGY_EXPORT_SYNTHETIC_FLAG_IGNORE_MEMORY) +#define hwloc_topology_export_synthetic HWLOC_NAME(topology_export_synthetic) + +/* distances.h */ + +#define hwloc_distances_s HWLOC_NAME(distances_s) + +#define hwloc_distances_kind_e HWLOC_NAME(distances_kind_e) +#define HWLOC_DISTANCES_KIND_FROM_OS HWLOC_NAME_CAPS(DISTANCES_KIND_FROM_OS) +#define HWLOC_DISTANCES_KIND_FROM_USER HWLOC_NAME_CAPS(DISTANCES_KIND_FROM_USER) +#define HWLOC_DISTANCES_KIND_MEANS_LATENCY HWLOC_NAME_CAPS(DISTANCES_KIND_MEANS_LATENCY) +#define HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH HWLOC_NAME_CAPS(DISTANCES_KIND_MEANS_BANDWIDTH) + +#define hwloc_distances_get HWLOC_NAME(distances_get) +#define hwloc_distances_get_by_depth HWLOC_NAME(distances_get_by_depth) +#define hwloc_distances_get_by_type HWLOC_NAME(distances_get_by_type) +#define hwloc_distances_release HWLOC_NAME(distances_release) +#define 
hwloc_distances_obj_index HWLOC_NAME(distances_obj_index) +#define hwloc_distances_obj_pair_values HWLOC_NAME(distances_pair_values) + +#define hwloc_distances_add_flag_e HWLOC_NAME(distances_add_flag_e) +#define HWLOC_DISTANCES_ADD_FLAG_GROUP HWLOC_NAME_CAPS(DISTANCES_ADD_FLAG_GROUP) +#define HWLOC_DISTANCES_ADD_FLAG_GROUP_INACCURATE HWLOC_NAME_CAPS(DISTANCES_ADD_FLAG_GROUP_INACCURATE) + +#define hwloc_distances_add HWLOC_NAME(distances_add) +#define hwloc_distances_remove HWLOC_NAME(distances_remove) +#define hwloc_distances_remove_by_depth HWLOC_NAME(distances_remove_by_depth) +#define hwloc_distances_remove_by_type HWLOC_NAME(distances_remove_by_type) + +/* diff.h */ + +#define hwloc_topology_diff_obj_attr_type_e HWLOC_NAME(topology_diff_obj_attr_type_e) +#define hwloc_topology_diff_obj_attr_type_t HWLOC_NAME(topology_diff_obj_attr_type_t) +#define HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_SIZE HWLOC_NAME_CAPS(TOPOLOGY_DIFF_OBJ_ATTR_SIZE) +#define HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_NAME HWLOC_NAME_CAPS(TOPOLOGY_DIFF_OBJ_ATTR_NAME) +#define HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO HWLOC_NAME_CAPS(TOPOLOGY_DIFF_OBJ_ATTR_INFO) +#define hwloc_topology_diff_obj_attr_u HWLOC_NAME(topology_diff_obj_attr_u) +#define hwloc_topology_diff_obj_attr_generic_s HWLOC_NAME(topology_diff_obj_attr_generic_s) +#define hwloc_topology_diff_obj_attr_uint64_s HWLOC_NAME(topology_diff_obj_attr_uint64_s) +#define hwloc_topology_diff_obj_attr_string_s HWLOC_NAME(topology_diff_obj_attr_string_s) +#define hwloc_topology_diff_type_e HWLOC_NAME(topology_diff_type_e) +#define hwloc_topology_diff_type_t HWLOC_NAME(topology_diff_type_t) +#define HWLOC_TOPOLOGY_DIFF_OBJ_ATTR HWLOC_NAME_CAPS(TOPOLOGY_DIFF_OBJ_ATTR) +#define HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX HWLOC_NAME_CAPS(TOPOLOGY_DIFF_TOO_COMPLEX) +#define hwloc_topology_diff_u HWLOC_NAME(topology_diff_u) +#define hwloc_topology_diff_t HWLOC_NAME(topology_diff_t) +#define hwloc_topology_diff_generic_s HWLOC_NAME(topology_diff_generic_s) +#define 
hwloc_topology_diff_obj_attr_s HWLOC_NAME(topology_diff_obj_attr_s) +#define hwloc_topology_diff_too_complex_s HWLOC_NAME(topology_diff_too_complex_s) +#define hwloc_topology_diff_build HWLOC_NAME(topology_diff_build) +#define hwloc_topology_diff_apply_flags_e HWLOC_NAME(topology_diff_apply_flags_e) +#define HWLOC_TOPOLOGY_DIFF_APPLY_REVERSE HWLOC_NAME_CAPS(TOPOLOGY_DIFF_APPLY_REVERSE) +#define hwloc_topology_diff_apply HWLOC_NAME(topology_diff_apply) +#define hwloc_topology_diff_destroy HWLOC_NAME(topology_diff_destroy) +#define hwloc_topology_diff_load_xml HWLOC_NAME(topology_diff_load_xml) +#define hwloc_topology_diff_export_xml HWLOC_NAME(topology_diff_export_xml) +#define hwloc_topology_diff_load_xmlbuffer HWLOC_NAME(topology_diff_load_xmlbuffer) +#define hwloc_topology_diff_export_xmlbuffer HWLOC_NAME(topology_diff_export_xmlbuffer) + +/* shmem.h */ + +#define hwloc_shmem_topology_get_length HWLOC_NAME(shmem_topology_get_length) +#define hwloc_shmem_topology_write HWLOC_NAME(shmem_topology_write) +#define hwloc_shmem_topology_adopt HWLOC_NAME(shmem_topology_adopt) + +/* glibc-sched.h */ + +#define hwloc_cpuset_to_glibc_sched_affinity HWLOC_NAME(cpuset_to_glibc_sched_affinity) +#define hwloc_cpuset_from_glibc_sched_affinity HWLOC_NAME(cpuset_from_glibc_sched_affinity) + +/* linux-libnuma.h */ + +#define hwloc_cpuset_to_linux_libnuma_ulongs HWLOC_NAME(cpuset_to_linux_libnuma_ulongs) +#define hwloc_nodeset_to_linux_libnuma_ulongs HWLOC_NAME(nodeset_to_linux_libnuma_ulongs) +#define hwloc_cpuset_from_linux_libnuma_ulongs HWLOC_NAME(cpuset_from_linux_libnuma_ulongs) +#define hwloc_nodeset_from_linux_libnuma_ulongs HWLOC_NAME(nodeset_from_linux_libnuma_ulongs) +#define hwloc_cpuset_to_linux_libnuma_bitmask HWLOC_NAME(cpuset_to_linux_libnuma_bitmask) +#define hwloc_nodeset_to_linux_libnuma_bitmask HWLOC_NAME(nodeset_to_linux_libnuma_bitmask) +#define hwloc_cpuset_from_linux_libnuma_bitmask HWLOC_NAME(cpuset_from_linux_libnuma_bitmask) +#define 
hwloc_nodeset_from_linux_libnuma_bitmask HWLOC_NAME(nodeset_from_linux_libnuma_bitmask) + +/* linux.h */ + +#define hwloc_linux_set_tid_cpubind HWLOC_NAME(linux_set_tid_cpubind) +#define hwloc_linux_get_tid_cpubind HWLOC_NAME(linux_get_tid_cpubind) +#define hwloc_linux_get_tid_last_cpu_location HWLOC_NAME(linux_get_tid_last_cpu_location) +#define hwloc_linux_read_path_as_cpumask HWLOC_NAME(linux_read_file_cpumask) + +/* openfabrics-verbs.h */ + +#define hwloc_ibv_get_device_cpuset HWLOC_NAME(ibv_get_device_cpuset) +#define hwloc_ibv_get_device_osdev HWLOC_NAME(ibv_get_device_osdev) +#define hwloc_ibv_get_device_osdev_by_name HWLOC_NAME(ibv_get_device_osdev_by_name) + +/* intel-mic.h */ + +#define hwloc_intel_mic_get_device_cpuset HWLOC_NAME(intel_mic_get_device_cpuset) +#define hwloc_intel_mic_get_device_osdev_by_index HWLOC_NAME(intel_mic_get_device_osdev_by_index) + +/* opencl.h */ + +#define hwloc_opencl_get_device_cpuset HWLOC_NAME(opencl_get_device_cpuset) +#define hwloc_opencl_get_device_osdev HWLOC_NAME(opencl_get_device_osdev) +#define hwloc_opencl_get_device_osdev_by_index HWLOC_NAME(opencl_get_device_osdev_by_index) + +/* cuda.h */ + +#define hwloc_cuda_get_device_pci_ids HWLOC_NAME(cuda_get_device_pci_ids) +#define hwloc_cuda_get_device_cpuset HWLOC_NAME(cuda_get_device_cpuset) +#define hwloc_cuda_get_device_pcidev HWLOC_NAME(cuda_get_device_pcidev) +#define hwloc_cuda_get_device_osdev HWLOC_NAME(cuda_get_device_osdev) +#define hwloc_cuda_get_device_osdev_by_index HWLOC_NAME(cuda_get_device_osdev_by_index) + +/* cudart.h */ + +#define hwloc_cudart_get_device_pci_ids HWLOC_NAME(cudart_get_device_pci_ids) +#define hwloc_cudart_get_device_cpuset HWLOC_NAME(cudart_get_device_cpuset) +#define hwloc_cudart_get_device_pcidev HWLOC_NAME(cudart_get_device_pcidev) +#define hwloc_cudart_get_device_osdev_by_index HWLOC_NAME(cudart_get_device_osdev_by_index) + +/* nvml.h */ + +#define hwloc_nvml_get_device_cpuset HWLOC_NAME(nvml_get_device_cpuset) +#define 
hwloc_nvml_get_device_osdev HWLOC_NAME(nvml_get_device_osdev) +#define hwloc_nvml_get_device_osdev_by_index HWLOC_NAME(nvml_get_device_osdev_by_index) + +/* gl.h */ + +#define hwloc_gl_get_display_osdev_by_port_device HWLOC_NAME(gl_get_display_osdev_by_port_device) +#define hwloc_gl_get_display_osdev_by_name HWLOC_NAME(gl_get_display_osdev_by_name) +#define hwloc_gl_get_display_by_osdev HWLOC_NAME(gl_get_display_by_osdev) + +/* hwloc/plugins.h */ + +#define hwloc_disc_component_type_e HWLOC_NAME(disc_component_type_e) +#define HWLOC_DISC_COMPONENT_TYPE_CPU HWLOC_NAME_CAPS(DISC_COMPONENT_TYPE_CPU) +#define HWLOC_DISC_COMPONENT_TYPE_GLOBAL HWLOC_NAME_CAPS(DISC_COMPONENT_TYPE_GLOBAL) +#define HWLOC_DISC_COMPONENT_TYPE_MISC HWLOC_NAME_CAPS(DISC_COMPONENT_TYPE_MISC) +#define hwloc_disc_component_type_t HWLOC_NAME(disc_component_type_t) +#define hwloc_disc_component HWLOC_NAME(disc_component) + +#define hwloc_backend HWLOC_NAME(backend) + +#define hwloc_backend_alloc HWLOC_NAME(backend_alloc) +#define hwloc_backend_enable HWLOC_NAME(backend_enable) + +#define hwloc_component_type_e HWLOC_NAME(component_type_e) +#define HWLOC_COMPONENT_TYPE_DISC HWLOC_NAME_CAPS(COMPONENT_TYPE_DISC) +#define HWLOC_COMPONENT_TYPE_XML HWLOC_NAME_CAPS(COMPONENT_TYPE_XML) +#define hwloc_component_type_t HWLOC_NAME(component_type_t) +#define hwloc_component HWLOC_NAME(component) + +#define hwloc_plugin_check_namespace HWLOC_NAME(plugin_check_namespace) + +#define hwloc_insert_object_by_cpuset HWLOC_NAME(insert_object_by_cpuset) +#define hwloc_report_error_t HWLOC_NAME(report_error_t) +#define hwloc_report_os_error HWLOC_NAME(report_os_error) +#define hwloc_hide_errors HWLOC_NAME(hide_errors) +#define hwloc__insert_object_by_cpuset HWLOC_NAME(_insert_object_by_cpuset) +#define hwloc_insert_object_by_parent HWLOC_NAME(insert_object_by_parent) +#define hwloc_alloc_setup_object HWLOC_NAME(alloc_setup_object) +#define hwloc_obj_add_children_sets HWLOC_NAME(add_children_sets) +#define 
hwloc_topology_reconnect HWLOC_NAME(topology_reconnect) + +#define hwloc_filter_check_pcidev_subtype_important HWLOC_NAME(filter_check_pcidev_subtype_important) +#define hwloc_filter_check_osdev_subtype_important HWLOC_NAME(filter_check_osdev_subtype_important) +#define hwloc_filter_check_keep_object_type HWLOC_NAME(filter_check_keep_object_type) +#define hwloc_filter_check_keep_object HWLOC_NAME(filter_check_keep_object) + +#define hwloc_pcidisc_find_cap HWLOC_NAME(pcidisc_find_cap) +#define hwloc_pcidisc_find_linkspeed HWLOC_NAME(pcidisc_find_linkspeed) +#define hwloc_pcidisc_check_bridge_type HWLOC_NAME(pcidisc_check_bridge_type) +#define hwloc_pcidisc_setup_bridge_attr HWLOC_NAME(pcidisc_setup_bridge_attr) +#define hwloc_pcidisc_tree_insert_by_busid HWLOC_NAME(pcidisc_tree_insert_by_busid) +#define hwloc_pcidisc_tree_attach HWLOC_NAME(pcidisc_tree_attach) + +#define hwloc_pcidisc_find_by_busid HWLOC_NAME(pcidisc_find_by_busid) +#define hwloc_pcidisc_find_busid_parent HWLOC_NAME(pcidisc_find_busid_parent) + +/* hwloc/deprecated.h */ + +#define hwloc_topology_insert_misc_object_by_parent HWLOC_NAME(topology_insert_misc_object_by_parent) +#define hwloc_obj_cpuset_snprintf HWLOC_NAME(obj_cpuset_snprintf) +#define hwloc_obj_type_sscanf HWLOC_NAME(obj_type_sscanf) + +#define hwloc_set_membind_nodeset HWLOC_NAME(set_membind_nodeset) +#define hwloc_get_membind_nodeset HWLOC_NAME(get_membind_nodeset) +#define hwloc_set_proc_membind_nodeset HWLOC_NAME(set_proc_membind_nodeset) +#define hwloc_get_proc_membind_nodeset HWLOC_NAME(get_proc_membind_nodeset) +#define hwloc_set_area_membind_nodeset HWLOC_NAME(set_area_membind_nodeset) +#define hwloc_get_area_membind_nodeset HWLOC_NAME(get_area_membind_nodeset) +#define hwloc_alloc_membind_nodeset HWLOC_NAME(alloc_membind_nodeset) + +#define hwloc_cpuset_to_nodeset_strict HWLOC_NAME(cpuset_to_nodeset_strict) +#define hwloc_cpuset_from_nodeset_strict HWLOC_NAME(cpuset_from_nodeset_strict) + +/* private/debug.h */ + +#define 
hwloc_debug_enabled HWLOC_NAME(debug_enabled) +#define hwloc_debug HWLOC_NAME(debug) + +/* private/misc.h */ + +#define hwloc_snprintf HWLOC_NAME(snprintf) +#define hwloc_namecoloncmp HWLOC_NAME(namecoloncmp) +#define hwloc_ffsl_manual HWLOC_NAME(ffsl_manual) +#define hwloc_ffs32 HWLOC_NAME(ffs32) +#define hwloc_ffsl_from_ffs32 HWLOC_NAME(ffsl_from_ffs32) +#define hwloc_flsl_manual HWLOC_NAME(flsl_manual) +#define hwloc_fls32 HWLOC_NAME(fls32) +#define hwloc_flsl_from_fls32 HWLOC_NAME(flsl_from_fls32) +#define hwloc_weight_long HWLOC_NAME(weight_long) +#define hwloc_strncasecmp HWLOC_NAME(strncasecmp) + +#define hwloc_bitmap_compare_inclusion HWLOC_NAME(bitmap_compare_inclusion) + +#define hwloc_pci_class_string HWLOC_NAME(pci_class_string) +#define hwloc_linux_pci_link_speed_from_string HWLOC_NAME(linux_pci_link_speed_from_string) + +#define hwloc_cache_type_by_depth_type HWLOC_NAME(cache_type_by_depth_type) +#define hwloc__obj_type_is_normal HWLOC_NAME(_obj_type_is_normal) +#define hwloc__obj_type_is_memory HWLOC_NAME(_obj_type_is_memory) +#define hwloc__obj_type_is_io HWLOC_NAME(_obj_type_is_io) +#define hwloc__obj_type_is_special HWLOC_NAME(_obj_type_is_special) + +#define hwloc__obj_type_is_cache HWLOC_NAME(_obj_type_is_cache) +#define hwloc__obj_type_is_dcache HWLOC_NAME(_obj_type_is_dcache) +#define hwloc__obj_type_is_icache HWLOC_NAME(_obj_type_is_icache) + +/* private/cpuid-x86.h */ + +#define hwloc_have_x86_cpuid HWLOC_NAME(have_x86_cpuid) +#define hwloc_x86_cpuid HWLOC_NAME(x86_cpuid) + +/* private/xml.h */ + +#define hwloc__xml_verbose HWLOC_NAME(_xml_verbose) + +#define hwloc__xml_import_state_s HWLOC_NAME(_xml_import_state_s) +#define hwloc__xml_import_state_t HWLOC_NAME(_xml_import_state_t) +#define hwloc__xml_import_diff HWLOC_NAME(_xml_import_diff) +#define hwloc_xml_backend_data_s HWLOC_NAME(xml_backend_data_s) +#define hwloc__xml_export_state_s HWLOC_NAME(_xml_export_state_s) +#define hwloc__xml_export_state_t HWLOC_NAME(_xml_export_state_t) 
+#define hwloc__xml_export_data_s HWLOC_NAME(_xml_export_data_s) +#define hwloc__xml_export_topology HWLOC_NAME(_xml_export_topology) +#define hwloc__xml_export_diff HWLOC_NAME(_xml_export_diff) + +#define hwloc_xml_callbacks HWLOC_NAME(xml_callbacks) +#define hwloc_xml_component HWLOC_NAME(xml_component) +#define hwloc_xml_callbacks_register HWLOC_NAME(xml_callbacks_register) +#define hwloc_xml_callbacks_reset HWLOC_NAME(xml_callbacks_reset) + +#define hwloc__xml_imported_v1distances_s HWLOC_NAME(_xml_imported_v1distances_s) + +/* private/components.h */ + +#define hwloc_disc_component_force_enable HWLOC_NAME(disc_component_force_enable) +#define hwloc_disc_components_enable_others HWLOC_NAME(disc_components_instantiate_others) + +#define hwloc_backends_is_thissystem HWLOC_NAME(backends_is_thissystem) +#define hwloc_backends_find_callbacks HWLOC_NAME(backends_find_callbacks) + +#define hwloc_backends_init HWLOC_NAME(backends_init) +#define hwloc_backends_disable_all HWLOC_NAME(backends_disable_all) + +#define hwloc_components_init HWLOC_NAME(components_init) +#define hwloc_components_fini HWLOC_NAME(components_fini) + +/* private/internal-private.h */ + +#define hwloc_xml_component HWLOC_NAME(xml_component) +#define hwloc_synthetic_component HWLOC_NAME(synthetic_component) + +#define hwloc_aix_component HWLOC_NAME(aix_component) +#define hwloc_bgq_component HWLOC_NAME(bgq_component) +#define hwloc_darwin_component HWLOC_NAME(darwin_component) +#define hwloc_freebsd_component HWLOC_NAME(freebsd_component) +#define hwloc_hpux_component HWLOC_NAME(hpux_component) +#define hwloc_linux_component HWLOC_NAME(linux_component) +#define hwloc_netbsd_component HWLOC_NAME(netbsd_component) +#define hwloc_noos_component HWLOC_NAME(noos_component) +#define hwloc_solaris_component HWLOC_NAME(solaris_component) +#define hwloc_windows_component HWLOC_NAME(windows_component) +#define hwloc_x86_component HWLOC_NAME(x86_component) + +#define hwloc_cuda_component 
HWLOC_NAME(cuda_component) +#define hwloc_gl_component HWLOC_NAME(gl_component) +#define hwloc_linuxio_component HWLOC_NAME(linuxio_component) +#define hwloc_nvml_component HWLOC_NAME(nvml_component) +#define hwloc_opencl_component HWLOC_NAME(opencl_component) +#define hwloc_pci_component HWLOC_NAME(pci_component) + +#define hwloc_xml_libxml_component HWLOC_NAME(xml_libxml_component) +#define hwloc_xml_nolibxml_component HWLOC_NAME(xml_nolibxml_component) + +/* private/private.h */ + +#define hwloc_special_level_s HWLOC_NAME(special_level_s) + +#define hwloc_pci_forced_locality_s HWLOC_NAME(pci_forced_locality_s) + +#define hwloc_alloc_root_sets HWLOC_NAME(alloc_root_sets) +#define hwloc_setup_pu_level HWLOC_NAME(setup_pu_level) +#define hwloc_get_sysctlbyname HWLOC_NAME(get_sysctlbyname) +#define hwloc_get_sysctl HWLOC_NAME(get_sysctl) +#define hwloc_fallback_nbprocessors HWLOC_NAME(fallback_nbprocessors) + +#define hwloc__object_cpusets_compare_first HWLOC_NAME(_object_cpusets_compare_first) +#define hwloc__reorder_children HWLOC_NAME(_reorder_children) + +#define hwloc_topology_setup_defaults HWLOC_NAME(topology_setup_defaults) +#define hwloc_topology_clear HWLOC_NAME(topology_clear) + +#define hwloc__attach_memory_object HWLOC_NAME(insert_memory_object) + +#define hwloc_pci_discovery_init HWLOC_NAME(pci_discovery_init) +#define hwloc_pci_discovery_prepare HWLOC_NAME(pci_discovery_prepare) +#define hwloc_pci_discovery_exit HWLOC_NAME(pci_discovery_exit) +#define hwloc_find_insert_io_parent_by_complete_cpuset HWLOC_NAME(hwloc_find_insert_io_parent_by_complete_cpuset) +#define hwloc_pci_belowroot_apply_locality HWLOC_NAME(pci_belowroot_apply_locality) + +#define hwloc__add_info HWLOC_NAME(_add_info) +#define hwloc__add_info_nodup HWLOC_NAME(_add_info_nodup) +#define hwloc__move_infos HWLOC_NAME(_move_infos) +#define hwloc__free_infos HWLOC_NAME(_free_infos) + +#define hwloc_binding_hooks HWLOC_NAME(binding_hooks) +#define hwloc_set_native_binding_hooks 
HWLOC_NAME(set_native_binding_hooks) +#define hwloc_set_binding_hooks HWLOC_NAME(set_binding_hooks) + +#define hwloc_set_linuxfs_hooks HWLOC_NAME(set_linuxfs_hooks) +#define hwloc_set_bgq_hooks HWLOC_NAME(set_bgq_hooks) +#define hwloc_set_solaris_hooks HWLOC_NAME(set_solaris_hooks) +#define hwloc_set_aix_hooks HWLOC_NAME(set_aix_hooks) +#define hwloc_set_windows_hooks HWLOC_NAME(set_windows_hooks) +#define hwloc_set_darwin_hooks HWLOC_NAME(set_darwin_hooks) +#define hwloc_set_freebsd_hooks HWLOC_NAME(set_freebsd_hooks) +#define hwloc_set_netbsd_hooks HWLOC_NAME(set_netbsd_hooks) +#define hwloc_set_hpux_hooks HWLOC_NAME(set_hpux_hooks) + +#define hwloc_look_hardwired_fujitsu_k HWLOC_NAME(look_hardwired_fujitsu_k) +#define hwloc_look_hardwired_fujitsu_fx10 HWLOC_NAME(look_hardwired_fujitsu_fx10) +#define hwloc_look_hardwired_fujitsu_fx100 HWLOC_NAME(look_hardwired_fujitsu_fx100) + +#define hwloc_add_uname_info HWLOC_NAME(add_uname_info) +#define hwloc_free_unlinked_object HWLOC_NAME(free_unlinked_object) +#define hwloc_free_object_and_children HWLOC_NAME(free_object_and_children) +#define hwloc_free_object_siblings_and_children HWLOC_NAME(free_object_siblings_and_children) + +#define hwloc_alloc_heap HWLOC_NAME(alloc_heap) +#define hwloc_alloc_mmap HWLOC_NAME(alloc_mmap) +#define hwloc_free_heap HWLOC_NAME(free_heap) +#define hwloc_free_mmap HWLOC_NAME(free_mmap) +#define hwloc_alloc_or_fail HWLOC_NAME(alloc_or_fail) + +#define hwloc_internal_distances_s HWLOC_NAME(internal_distances_s) +#define hwloc_internal_distances_init HWLOC_NAME(internal_distances_init) +#define hwloc_internal_distances_prepare HWLOC_NAME(internal_distances_prepare) +#define hwloc_internal_distances_dup HWLOC_NAME(internal_distances_dup) +#define hwloc_internal_distances_refresh HWLOC_NAME(internal_distances_refresh) +#define hwloc_internal_distances_destroy HWLOC_NAME(internal_distances_destroy) + +#define hwloc_internal_distances_add HWLOC_NAME(internal_distances_add) +#define 
hwloc_internal_distances_add_by_index HWLOC_NAME(internal_distances_add_by_index) +#define hwloc_internal_distances_invalidate_cached_objs HWLOC_NAME(hwloc_internal_distances_invalidate_cached_objs) + +#define hwloc_encode_to_base64 HWLOC_NAME(encode_to_base64) +#define hwloc_decode_from_base64 HWLOC_NAME(decode_from_base64) + +#define hwloc_progname HWLOC_NAME(progname) + +#define hwloc__topology_disadopt HWLOC_NAME(_topology_disadopt) +#define hwloc__topology_dup HWLOC_NAME(_topology_dup) + +#define hwloc_tma HWLOC_NAME(tma) +#define hwloc_tma_malloc HWLOC_NAME(tma_malloc) +#define hwloc_tma_calloc HWLOC_NAME(tma_calloc) +#define hwloc_tma_strdup HWLOC_NAME(tma_strdup) +#define hwloc_bitmap_tma_dup HWLOC_NAME(bitmap_tma_dup) + +/* private/solaris-chiptype.h */ + +#define hwloc_solaris_chip_info_s HWLOC_NAME(solaris_chip_info_s) +#define hwloc_solaris_get_chip_info HWLOC_NAME(solaris_get_chip_info) + +#endif /* HWLOC_SYM_TRANSFORM */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_RENAME_H */ diff --git a/src/3rdparty/hwloc/include/hwloc/shmem.h b/src/3rdparty/hwloc/include/hwloc/shmem.h new file mode 100644 index 00000000..22249463 --- /dev/null +++ b/src/3rdparty/hwloc/include/hwloc/shmem.h @@ -0,0 +1,137 @@ +/* + * Copyright © 2013-2018 Inria. All rights reserved. + * See COPYING in top-level directory. + */ + +/** \file + * \brief Sharing topologies between processes + */ + +#ifndef HWLOC_SHMEM_H +#define HWLOC_SHMEM_H + +#include + +#ifdef __cplusplus +extern "C" { +#elif 0 +} +#endif + + +/** \defgroup hwlocality_shmem Sharing topologies between processes + * + * These functions are used to share a topology between processes by + * duplicating it into a file-backed shared-memory buffer. + * + * The master process must first get the required shared-memory size + * for storing this topology with hwloc_shmem_topology_get_length(). 
+ * + * Then it must find a virtual memory area of that size that is available + * in all processes (identical virtual addresses in all processes). + * On Linux, this can be done by comparing holes found in /proc/\/maps + * for each process. + * + * Once found, it must open a destination file for storing the buffer, + * and pass it to hwloc_shmem_topology_write() together with + * virtual memory address and length obtained above. + * + * Other processes may then adopt this shared topology by opening the + * same file and passing it to hwloc_shmem_topology_adopt() with the + * exact same virtual memory address and length. + * + * @{ + */ + +/** \brief Get the required shared memory length for storing a topology. + * + * This length (in bytes) must be used in hwloc_shmem_topology_write() + * and hwloc_shmem_topology_adopt() later. + * + * \note Flags \p flags are currently unused, must be 0. + */ +HWLOC_DECLSPEC int hwloc_shmem_topology_get_length(hwloc_topology_t topology, + size_t *lengthp, + unsigned long flags); + +/** \brief Duplicate a topology to a shared memory file. + * + * Temporarily map a file in virtual memory and duplicate the + * topology \p topology by allocating duplicates in there. + * + * The segment of the file pointed by descriptor \p fd, + * starting at offset \p fileoffset, and of length \p length (in bytes), + * will be temporarily mapped at virtual address \p mmap_address + * during the duplication. + * + * The mapping length \p length must have been previously obtained with + * hwloc_shmem_topology_get_length() + * and the topology must not have been modified in the meantime. + * + * \note Flags \p flags are currently unused, must be 0. + * + * \note The object userdata pointer is duplicated but the pointed buffer + * is not. However the caller may also allocate it manually in shared memory + * to share it as well. 
+ * + * \return -1 with errno set to EBUSY if the virtual memory mapping defined + * by \p mmap_address and \p length isn't available in the process. + * \return -1 with errno set to EINVAL if \p fileoffset, \p mmap_address + * or \p length aren't page-aligned. + */ +HWLOC_DECLSPEC int hwloc_shmem_topology_write(hwloc_topology_t topology, + int fd, hwloc_uint64_t fileoffset, + void *mmap_address, size_t length, + unsigned long flags); + +/** \brief Adopt a shared memory topology stored in a file. + * + * Map a file in virtual memory and adopt the topology that was previously + * stored there with hwloc_shmem_topology_write(). + * + * The returned adopted topology in \p topologyp can be used just like any + * topology. And it must be destroyed with hwloc_topology_destroy() as usual. + * + * However the topology is read-only. + * For instance, it cannot be modified with hwloc_topology_restrict() + * and object userdata pointers cannot be changed. + * + * The segment of the file pointed by descriptor \p fd, + * starting at offset \p fileoffset, and of length \p length (in bytes), + * will be mapped at virtual address \p mmap_address. + * + * The file pointed by descriptor \p fd, the offset \p fileoffset, + * the requested mapping virtual address \p mmap_address and the length \p length + * must be identical to what was given to hwloc_shmem_topology_write() earlier. + * + * \note Flags \p flags are currently unused, must be 0. + * + * \note The object userdata pointer should not be used unless the process + * that created the shared topology also placed userdata-pointed buffers + * in shared memory. + * + * \note This function takes care of calling hwloc_topology_abi_check(). + * + * \return -1 with errno set to EBUSY if the virtual memory mapping defined + * by \p mmap_address and \p length isn't available in the process. 
+ * + * \return -1 with errno set to EINVAL if \p fileoffset, \p mmap_address + * or \p length aren't page-aligned, or do not match what was given to + * hwloc_shmem_topology_write() earlier. + * + * \return -1 with errno set to EINVAL if the layout of the topology structure + * is different between the writer process and the adopter process. + */ +HWLOC_DECLSPEC int hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp, + int fd, hwloc_uint64_t fileoffset, + void *mmap_address, size_t length, + unsigned long flags); +/** @} */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* HWLOC_SHMEM_H */ diff --git a/src/3rdparty/hwloc/include/private/autogen/config.h b/src/3rdparty/hwloc/include/private/autogen/config.h new file mode 100644 index 00000000..a97bdfea --- /dev/null +++ b/src/3rdparty/hwloc/include/private/autogen/config.h @@ -0,0 +1,672 @@ +/* + * Copyright © 2009, 2011, 2012 CNRS. All rights reserved. + * Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009, 2011, 2012, 2015 Université Bordeaux. All rights reserved. + * Copyright © 2009 Cisco Systems, Inc. All rights reserved. + * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + +#ifndef HWLOC_CONFIGURE_H +#define HWLOC_CONFIGURE_H + +#define DECLSPEC_EXPORTS + +#define HWLOC_HAVE_MSVC_CPUIDEX 1 + +/* Define to 1 if the system has the type `CACHE_DESCRIPTOR'. */ +#define HAVE_CACHE_DESCRIPTOR 0 + +/* Define to 1 if the system has the type `CACHE_RELATIONSHIP'. */ +#define HAVE_CACHE_RELATIONSHIP 0 + +/* Define to 1 if you have the `clz' function. */ +/* #undef HAVE_CLZ */ + +/* Define to 1 if you have the `clzl' function. */ +/* #undef HAVE_CLZL */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CL_CL_EXT_H */ + +/* Define to 1 if you have the `cpuset_setaffinity' function. */ +/* #undef HAVE_CPUSET_SETAFFINITY */ + +/* Define to 1 if you have the `cpuset_setid' function. 
*/ +/* #undef HAVE_CPUSET_SETID */ + +/* Define to 1 if we have -lcuda */ +/* #undef HAVE_CUDA */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CUDA_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_CUDA_RUNTIME_API_H */ + +/* Define to 1 if you have the declaration of `CL_DEVICE_TOPOLOGY_AMD', and to + 0 if you don't. */ +/* #undef HAVE_DECL_CL_DEVICE_TOPOLOGY_AMD */ + +/* Define to 1 if you have the declaration of `CTL_HW', and to 0 if you don't. + */ +/* #undef HAVE_DECL_CTL_HW */ + +/* Define to 1 if you have the declaration of `fabsf', and to 0 if you don't. + */ +#define HAVE_DECL_FABSF 1 + +/* Define to 1 if you have the declaration of `modff', and to 0 if you don't. + */ +#define HAVE_DECL_MODFF 1 + +/* Define to 1 if you have the declaration of `HW_NCPU', and to 0 if you + don't. */ +/* #undef HAVE_DECL_HW_NCPU */ + +/* Define to 1 if you have the declaration of + `nvmlDeviceGetMaxPcieLinkGeneration', and to 0 if you don't. */ +/* #undef HAVE_DECL_NVMLDEVICEGETMAXPCIELINKGENERATION */ + +/* Define to 1 if you have the declaration of `pthread_getaffinity_np', and to + 0 if you don't. */ +#define HAVE_DECL_PTHREAD_GETAFFINITY_NP 0 + +/* Define to 1 if you have the declaration of `pthread_setaffinity_np', and to + 0 if you don't. */ +#define HAVE_DECL_PTHREAD_SETAFFINITY_NP 0 + +/* Define to 1 if you have the declaration of `strtoull', and to 0 if you + don't. */ +#define HAVE_DECL_STRTOULL 0 + +/* Define to 1 if you have the declaration of `strcasecmp', and to 0 if you + don't. */ +/* #undef HWLOC_HAVE_DECL_STRCASECMP */ + +/* Define to 1 if you have the declaration of `snprintf', and to 0 if you + don't. */ +#define HAVE_DECL_SNPRINTF 0 + +/* Define to 1 if you have the declaration of `_strdup', and to 0 if you + don't. */ +#define HAVE_DECL__STRDUP 1 + +/* Define to 1 if you have the declaration of `_putenv', and to 0 if you + don't. 
*/ +#define HAVE_DECL__PUTENV 1 + +/* Define to 1 if you have the declaration of `_SC_LARGE_PAGESIZE', and to 0 + if you don't. */ +#define HAVE_DECL__SC_LARGE_PAGESIZE 0 + +/* Define to 1 if you have the declaration of `_SC_NPROCESSORS_CONF', and to 0 + if you don't. */ +#define HAVE_DECL__SC_NPROCESSORS_CONF 0 + +/* Define to 1 if you have the declaration of `_SC_NPROCESSORS_ONLN', and to 0 + if you don't. */ +#define HAVE_DECL__SC_NPROCESSORS_ONLN 0 + +/* Define to 1 if you have the declaration of `_SC_NPROC_CONF', and to 0 if + you don't. */ +#define HAVE_DECL__SC_NPROC_CONF 0 + +/* Define to 1 if you have the declaration of `_SC_NPROC_ONLN', and to 0 if + you don't. */ +#define HAVE_DECL__SC_NPROC_ONLN 0 + +/* Define to 1 if you have the declaration of `_SC_PAGESIZE', and to 0 if you + don't. */ +#define HAVE_DECL__SC_PAGESIZE 0 + +/* Define to 1 if you have the declaration of `_SC_PAGE_SIZE', and to 0 if you + don't. */ +#define HAVE_DECL__SC_PAGE_SIZE 0 + +/* Define to 1 if you have the header file. */ +/* #define HAVE_DIRENT_H 1 */ +#undef HAVE_DIRENT_H + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DLFCN_H */ + +/* Define to 1 if you have the `ffs' function. */ +/* #undef HAVE_FFS */ + +/* Define to 1 if you have the `ffsl' function. */ +/* #undef HAVE_FFSL */ + +/* Define to 1 if you have the `fls' function. */ +/* #undef HAVE_FLS */ + +/* Define to 1 if you have the `flsl' function. */ +/* #undef HAVE_FLSL */ + +/* Define to 1 if you have the `getpagesize' function. */ +#define HAVE_GETPAGESIZE 1 + +/* Define to 1 if the system has the type `GROUP_AFFINITY'. */ +#define HAVE_GROUP_AFFINITY 1 + +/* Define to 1 if the system has the type `GROUP_RELATIONSHIP'. */ +#define HAVE_GROUP_RELATIONSHIP 1 + +/* Define to 1 if you have the `host_info' function. */ +/* #undef HAVE_HOST_INFO */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_INFINIBAND_VERBS_H */ + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_INTTYPES_H 1 + +/* Define to 1 if the system has the type `KAFFINITY'. */ +#define HAVE_KAFFINITY 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_KSTAT_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_LANGINFO_H */ + +/* Define to 1 if we have -lgdi32 */ +#define HAVE_LIBGDI32 1 + +/* Define to 1 if we have -libverbs */ +/* #undef HAVE_LIBIBVERBS */ + +/* Define to 1 if we have -lkstat */ +/* #undef HAVE_LIBKSTAT */ + +/* Define to 1 if we have -llgrp */ +/* #undef HAVE_LIBLGRP */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Define to 1 if the system has the type `LOGICAL_PROCESSOR_RELATIONSHIP'. */ +#define HAVE_LOGICAL_PROCESSOR_RELATIONSHIP 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MACH_MACH_HOST_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MACH_MACH_INIT_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the `memalign' function. */ +/* #undef HAVE_MEMALIGN */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `nl_langinfo' function. */ +/* #undef HAVE_NL_LANGINFO */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_NUMAIF_H */ + +/* Define to 1 if the system has the type `NUMA_NODE_RELATIONSHIP'. */ +#define HAVE_NUMA_NODE_RELATIONSHIP 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_NVCTRL_NVCTRL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_NVML_H */ + +/* Define to 1 if you have the `openat' function. */ +/* #undef HAVE_OPENAT */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PICL_H */ + +/* Define to 1 if you have the `posix_memalign' function. */ +/* #undef HAVE_POSIX_MEMALIGN */ + +/* Define to 1 if the system has the type `PROCESSOR_CACHE_TYPE'. 
*/ +#define HAVE_PROCESSOR_CACHE_TYPE 1 + +/* Define to 1 if the system has the type `PROCESSOR_GROUP_INFO'. */ +#define HAVE_PROCESSOR_GROUP_INFO 1 + +/* Define to 1 if the system has the type `PROCESSOR_RELATIONSHIP'. */ +#define HAVE_PROCESSOR_RELATIONSHIP 1 + +/* Define to 1 if the system has the type `PSAPI_WORKING_SET_EX_BLOCK'. */ +/* #undef HAVE_PSAPI_WORKING_SET_EX_BLOCK */ + +/* Define to 1 if the system has the type `PSAPI_WORKING_SET_EX_INFORMATION'. + */ +/* #undef HAVE_PSAPI_WORKING_SET_EX_INFORMATION */ + +/* Define to 1 if the system has the type `PROCESSOR_NUMBER'. */ +#define HAVE_PROCESSOR_NUMBER 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTHREAD_NP_H */ + +/* Define to 1 if the system has the type `pthread_t'. */ +/* #undef HAVE_PTHREAD_T */ +#undef HAVE_PTHREAD_T + +/* Define to 1 if you have the `putwc' function. */ +#define HAVE_PUTWC 1 + +/* Define to 1 if the system has the type `RelationProcessorPackage'. */ +/* #undef HAVE_RELATIONPROCESSORPACKAGE */ + +/* Define to 1 if you have the `setlocale' function. */ +#define HAVE_SETLOCALE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strftime' function. */ +#define HAVE_STRFTIME 1 + +/* Define to 1 if you have the header file. */ +/* #define HAVE_STRINGS_H 1*/ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strncasecmp' function. */ +#define HAVE_STRNCASECMP 1 + +/* Define to '1' if sysctl is present and usable */ +/* #undef HAVE_SYSCTL */ + +/* Define to '1' if sysctlbyname is present and usable */ +/* #undef HAVE_SYSCTLBYNAME */ + +/* Define to 1 if the system has the type + `SYSTEM_LOGICAL_PROCESSOR_INFORMATION'. 
*/ +#define HAVE_SYSTEM_LOGICAL_PROCESSOR_INFORMATION 1 + +/* Define to 1 if the system has the type + `SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX'. */ +#define HAVE_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_CPUSET_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_LGRP_USER_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_MMAN_H */ + +/* Define to 1 if you have the header file. */ +/* #define HAVE_SYS_PARAM_H 1 */ +#undef HAVE_SYS_PARAM_H + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_SYSCTL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_UTSNAME_H */ + +/* Define to 1 if you have the `uname' function. */ +/* #undef HAVE_UNAME */ + +/* Define to 1 if you have the header file. */ +/* #define HAVE_UNISTD_H 1 */ +#undef HAVE_UNISTD_H + +/* Define to 1 if you have the `uselocale' function. */ +/* #undef HAVE_USELOCALE */ + +/* Define to 1 if the system has the type `wchar_t'. */ +#define HAVE_WCHAR_T 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_X11_KEYSYM_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_X11_XLIB_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_X11_XUTIL_H */ + +/* Define to 1 if you have the header file. 
*/ +/* #undef HAVE_XLOCALE_H */ + +/* Define to 1 on AIX */ +/* #undef HWLOC_AIX_SYS */ + +/* Define to 1 on BlueGene/Q */ +/* #undef HWLOC_BGQ_SYS */ + +/* Whether C compiler supports symbol visibility or not */ +#define HWLOC_C_HAVE_VISIBILITY 0 + +/* Define to 1 on Darwin */ +/* #undef HWLOC_DARWIN_SYS */ + +/* Whether we are in debugging mode or not */ +/* #undef HWLOC_DEBUG */ + +/* Define to 1 on *FREEBSD */ +/* #undef HWLOC_FREEBSD_SYS */ + +/* Whether your compiler has __attribute__ or not */ +/* #define HWLOC_HAVE_ATTRIBUTE 1 */ +#undef HWLOC_HAVE_ATTRIBUTE + +/* Whether your compiler has __attribute__ aligned or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_ALIGNED 1 */ + +/* Whether your compiler has __attribute__ always_inline or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_ALWAYS_INLINE 1 */ + +/* Whether your compiler has __attribute__ cold or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_COLD 1 */ + +/* Whether your compiler has __attribute__ const or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_CONST 1 */ + +/* Whether your compiler has __attribute__ deprecated or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_DEPRECATED 1 */ + +/* Whether your compiler has __attribute__ format or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_FORMAT 1 */ + +/* Whether your compiler has __attribute__ hot or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_HOT 1 */ + +/* Whether your compiler has __attribute__ malloc or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_MALLOC 1 */ + +/* Whether your compiler has __attribute__ may_alias or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_MAY_ALIAS 1 */ + +/* Whether your compiler has __attribute__ nonnull or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_NONNULL 1 */ + +/* Whether your compiler has __attribute__ noreturn or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_NORETURN 1 */ + +/* Whether your compiler has __attribute__ no_instrument_function or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_NO_INSTRUMENT_FUNCTION 1 */ + +/* Whether your compiler has __attribute__ packed or not */ +/* #define 
HWLOC_HAVE_ATTRIBUTE_PACKED 1 */ + +/* Whether your compiler has __attribute__ pure or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_PURE 1 */ + +/* Whether your compiler has __attribute__ sentinel or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_SENTINEL 1 */ + +/* Whether your compiler has __attribute__ unused or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_UNUSED 1 */ + +/* Whether your compiler has __attribute__ warn unused result or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_WARN_UNUSED_RESULT 1 */ + +/* Whether your compiler has __attribute__ weak alias or not */ +/* #define HWLOC_HAVE_ATTRIBUTE_WEAK_ALIAS 1 */ + +/* Define to 1 if your `ffs' function is known to be broken. */ +/* #undef HWLOC_HAVE_BROKEN_FFS */ + +/* Define to 1 if you have the `cairo' library. */ +/* #undef HWLOC_HAVE_CAIRO */ + +/* Define to 1 if you have the `clz' function. */ +/* #undef HWLOC_HAVE_CLZ */ + +/* Define to 1 if you have the `clzl' function. */ +/* #undef HWLOC_HAVE_CLZL */ + +/* Define to 1 if you have cpuid */ +/* #undef HWLOC_HAVE_CPUID */ + +/* Define to 1 if the CPU_SET macro works */ +/* #undef HWLOC_HAVE_CPU_SET */ + +/* Define to 1 if the CPU_SET_S macro works */ +/* #undef HWLOC_HAVE_CPU_SET_S */ + +/* Define to 1 if you have the `cudart' SDK. */ +/* #undef HWLOC_HAVE_CUDART */ + +/* Define to 1 if function `clz' is declared by system headers */ +/* #undef HWLOC_HAVE_DECL_CLZ */ + +/* Define to 1 if function `clzl' is declared by system headers */ +/* #undef HWLOC_HAVE_DECL_CLZL */ + +/* Define to 1 if function `ffs' is declared by system headers */ +/* #undef HWLOC_HAVE_DECL_FFS */ + +/* Define to 1 if function `ffsl' is declared by system headers */ +/* #undef HWLOC_HAVE_DECL_FFSL */ + +/* Define to 1 if function `fls' is declared by system headers */ +/* #undef HWLOC_HAVE_DECL_FLS */ + +/* Define to 1 if function `flsl' is declared by system headers */ +/* #undef HWLOC_HAVE_DECL_FLSL */ + +/* Define to 1 if you have the `ffs' function. 
*/ +/* #undef HWLOC_HAVE_FFS */ + +/* Define to 1 if you have the `ffsl' function. */ +/* #undef HWLOC_HAVE_FFSL */ + +/* Define to 1 if you have the `fls' function. */ +/* #undef HWLOC_HAVE_FLS */ + +/* Define to 1 if you have the `flsl' function. */ +/* #undef HWLOC_HAVE_FLSL */ + +/* Define to 1 if you have the GL module components. */ +/* #undef HWLOC_HAVE_GL */ + +/* Define to 1 if you have a library providing the termcap interface */ +/* #undef HWLOC_HAVE_LIBTERMCAP */ + +/* Define to 1 if you have the `libxml2' library. */ +/* #undef HWLOC_HAVE_LIBXML2 */ + +/* Define to 1 if building the Linux PCI component */ +/* #undef HWLOC_HAVE_LINUXPCI */ + +/* Define to 1 if you have the `NVML' library. */ +/* #undef HWLOC_HAVE_NVML */ + +/* Define to 1 if glibc provides the old prototype (without length) of + sched_setaffinity() */ +/* #undef HWLOC_HAVE_OLD_SCHED_SETAFFINITY */ + +/* Define to 1 if you have the `OpenCL' library. */ +/* #undef HWLOC_HAVE_OPENCL */ + +/* Define to 1 if the hwloc library should support dynamically-loaded plugins + */ +/* #undef HWLOC_HAVE_PLUGINS */ + +/* `Define to 1 if you have pthread_getthrds_np' */ +/* #undef HWLOC_HAVE_PTHREAD_GETTHRDS_NP */ + +/* Define to 1 if pthread mutexes are available */ +/* #undef HWLOC_HAVE_PTHREAD_MUTEX */ + +/* Define to 1 if glibc provides a prototype of sched_setaffinity() */ +#define HWLOC_HAVE_SCHED_SETAFFINITY 1 + +/* Define to 1 if you have the header file. */ +#define HWLOC_HAVE_STDINT_H 1 + +/* Define to 1 if you have the `windows.h' header. */ +#define HWLOC_HAVE_WINDOWS_H 1 + +/* Define to 1 if X11 headers including Xutil.h and keysym.h are available. 
*/ +/* #undef HWLOC_HAVE_X11_KEYSYM */ + +/* Define to 1 if function `syscall' is available */ +/* #undef HWLOC_HAVE_SYSCALL */ + +/* Define to 1 on HP-UX */ +/* #undef HWLOC_HPUX_SYS */ + +/* Define to 1 on Linux */ +/* #undef HWLOC_LINUX_SYS */ + +/* Define to 1 on *NETBSD */ +/* #undef HWLOC_NETBSD_SYS */ + +/* The size of `unsigned int', as computed by sizeof */ +#define HWLOC_SIZEOF_UNSIGNED_INT 4 + +/* The size of `unsigned long', as computed by sizeof */ +#define HWLOC_SIZEOF_UNSIGNED_LONG 4 + +/* Define to 1 on Solaris */ +/* #undef HWLOC_SOLARIS_SYS */ + +/* The hwloc symbol prefix */ +#define HWLOC_SYM_PREFIX hwloc_ + +/* The hwloc symbol prefix in all caps */ +#define HWLOC_SYM_PREFIX_CAPS HWLOC_ + +/* Whether we need to re-define all the hwloc public symbols or not */ +#define HWLOC_SYM_TRANSFORM 0 + +/* Define to 1 on unsupported systems */ +/* #undef HWLOC_UNSUPPORTED_SYS */ + +/* Define to 1 if ncurses works, preferred over curses */ +/* #undef HWLOC_USE_NCURSES */ + +/* Define to 1 on WINDOWS */ +#define HWLOC_WIN_SYS 1 + +/* Define to 1 on x86_32 */ +/* #undef HWLOC_X86_32_ARCH */ + +/* Define to 1 on x86_64 */ +#define HWLOC_X86_64_ARCH 1 + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "hwloc" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "http://www.open-mpi.org/projects/hwloc/" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "hwloc" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "hwloc" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "hwloc" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION HWLOC_VERSION + +/* The size of `unsigned int', as computed by sizeof. 
*/ +#define SIZEOF_UNSIGNED_INT 4 + +/* The size of `unsigned long', as computed by sizeof. */ +#define SIZEOF_UNSIGNED_LONG 4 + +/* The size of `void *', as computed by sizeof. */ +#define SIZEOF_VOID_P 8 + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Enable extensions on HP-UX. */ +#ifndef _HPUX_SOURCE +# define _HPUX_SOURCE 1 +#endif + + +/* Enable extensions on AIX 3, Interix. */ +/* +#ifndef _ALL_SOURCE +# define _ALL_SOURCE 1 +#endif +*/ + +/* Enable GNU extensions on systems that have them. */ +/* +#ifndef _GNU_SOURCE +# define _GNU_SOURCE 1 +#endif +*/ +/* Enable threading extensions on Solaris. */ +/* +#ifndef _POSIX_PTHREAD_SEMANTICS +# define _POSIX_PTHREAD_SEMANTICS 1 +#endif +*/ +/* Enable extensions on HP NonStop. */ +/* +#ifndef _TANDEM_SOURCE +# define _TANDEM_SOURCE 1 +#endif +*/ +/* Enable general extensions on Solaris. */ +/* +#ifndef __EXTENSIONS__ +# define __EXTENSIONS__ 1 +#endif +*/ + + +/* Version number of package */ +#define VERSION HWLOC_VERSION + +/* Define to 1 if the X Window System is missing or not being used. */ +#define X_DISPLAY_MISSING 1 + +/* Define to 1 if on MINIX. */ +/* #undef _MINIX */ + +/* Define to 2 if the system does not provide POSIX.1 features except with + this defined. */ +/* #undef _POSIX_1_SOURCE */ + +/* Define to 1 if you need to in order for `stat' and other things to work. */ +/* #undef _POSIX_SOURCE */ + +/* Define this to the process ID type */ +#define hwloc_pid_t HANDLE + +/* Define this to either strncasecmp or strncmp */ +#define hwloc_strncasecmp strncasecmp + +/* Define this to the thread ID type */ +#define hwloc_thread_t HANDLE + + +#endif /* HWLOC_CONFIGURE_H */ diff --git a/src/3rdparty/hwloc/include/private/components.h b/src/3rdparty/hwloc/include/private/components.h new file mode 100644 index 00000000..8525bbe4 --- /dev/null +++ b/src/3rdparty/hwloc/include/private/components.h @@ -0,0 +1,43 @@ +/* + * Copyright © 2012-2015 Inria. All rights reserved. 
+ * See COPYING in top-level directory. + */ + + +#ifdef HWLOC_INSIDE_PLUGIN +/* + * these declarations are internal only, they are not available to plugins + * (many functions below are internal static symbols). + */ +#error This file should not be used in plugins +#endif + + +#ifndef PRIVATE_COMPONENTS_H +#define PRIVATE_COMPONENTS_H 1 + +#include + +struct hwloc_topology; + +extern int hwloc_disc_component_force_enable(struct hwloc_topology *topology, + int envvar_forced, /* 1 if forced through envvar, 0 if forced through API */ + int type, const char *name, + const void *data1, const void *data2, const void *data3); +extern void hwloc_disc_components_enable_others(struct hwloc_topology *topology); + +/* Compute the topology is_thissystem flag and find some callbacks based on enabled backends */ +extern void hwloc_backends_is_thissystem(struct hwloc_topology *topology); +extern void hwloc_backends_find_callbacks(struct hwloc_topology *topology); + +/* Initialize the list of backends used by a topology */ +extern void hwloc_backends_init(struct hwloc_topology *topology); +/* Disable and destroy all backends used by a topology */ +extern void hwloc_backends_disable_all(struct hwloc_topology *topology); + +/* Used by the core to setup/destroy the list of components */ +extern void hwloc_components_init(void); /* increases components refcount, should be called exactly once per topology (during init) */ +extern void hwloc_components_fini(void); /* decreases components refcount, should be called exactly once per topology (during destroy) */ + +#endif /* PRIVATE_COMPONENTS_H */ + diff --git a/src/3rdparty/hwloc/include/private/cpuid-x86.h b/src/3rdparty/hwloc/include/private/cpuid-x86.h new file mode 100644 index 00000000..2758afe0 --- /dev/null +++ b/src/3rdparty/hwloc/include/private/cpuid-x86.h @@ -0,0 +1,86 @@ +/* + * Copyright © 2010-2012, 2014 Université Bordeaux + * Copyright © 2010 Cisco Systems, Inc. All rights reserved. + * Copyright © 2014 Inria. 
All rights reserved. + * + * See COPYING in top-level directory. + */ + +/* Internals for x86's cpuid. */ + +#ifndef HWLOC_PRIVATE_CPUID_X86_H +#define HWLOC_PRIVATE_CPUID_X86_H + +#if (defined HWLOC_X86_32_ARCH) && (!defined HWLOC_HAVE_MSVC_CPUIDEX) +static __hwloc_inline int hwloc_have_x86_cpuid(void) +{ + int ret; + unsigned tmp, tmp2; + __asm__( + "mov $0,%0\n\t" /* Not supported a priori */ + + "pushfl \n\t" /* Save flags */ + + "pushfl \n\t" \ + "pop %1 \n\t" /* Get flags */ \ + +#define TRY_TOGGLE \ + "xor $0x00200000,%1\n\t" /* Try to toggle ID */ \ + "mov %1,%2\n\t" /* Save expected value */ \ + "push %1 \n\t" \ + "popfl \n\t" /* Try to toggle */ \ + "pushfl \n\t" \ + "pop %1 \n\t" \ + "cmp %1,%2\n\t" /* Compare with expected value */ \ + "jnz 0f\n\t" /* Unexpected, failure */ \ + + TRY_TOGGLE /* Try to set/clear */ + TRY_TOGGLE /* Try to clear/set */ + + "mov $1,%0\n\t" /* Passed the test! */ + + "0: \n\t" + "popfl \n\t" /* Restore flags */ + + : "=r" (ret), "=&r" (tmp), "=&r" (tmp2)); + return ret; +} +#endif /* !defined HWLOC_X86_32_ARCH && !defined HWLOC_HAVE_MSVC_CPUIDEX*/ +#if (defined HWLOC_X86_64_ARCH) || (defined HWLOC_HAVE_MSVC_CPUIDEX) +static __hwloc_inline int hwloc_have_x86_cpuid(void) { return 1; } +#endif /* HWLOC_X86_64_ARCH */ + +static __hwloc_inline void hwloc_x86_cpuid(unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx) +{ +#ifdef HWLOC_HAVE_MSVC_CPUIDEX + int regs[4]; + __cpuidex(regs, *eax, *ecx); + *eax = regs[0]; + *ebx = regs[1]; + *ecx = regs[2]; + *edx = regs[3]; +#else /* HWLOC_HAVE_MSVC_CPUIDEX */ + /* Note: gcc might want to use bx or the stack for %1 addressing, so we can't + * use them :/ */ +#ifdef HWLOC_X86_64_ARCH + hwloc_uint64_t sav_rbx; + __asm__( + "mov %%rbx,%2\n\t" + "cpuid\n\t" + "xchg %2,%%rbx\n\t" + "movl %k2,%1\n\t" + : "+a" (*eax), "=m" (*ebx), "=&r"(sav_rbx), + "+c" (*ecx), "=&d" (*edx)); +#elif defined(HWLOC_X86_32_ARCH) + __asm__( + "mov %%ebx,%1\n\t" + "cpuid\n\t" + "xchg %%ebx,%1\n\t" + : "+a" 
(*eax), "=&SD" (*ebx), "+c" (*ecx), "=&d" (*edx)); +#else +#error unknown architecture +#endif +#endif /* HWLOC_HAVE_MSVC_CPUIDEX */ +} + +#endif /* HWLOC_PRIVATE_X86_CPUID_H */ diff --git a/src/3rdparty/hwloc/include/private/debug.h b/src/3rdparty/hwloc/include/private/debug.h new file mode 100644 index 00000000..74b697db --- /dev/null +++ b/src/3rdparty/hwloc/include/private/debug.h @@ -0,0 +1,83 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2017 Inria. All rights reserved. + * Copyright © 2009, 2011 Université Bordeaux + * Copyright © 2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/* The configuration file */ + +#ifndef HWLOC_DEBUG_H +#define HWLOC_DEBUG_H + +#include +#include + +#ifdef HWLOC_DEBUG +#include +#include +#endif + +/* Compile-time assertion */ +#define HWLOC_BUILD_ASSERT(condition) ((void)sizeof(char[1 - 2*!(condition)])) + +#ifdef HWLOC_DEBUG +static __hwloc_inline int hwloc_debug_enabled(void) +{ + static int checked = 0; + static int enabled = 1; + if (!checked) { + const char *env = getenv("HWLOC_DEBUG_VERBOSE"); + if (env) + enabled = atoi(env); + if (enabled) + fprintf(stderr, "hwloc verbose debug enabled, may be disabled with HWLOC_DEBUG_VERBOSE=0 in the environment.\n"); + checked = 1; + } + return enabled; +} +#endif + +static __hwloc_inline void hwloc_debug(const char *s __hwloc_attribute_unused, ...) __hwloc_attribute_format(printf, 1, 2); +static __hwloc_inline void hwloc_debug(const char *s __hwloc_attribute_unused, ...) 
+{ +#ifdef HWLOC_DEBUG + if (hwloc_debug_enabled()) { + va_list ap; + va_start(ap, s); + vfprintf(stderr, s, ap); + va_end(ap); + } +#endif +} + +#ifdef HWLOC_DEBUG +#define hwloc_debug_bitmap(fmt, bitmap) do { \ +if (hwloc_debug_enabled()) { \ + char *s; \ + hwloc_bitmap_asprintf(&s, bitmap); \ + fprintf(stderr, fmt, s); \ + free(s); \ +} } while (0) +#define hwloc_debug_1arg_bitmap(fmt, arg1, bitmap) do { \ +if (hwloc_debug_enabled()) { \ + char *s; \ + hwloc_bitmap_asprintf(&s, bitmap); \ + fprintf(stderr, fmt, arg1, s); \ + free(s); \ +} } while (0) +#define hwloc_debug_2args_bitmap(fmt, arg1, arg2, bitmap) do { \ +if (hwloc_debug_enabled()) { \ + char *s; \ + hwloc_bitmap_asprintf(&s, bitmap); \ + fprintf(stderr, fmt, arg1, arg2, s); \ + free(s); \ +} } while (0) +#else +#define hwloc_debug_bitmap(s, bitmap) do { } while(0) +#define hwloc_debug_1arg_bitmap(s, arg1, bitmap) do { } while(0) +#define hwloc_debug_2args_bitmap(s, arg1, arg2, bitmap) do { } while(0) +#endif + +#endif /* HWLOC_DEBUG_H */ diff --git a/src/3rdparty/hwloc/include/private/internal-components.h b/src/3rdparty/hwloc/include/private/internal-components.h new file mode 100644 index 00000000..b138a0eb --- /dev/null +++ b/src/3rdparty/hwloc/include/private/internal-components.h @@ -0,0 +1,41 @@ +/* + * Copyright © 2018 Inria. All rights reserved. + * + * See COPYING in top-level directory. 
+ */ + +/* List of components defined inside hwloc */ + +#ifndef PRIVATE_INTERNAL_COMPONENTS_H +#define PRIVATE_INTERNAL_COMPONENTS_H + +/* global discovery */ +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_xml_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_synthetic_component; + +/* CPU discovery */ +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_aix_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_bgq_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_darwin_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_freebsd_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_hpux_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_linux_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_netbsd_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_noos_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_solaris_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_windows_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_x86_component; + +/* I/O discovery */ +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_cuda_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_gl_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_linuxio_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_nvml_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_opencl_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_pci_component; + +/* XML backend */ +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_xml_nolibxml_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_xml_libxml_component; + +#endif /* PRIVATE_INTERNAL_COMPONENTS_H */ diff --git a/src/3rdparty/hwloc/include/private/misc.h b/src/3rdparty/hwloc/include/private/misc.h new file mode 100644 index 
00000000..66608bc7 --- /dev/null +++ b/src/3rdparty/hwloc/include/private/misc.h @@ -0,0 +1,583 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/* Misc macros and inlines. */ + +#ifndef HWLOC_PRIVATE_MISC_H +#define HWLOC_PRIVATE_MISC_H + +#include +#include +#include + +#ifdef HWLOC_HAVE_DECL_STRNCASECMP +#ifdef HAVE_STRINGS_H +#include +#endif +#else +#ifdef HAVE_CTYPE_H +#include +#endif +#endif + +#define HWLOC_BITS_PER_LONG (HWLOC_SIZEOF_UNSIGNED_LONG * 8) +#define HWLOC_BITS_PER_INT (HWLOC_SIZEOF_UNSIGNED_INT * 8) + +#if (HWLOC_BITS_PER_LONG != 32) && (HWLOC_BITS_PER_LONG != 64) +#error "unknown size for unsigned long." +#endif + +#if (HWLOC_BITS_PER_INT != 16) && (HWLOC_BITS_PER_INT != 32) && (HWLOC_BITS_PER_INT != 64) +#error "unknown size for unsigned int." +#endif + +/* internal-use-only value for when we don't know the type or don't have any value */ +#define HWLOC_OBJ_TYPE_NONE ((hwloc_obj_type_t) -1) + +/** + * ffsl helpers. + */ + +#if defined(HWLOC_HAVE_BROKEN_FFS) + +/* System has a broken ffs(). + * We must check the before __GNUC__ or HWLOC_HAVE_FFSL + */ +# define HWLOC_NO_FFS + +#elif defined(__GNUC__) + +# if (__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)) + /* Starting from 3.4, gcc has a long variant. 
*/ +# define hwloc_ffsl(x) __builtin_ffsl(x) +# else +# define hwloc_ffs(x) __builtin_ffs(x) +# define HWLOC_NEED_FFSL +# endif + +#elif defined(HWLOC_HAVE_FFSL) + +# ifndef HWLOC_HAVE_DECL_FFSL +extern int ffsl(long) __hwloc_attribute_const; +# endif + +# define hwloc_ffsl(x) ffsl(x) + +#elif defined(HWLOC_HAVE_FFS) + +# ifndef HWLOC_HAVE_DECL_FFS +extern int ffs(int) __hwloc_attribute_const; +# endif + +# define hwloc_ffs(x) ffs(x) +# define HWLOC_NEED_FFSL + +#else /* no ffs implementation */ + +# define HWLOC_NO_FFS + +#endif + +#ifdef HWLOC_NO_FFS + +/* no ffs or it is known to be broken */ +static __hwloc_inline int +hwloc_ffsl_manual(unsigned long x) __hwloc_attribute_const; +static __hwloc_inline int +hwloc_ffsl_manual(unsigned long x) +{ + int i; + + if (!x) + return 0; + + i = 1; +#if HWLOC_BITS_PER_LONG >= 64 + if (!(x & 0xfffffffful)) { + x >>= 32; + i += 32; + } +#endif + if (!(x & 0xffffu)) { + x >>= 16; + i += 16; + } + if (!(x & 0xff)) { + x >>= 8; + i += 8; + } + if (!(x & 0xf)) { + x >>= 4; + i += 4; + } + if (!(x & 0x3)) { + x >>= 2; + i += 2; + } + if (!(x & 0x1)) { + x >>= 1; + i += 1; + } + + return i; +} +/* always define hwloc_ffsl as a macro, to avoid renaming breakage */ +#define hwloc_ffsl hwloc_ffsl_manual + +#elif defined(HWLOC_NEED_FFSL) + +/* We only have an int ffs(int) implementation, build a long one. */ + +/* First make it 32 bits if it was only 16. */ +static __hwloc_inline int +hwloc_ffs32(unsigned long x) __hwloc_attribute_const; +static __hwloc_inline int +hwloc_ffs32(unsigned long x) +{ +#if HWLOC_BITS_PER_INT == 16 + int low_ffs, hi_ffs; + + low_ffs = hwloc_ffs(x & 0xfffful); + if (low_ffs) + return low_ffs; + + hi_ffs = hwloc_ffs(x >> 16); + if (hi_ffs) + return hi_ffs + 16; + + return 0; +#else + return hwloc_ffs(x); +#endif +} + +/* Then make it 64 bit if longs are. 
*/ +static __hwloc_inline int +hwloc_ffsl_from_ffs32(unsigned long x) __hwloc_attribute_const; +static __hwloc_inline int +hwloc_ffsl_from_ffs32(unsigned long x) +{ +#if HWLOC_BITS_PER_LONG == 64 + int low_ffs, hi_ffs; + + low_ffs = hwloc_ffs32(x & 0xfffffffful); + if (low_ffs) + return low_ffs; + + hi_ffs = hwloc_ffs32(x >> 32); + if (hi_ffs) + return hi_ffs + 32; + + return 0; +#else + return hwloc_ffs32(x); +#endif +} +/* always define hwloc_ffsl as a macro, to avoid renaming breakage */ +#define hwloc_ffsl hwloc_ffsl_from_ffs32 + +#endif + +/** + * flsl helpers. + */ +#ifdef __GNUC__ + +# if (__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)) +# define hwloc_flsl(x) ((x) ? (8*sizeof(long) - __builtin_clzl(x)) : 0) +# else +# define hwloc_fls(x) ((x) ? (8*sizeof(int) - __builtin_clz(x)) : 0) +# define HWLOC_NEED_FLSL +# endif + +#elif defined(HWLOC_HAVE_FLSL) + +# ifndef HWLOC_HAVE_DECL_FLSL +extern int flsl(long) __hwloc_attribute_const; +# endif + +# define hwloc_flsl(x) flsl(x) + +#elif defined(HWLOC_HAVE_CLZL) + +# ifndef HWLOC_HAVE_DECL_CLZL +extern int clzl(long) __hwloc_attribute_const; +# endif + +# define hwloc_flsl(x) ((x) ? (8*sizeof(long) - clzl(x)) : 0) + +#elif defined(HWLOC_HAVE_FLS) + +# ifndef HWLOC_HAVE_DECL_FLS +extern int fls(int) __hwloc_attribute_const; +# endif + +# define hwloc_fls(x) fls(x) +# define HWLOC_NEED_FLSL + +#elif defined(HWLOC_HAVE_CLZ) + +# ifndef HWLOC_HAVE_DECL_CLZ +extern int clz(int) __hwloc_attribute_const; +# endif + +# define hwloc_fls(x) ((x) ? 
(8*sizeof(int) - clz(x)) : 0) +# define HWLOC_NEED_FLSL + +#else /* no fls implementation */ + +static __hwloc_inline int +hwloc_flsl_manual(unsigned long x) __hwloc_attribute_const; +static __hwloc_inline int +hwloc_flsl_manual(unsigned long x) +{ + int i = 0; + + if (!x) + return 0; + + i = 1; +#if HWLOC_BITS_PER_LONG >= 64 + if ((x & 0xffffffff00000000ul)) { + x >>= 32; + i += 32; + } +#endif + if ((x & 0xffff0000u)) { + x >>= 16; + i += 16; + } + if ((x & 0xff00)) { + x >>= 8; + i += 8; + } + if ((x & 0xf0)) { + x >>= 4; + i += 4; + } + if ((x & 0xc)) { + x >>= 2; + i += 2; + } + if ((x & 0x2)) { + x >>= 1; + i += 1; + } + + return i; +} +/* always define hwloc_flsl as a macro, to avoid renaming breakage */ +#define hwloc_flsl hwloc_flsl_manual + +#endif + +#ifdef HWLOC_NEED_FLSL + +/* We only have an int fls(int) implementation, build a long one. */ + +/* First make it 32 bits if it was only 16. */ +static __hwloc_inline int +hwloc_fls32(unsigned long x) __hwloc_attribute_const; +static __hwloc_inline int +hwloc_fls32(unsigned long x) +{ +#if HWLOC_BITS_PER_INT == 16 + int low_fls, hi_fls; + + hi_fls = hwloc_fls(x >> 16); + if (hi_fls) + return hi_fls + 16; + + low_fls = hwloc_fls(x & 0xfffful); + if (low_fls) + return low_fls; + + return 0; +#else + return hwloc_fls(x); +#endif +} + +/* Then make it 64 bit if longs are. 
*/ +static __hwloc_inline int +hwloc_flsl_from_fls32(unsigned long x) __hwloc_attribute_const; +static __hwloc_inline int +hwloc_flsl_from_fls32(unsigned long x) +{ +#if HWLOC_BITS_PER_LONG == 64 + int low_fls, hi_fls; + + hi_fls = hwloc_fls32(x >> 32); + if (hi_fls) + return hi_fls + 32; + + low_fls = hwloc_fls32(x & 0xfffffffful); + if (low_fls) + return low_fls; + + return 0; +#else + return hwloc_fls32(x); +#endif +} +/* always define hwloc_flsl as a macro, to avoid renaming breakage */ +#define hwloc_flsl hwloc_flsl_from_fls32 + +#endif + +static __hwloc_inline int +hwloc_weight_long(unsigned long w) __hwloc_attribute_const; +static __hwloc_inline int +hwloc_weight_long(unsigned long w) +{ +#if HWLOC_BITS_PER_LONG == 32 +#if (__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__) >= 4) + return __builtin_popcount(w); +#else + unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); + res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); + return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); +#endif +#else /* HWLOC_BITS_PER_LONG == 32 */ +#if (__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__) >= 4) + return __builtin_popcountll(w); +#else + unsigned long res; + res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul); + res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); + res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful); + res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul); + res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul); + return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul); +#endif +#endif /* HWLOC_BITS_PER_LONG == 64 */ +} + +#if !HAVE_DECL_STRTOULL && defined(HAVE_STRTOULL) +unsigned long long int strtoull(const char *nptr, char **endptr, int base); +#endif + +static __hwloc_inline int 
hwloc_strncasecmp(const char *s1, const char *s2, size_t n) +{ +#ifdef HWLOC_HAVE_DECL_STRNCASECMP + return strncasecmp(s1, s2, n); +#else + while (n) { + char c1 = tolower(*s1), c2 = tolower(*s2); + if (!c1 || !c2 || c1 != c2) + return c1-c2; + n--; s1++; s2++; + } + return 0; +#endif +} + +static __hwloc_inline hwloc_obj_type_t hwloc_cache_type_by_depth_type(unsigned depth, hwloc_obj_cache_type_t type) +{ + if (type == HWLOC_OBJ_CACHE_INSTRUCTION) { + if (depth >= 1 && depth <= 3) + return HWLOC_OBJ_L1ICACHE + depth-1; + else + return HWLOC_OBJ_TYPE_NONE; + } else { + if (depth >= 1 && depth <= 5) + return HWLOC_OBJ_L1CACHE + depth-1; + else + return HWLOC_OBJ_TYPE_NONE; + } +} + +#define HWLOC_BITMAP_EQUAL 0 /* Bitmaps are equal */ +#define HWLOC_BITMAP_INCLUDED 1 /* First bitmap included in second */ +#define HWLOC_BITMAP_CONTAINS 2 /* First bitmap contains second */ +#define HWLOC_BITMAP_INTERSECTS 3 /* Bitmaps intersect without any inclusion */ +#define HWLOC_BITMAP_DIFFERENT 4 /* Bitmaps do not intersect */ + +/* Compare bitmaps \p bitmap1 and \p bitmap2 from an inclusion point of view. */ +HWLOC_DECLSPEC int hwloc_bitmap_compare_inclusion(hwloc_const_bitmap_t bitmap1, hwloc_const_bitmap_t bitmap2) __hwloc_attribute_pure; + +/* Return a stringified PCI class. */ +HWLOC_DECLSPEC extern const char * hwloc_pci_class_string(unsigned short class_id); + +/* Parse a PCI link speed (GT/s) string from Linux sysfs */ +#ifdef HWLOC_LINUX_SYS +#include /* for atof() */ +static __hwloc_inline float +hwloc_linux_pci_link_speed_from_string(const char *string) +{ + /* don't parse Gen1 with atof() since it expects a localized string + * while the kernel sysfs files aren't. 
+ */ + if (!strncmp(string, "2.5 ", 4)) + /* "2.5 GT/s" is Gen1 with 8/10 encoding */ + return 2.5 * .8; + + /* also hardwire Gen2 since it also has a specific encoding */ + if (!strncmp(string, "5 ", 2)) + /* "5 GT/s" is Gen2 with 8/10 encoding */ + return 5 * .8; + + /* handle Gen3+ in a generic way */ + return atof(string) * 128./130; /* Gen3+ encoding is 128/130 */ +} +#endif + +/* Traverse children of a parent */ +#define for_each_child(child, parent) for(child = parent->first_child; child; child = child->next_sibling) +#define for_each_memory_child(child, parent) for(child = parent->memory_first_child; child; child = child->next_sibling) +#define for_each_io_child(child, parent) for(child = parent->io_first_child; child; child = child->next_sibling) +#define for_each_misc_child(child, parent) for(child = parent->misc_first_child; child; child = child->next_sibling) + +/* Any object attached to normal children */ +static __hwloc_inline int hwloc__obj_type_is_normal (hwloc_obj_type_t type) +{ + /* type contiguity is asserted in topology_check() */ + return type <= HWLOC_OBJ_GROUP; +} + +/* Any object attached to memory children, currently only NUMA nodes */ +static __hwloc_inline int hwloc__obj_type_is_memory (hwloc_obj_type_t type) +{ + /* type contiguity is asserted in topology_check() */ + return type == HWLOC_OBJ_NUMANODE; +} + +/* I/O or Misc object, without cpusets or nodesets. 
*/ +static __hwloc_inline int hwloc__obj_type_is_special (hwloc_obj_type_t type) +{ + /* type contiguity is asserted in topology_check() */ + return type >= HWLOC_OBJ_BRIDGE && type <= HWLOC_OBJ_MISC; +} + +/* Any object attached to io children */ +static __hwloc_inline int hwloc__obj_type_is_io (hwloc_obj_type_t type) +{ + /* type contiguity is asserted in topology_check() */ + return type >= HWLOC_OBJ_BRIDGE && type <= HWLOC_OBJ_OS_DEVICE; +} + +static __hwloc_inline int +hwloc__obj_type_is_cache(hwloc_obj_type_t type) +{ + /* type contiguity is asserted in topology_check() */ + return (type >= HWLOC_OBJ_L1CACHE && type <= HWLOC_OBJ_L3ICACHE); +} + +static __hwloc_inline int +hwloc__obj_type_is_dcache(hwloc_obj_type_t type) +{ + /* type contiguity is asserted in topology_check() */ + return (type >= HWLOC_OBJ_L1CACHE && type <= HWLOC_OBJ_L5CACHE); +} + +/** \brief Check whether an object is a Instruction Cache. */ +static __hwloc_inline int +hwloc__obj_type_is_icache(hwloc_obj_type_t type) +{ + /* type contiguity is asserted in topology_check() */ + return (type >= HWLOC_OBJ_L1ICACHE && type <= HWLOC_OBJ_L3ICACHE); +} + +#ifdef HAVE_USELOCALE +#include "locale.h" +#ifdef HAVE_XLOCALE_H +#include "xlocale.h" +#endif +#define hwloc_localeswitch_declare locale_t __old_locale = (locale_t)0, __new_locale +#define hwloc_localeswitch_init() do { \ + __new_locale = newlocale(LC_ALL_MASK, "C", (locale_t)0); \ + if (__new_locale != (locale_t)0) \ + __old_locale = uselocale(__new_locale); \ +} while (0) +#define hwloc_localeswitch_fini() do { \ + if (__new_locale != (locale_t)0) { \ + uselocale(__old_locale); \ + freelocale(__new_locale); \ + } \ +} while(0) +#else /* HAVE_USELOCALE */ +#if __HWLOC_HAVE_ATTRIBUTE_UNUSED +#define hwloc_localeswitch_declare int __dummy_nolocale __hwloc_attribute_unused +#define hwloc_localeswitch_init() +#else +#define hwloc_localeswitch_declare int __dummy_nolocale +#define hwloc_localeswitch_init() (void)__dummy_nolocale +#endif +#define 
hwloc_localeswitch_fini() +#endif /* HAVE_USELOCALE */ + +#if !HAVE_DECL_FABSF +#define fabsf(f) fabs((double)(f)) +#endif + +#if !HAVE_DECL_MODFF +#define modff(x,iptr) (float)modf((double)x,(double *)iptr) +#endif + +#if HAVE_DECL__SC_PAGE_SIZE +#define hwloc_getpagesize() sysconf(_SC_PAGE_SIZE) +#elif HAVE_DECL__SC_PAGESIZE +#define hwloc_getpagesize() sysconf(_SC_PAGESIZE) +#elif defined HAVE_GETPAGESIZE +#define hwloc_getpagesize() getpagesize() +#else +#undef hwloc_getpagesize +#endif + +#if HWLOC_HAVE_ATTRIBUTE_FORMAT +# define __hwloc_attribute_format(type, str, arg) __attribute__((__format__(type, str, arg))) +#else +# define __hwloc_attribute_format(type, str, arg) +#endif + +#define hwloc_memory_size_printf_value(_size, _verbose) \ + ((_size) < (10ULL<<20) || (_verbose) ? (((_size)>>9)+1)>>1 : (_size) < (10ULL<<30) ? (((_size)>>19)+1)>>1 : (_size) < (10ULL<<40) ? (((_size)>>29)+1)>>1 : (((_size)>>39)+1)>>1) +#define hwloc_memory_size_printf_unit(_size, _verbose) \ + ((_size) < (10ULL<<20) || (_verbose) ? "KB" : (_size) < (10ULL<<30) ? "MB" : (_size) < (10ULL<<40) ? 
"GB" : "TB") + +#ifdef HWLOC_WIN_SYS +# ifndef HAVE_SSIZE_T +typedef SSIZE_T ssize_t; +# endif +# if !HAVE_DECL_STRTOULL && !defined(HAVE_STRTOULL) +# define strtoull _strtoui64 +# endif +# ifndef S_ISREG +# define S_ISREG(m) ((m) & S_IFREG) +# endif +# ifndef S_ISDIR +# define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +# endif +# ifndef S_IRWXU +# define S_IRWXU 00700 +# endif +# ifndef HWLOC_HAVE_DECL_STRCASECMP +# define strcasecmp _stricmp +# endif +# if !HAVE_DECL_SNPRINTF +# define snprintf _snprintf +# endif +# if HAVE_DECL__STRDUP +# define strdup _strdup +# endif +# if HAVE_DECL__PUTENV +# define putenv _putenv +# endif +#endif + +#if defined HWLOC_WIN_SYS && !defined __MINGW32__ && !defined(__CYGWIN__) +/* MSVC doesn't support C99 variable-length array */ +#include +#define HWLOC_VLA(_type, _name, _nb) _type *_name = (_type*) _alloca((_nb)*sizeof(_type)) +#else +#define HWLOC_VLA(_type, _name, _nb) _type _name[_nb] +#endif + +#endif /* HWLOC_PRIVATE_MISC_H */ diff --git a/src/3rdparty/hwloc/include/private/netloc.h b/src/3rdparty/hwloc/include/private/netloc.h new file mode 100644 index 00000000..c070c54c --- /dev/null +++ b/src/3rdparty/hwloc/include/private/netloc.h @@ -0,0 +1,578 @@ +/* + * Copyright © 2014 Cisco Systems, Inc. All rights reserved. + * Copyright © 2013-2014 University of Wisconsin-La Crosse. + * All rights reserved. + * Copyright © 2015-2017 Inria. All rights reserved. + * + * $COPYRIGHT$ + * + * Additional copyrights may follow + * See COPYING in top-level directory. 
+ * + * $HEADER$ + */ + +#ifndef _NETLOC_PRIVATE_H_ +#define _NETLOC_PRIVATE_H_ + +#include +#include +#include +#include +#include + +#define NETLOCFILE_VERSION 1 + +#ifdef NETLOC_SCOTCH +#include +#include +#define NETLOC_int SCOTCH_Num +#else +#define NETLOC_int int +#endif + +/* + * "Import" a few things from hwloc + */ +#define __netloc_attribute_unused __hwloc_attribute_unused +#define __netloc_attribute_malloc __hwloc_attribute_malloc +#define __netloc_attribute_const __hwloc_attribute_const +#define __netloc_attribute_pure __hwloc_attribute_pure +#define __netloc_attribute_deprecated __hwloc_attribute_deprecated +#define __netloc_attribute_may_alias __hwloc_attribute_may_alias +#define NETLOC_DECLSPEC HWLOC_DECLSPEC + + +/********************************************************************** + * Types + **********************************************************************/ + +/** + * Definitions for Comparators + * \sa These are the return values from the following functions: + * netloc_network_compare, netloc_dt_edge_t_compare, netloc_dt_node_t_compare + */ +typedef enum { + NETLOC_CMP_SAME = 0, /**< Compared as the Same */ + NETLOC_CMP_SIMILAR = -1, /**< Compared as Similar, but not the Same */ + NETLOC_CMP_DIFF = -2 /**< Compared as Different */ +} netloc_compare_type_t; + +/** + * Enumerated type for the various types of supported networks + */ +typedef enum { + NETLOC_NETWORK_TYPE_ETHERNET = 1, /**< Ethernet network */ + NETLOC_NETWORK_TYPE_INFINIBAND = 2, /**< InfiniBand network */ + NETLOC_NETWORK_TYPE_INVALID = 3 /**< Invalid network */ +} netloc_network_type_t; + +/** + * Enumerated type for the various types of supported topologies + */ +typedef enum { + NETLOC_TOPOLOGY_TYPE_INVALID = -1, /**< Invalid */ + NETLOC_TOPOLOGY_TYPE_TREE = 1, /**< Tree */ +} netloc_topology_type_t; + +/** + * Enumerated type for the various types of nodes + */ +typedef enum { + NETLOC_NODE_TYPE_HOST = 0, /**< Host (a.k.a., network addressable endpoint - e.g., MAC 
Address) node */ + NETLOC_NODE_TYPE_SWITCH = 1, /**< Switch node */ + NETLOC_NODE_TYPE_INVALID = 2 /**< Invalid node */ +} netloc_node_type_t; + +typedef enum { + NETLOC_ARCH_TREE = 0, /* Fat tree */ +} netloc_arch_type_t; + + +/* Pre declarations to avoid inter dependency problems */ +/** \cond IGNORE */ +struct netloc_topology_t; +typedef struct netloc_topology_t netloc_topology_t; +struct netloc_node_t; +typedef struct netloc_node_t netloc_node_t; +struct netloc_edge_t; +typedef struct netloc_edge_t netloc_edge_t; +struct netloc_physical_link_t; +typedef struct netloc_physical_link_t netloc_physical_link_t; +struct netloc_path_t; +typedef struct netloc_path_t netloc_path_t; + +struct netloc_arch_tree_t; +typedef struct netloc_arch_tree_t netloc_arch_tree_t; +struct netloc_arch_node_t; +typedef struct netloc_arch_node_t netloc_arch_node_t; +struct netloc_arch_node_slot_t; +typedef struct netloc_arch_node_slot_t netloc_arch_node_slot_t; +struct netloc_arch_t; +typedef struct netloc_arch_t netloc_arch_t; +/** \endcond */ + +/** + * \struct netloc_topology_t + * \brief Netloc Topology Context + * + * An opaque data structure used to reference a network topology. + * + * \note Must be initialized with \ref netloc_topology_construct() + */ +struct netloc_topology_t { + /** Topology path */ + char *topopath; + /** Subnet ID */ + char *subnet_id; + + /** Node List */ + netloc_node_t *nodes; /* Hash table of nodes by physical_id */ + netloc_node_t *nodesByHostname; /* Hash table of nodes by hostname */ + + netloc_physical_link_t *physical_links; /* Hash table with physcial links */ + + /** Partition List */ + UT_array *partitions; + + /** Hwloc topology List */ + char *hwlocpath; + UT_array *topos; + hwloc_topology_t *hwloc_topos; + + /** Type of the graph */ + netloc_topology_type_t type; +}; + +/** + * \brief Netloc Node Type + * + * Represents the concept of a node (a.k.a., vertex, endpoint) within a network + * graph. This could be a server or a network switch. 
The \ref node_type parameter + * will distinguish the exact type of node this represents in the graph. + */ +struct netloc_node_t { + UT_hash_handle hh; /* makes this structure hashable with physical_id */ + UT_hash_handle hh2; /* makes this structure hashable with hostname */ + + /** Physical ID of the node */ + char physical_id[20]; + + /** Logical ID of the node (if any) */ + int logical_id; + + /** Type of the node */ + netloc_node_type_t type; + + /* Pointer to physical_links */ + UT_array *physical_links; + + /** Description information from discovery (if any) */ + char *description; + + /** + * Application-given private data pointer. + * Initialized to NULL, and not used by the netloc library. + */ + void * userdata; + + /** Outgoing edges from this node */ + netloc_edge_t *edges; + + UT_array *subnodes; /* the group of nodes for the virtual nodes */ + + netloc_path_t *paths; + + char *hostname; + + UT_array *partitions; /* index in the list from the topology */ + + hwloc_topology_t hwlocTopo; + int hwlocTopoIdx; +}; + +/** + * \brief Netloc Edge Type + * + * Represents the concept of a directed edge within a network graph. + * + * \note We do not point to the netloc_node_t structure directly to + * simplify the representation, and allow the information to more easily + * be entered into the data store without circular references. + * \todo JJH Is the note above still true? + */ +struct netloc_edge_t { + UT_hash_handle hh; /* makes this structure hashable */ + + netloc_node_t *dest; + + int id; + + /** Pointers to the parent node */ + netloc_node_t *node; + + /* Pointer to physical_links */ + UT_array *physical_links; + + /** total gbits of the links */ + float total_gbits; + + UT_array *partitions; /* index in the list from the topology */ + + UT_array *subnode_edges; /* for edges going to virtual nodes */ + + struct netloc_edge_t *other_way; + + /** + * Application-given private data pointer. + * Initialized to NULL, and not used by the netloc library. 
+ */ + void * userdata; +}; + + +struct netloc_physical_link_t { + UT_hash_handle hh; /* makes this structure hashable */ + + int id; // TODO long long + netloc_node_t *src; + netloc_node_t *dest; + int ports[2]; + char *width; + char *speed; + + netloc_edge_t *edge; + + int other_way_id; + struct netloc_physical_link_t *other_way; + + UT_array *partitions; /* index in the list from the topology */ + + /** gbits of the link from speed and width */ + float gbits; + + /** Description information from discovery (if any) */ + char *description; +}; + +struct netloc_path_t { + UT_hash_handle hh; /* makes this structure hashable */ + char dest_id[20]; + UT_array *links; +}; + + +/********************************************************************** + * Architecture structures + **********************************************************************/ +struct netloc_arch_tree_t { + NETLOC_int num_levels; + NETLOC_int *degrees; + NETLOC_int *cost; +}; + +struct netloc_arch_node_t { + UT_hash_handle hh; /* makes this structure hashable */ + char *name; /* Hash key */ + netloc_node_t *node; /* Corresponding node */ + int idx_in_topo; /* idx with ghost hosts to have complete topo */ + int num_slots; /* it is not the real number of slots but the maximum slot idx */ + int *slot_idx; /* corresponding idx in slot_tree */ + int *slot_os_idx; /* corresponding os index for each leaf in tree */ + netloc_arch_tree_t *slot_tree; /* Tree built from hwloc */ + int num_current_slots; /* Number of PUs */ + NETLOC_int *current_slots; /* indices in the complete tree */ + int *slot_ranks; /* corresponding MPI rank for each leaf in tree */ +}; + +struct netloc_arch_node_slot_t { + netloc_arch_node_t *node; + int slot; +}; + +struct netloc_arch_t { + netloc_topology_t *topology; + int has_slots; /* if slots are included in the architecture */ + netloc_arch_type_t type; + union { + netloc_arch_tree_t *node_tree; + netloc_arch_tree_t *global_tree; + } arch; + netloc_arch_node_t *nodes_by_name; + 
netloc_arch_node_slot_t *node_slot_by_idx; /* node_slot by index in complete topo */ + NETLOC_int num_current_hosts; /* if has_slots, host is a slot, else host is a node */ + NETLOC_int *current_hosts; /* indices in the complete topology */ +}; + +/********************************************************************** + * Topology Functions + **********************************************************************/ +/** + * Allocate a topology handle. + * + * User is responsible for calling \ref netloc_detach on the topology handle. + * The network parameter information is deep copied into the topology handle, so the + * user may destruct the network handle after calling this function and/or reuse + * the network handle. + * + * \returns NETLOC_SUCCESS on success + * \returns NETLOC_ERROR upon an error. + */ +netloc_topology_t *netloc_topology_construct(char *path); + +/** + * Destruct a topology handle + * + * \param topology A valid pointer to a \ref netloc_topology_t handle created + * from a prior call to \ref netloc_topology_construct. + * + * \returns NETLOC_SUCCESS on success + * \returns NETLOC_ERROR upon an error. 
+ */ +int netloc_topology_destruct(netloc_topology_t *topology); + +int netloc_topology_find_partition_idx(netloc_topology_t *topology, char *partition_name); + +int netloc_topology_read_hwloc(netloc_topology_t *topology, int num_nodes, + netloc_node_t **node_list); + +#define netloc_topology_iter_partitions(topology,partition) \ + for ((partition) = (char **)utarray_front(topology->partitions); \ + (partition) != NULL; \ + (partition) = (char **)utarray_next(topology->partitions, partition)) + +#define netloc_topology_iter_hwloctopos(topology,hwloctopo) \ + for ((hwloctopo) = (char **)utarray_front(topology->topos); \ + (hwloctopo) != NULL; \ + (hwloctopo) = (char **)utarray_next(topology->topos, hwloctopo)) + +#define netloc_topology_find_node(topology,node_id,node) \ + HASH_FIND_STR(topology->nodes, node_id, node) + +#define netloc_topology_iter_nodes(topology,node,_tmp) \ + HASH_ITER(hh, topology->nodes, node, _tmp) + +#define netloc_topology_num_nodes(topology) \ + HASH_COUNT(topology->nodes) + +/*************************************************/ + + +/** + * Constructor for netloc_node_t + * + * User is responsible for calling the destructor on the handle. + * + * Returns + * A newly allocated pointer to the network information. 
+ */ +netloc_node_t *netloc_node_construct(void); + +/** + * Destructor for netloc_node_t + * + * \param node A valid node handle + * + * Returns + * NETLOC_SUCCESS on success + * NETLOC_ERROR on error + */ +int netloc_node_destruct(netloc_node_t *node); + +char *netloc_node_pretty_print(netloc_node_t* node); + +#define netloc_node_get_num_subnodes(node) \ + utarray_len((node)->subnodes) + +#define netloc_node_get_subnode(node,i) \ + (*(netloc_node_t **)utarray_eltptr((node)->subnodes, (i))) + +#define netloc_node_get_num_edges(node) \ + utarray_len((node)->edges) + +#define netloc_node_get_edge(node,i) \ + (*(netloc_edge_t **)utarray_eltptr((node)->edges, (i))) + +#define netloc_node_iter_edges(node,edge,_tmp) \ + HASH_ITER(hh, node->edges, edge, _tmp) + +#define netloc_node_iter_paths(node,path,_tmp) \ + HASH_ITER(hh, node->paths, path, _tmp) + +#define netloc_node_is_host(node) \ + (node->type == NETLOC_NODE_TYPE_HOST) + +#define netloc_node_is_switch(node) \ + (node->type == NETLOC_NODE_TYPE_SWITCH) + +#define netloc_node_iter_paths(node, path,_tmp) \ + HASH_ITER(hh, node->paths, path, _tmp) + +int netloc_node_is_in_partition(netloc_node_t *node, int partition); + +/*************************************************/ + + +/** + * Constructor for netloc_edge_t + * + * User is responsible for calling the destructor on the handle. + * + * Returns + * A newly allocated pointer to the edge information. 
+ */ +netloc_edge_t *netloc_edge_construct(void); + +/** + * Destructor for netloc_edge_t + * + * \param edge A valid edge handle + * + * Returns + * NETLOC_SUCCESS on success + * NETLOC_ERROR on error + */ +int netloc_edge_destruct(netloc_edge_t *edge); + +char * netloc_edge_pretty_print(netloc_edge_t* edge); + +void netloc_edge_reset_uid(void); + +int netloc_edge_is_in_partition(netloc_edge_t *edge, int partition); + +#define netloc_edge_get_num_links(edge) \ + utarray_len((edge)->physical_links) + +#define netloc_edge_get_link(edge,i) \ + (*(netloc_physical_link_t **)utarray_eltptr((edge)->physical_links, (i))) + +#define netloc_edge_get_num_subedges(edge) \ + utarray_len((edge)->subnode_edges) + +#define netloc_edge_get_subedge(edge,i) \ + (*(netloc_edge_t **)utarray_eltptr((edge)->subnode_edges, (i))) + +/*************************************************/ + + +/** + * Constructor for netloc_physical_link_t + * + * User is responsible for calling the destructor on the handle. + * + * Returns + * A newly allocated pointer to the physical link information. 
+ */ +netloc_physical_link_t * netloc_physical_link_construct(void); + +/** + * Destructor for netloc_physical_link_t + * + * Returns + * NETLOC_SUCCESS on success + * NETLOC_ERROR on error + */ +int netloc_physical_link_destruct(netloc_physical_link_t *link); + +char * netloc_link_pretty_print(netloc_physical_link_t* link); + +/*************************************************/ + + +netloc_path_t *netloc_path_construct(void); +int netloc_path_destruct(netloc_path_t *path); + + +/********************************************************************** + * Architecture functions + **********************************************************************/ + +netloc_arch_t * netloc_arch_construct(void); + +int netloc_arch_destruct(netloc_arch_t *arch); + +int netloc_arch_build(netloc_arch_t *arch, int add_slots); + +int netloc_arch_set_current_resources(netloc_arch_t *arch); + +int netloc_arch_set_global_resources(netloc_arch_t *arch); + +int netloc_arch_node_get_hwloc_info(netloc_arch_node_t *arch); + +void netloc_arch_tree_complete(netloc_arch_tree_t *tree, UT_array **down_degrees_by_level, + int num_hosts, int **parch_idx); + +NETLOC_int netloc_arch_tree_num_leaves(netloc_arch_tree_t *tree); + + +/********************************************************************** + * Access functions of various elements of the topology + **********************************************************************/ + +#define netloc_get_num_partitions(object) \ + utarray_len((object)->partitions) + +#define netloc_get_partition(object,i) \ + (*(int *)utarray_eltptr((object)->partitions, (i))) + + +#define netloc_path_iter_links(path,link) \ + for ((link) = (netloc_physical_link_t **)utarray_front(path->links); \ + (link) != NULL; \ + (link) = (netloc_physical_link_t **)utarray_next(path->links, link)) + +/********************************************************************** + * Misc functions + **********************************************************************/ + +/** + * Decode the 
network type + * + * \param net_type A valid member of the \ref netloc_network_type_t type + * + * \returns NULL if the type is invalid + * \returns A string for that \ref netloc_network_type_t type + */ +static inline const char * netloc_network_type_decode(netloc_network_type_t net_type) { + if( NETLOC_NETWORK_TYPE_ETHERNET == net_type ) { + return "ETH"; + } + else if( NETLOC_NETWORK_TYPE_INFINIBAND == net_type ) { + return "IB"; + } + else { + return NULL; + } +} + +/** + * Decode the node type + * + * \param node_type A valid member of the \ref netloc_node_type_t type + * + * \returns NULL if the type is invalid + * \returns A string for that \ref netloc_node_type_t type + */ +static inline const char * netloc_node_type_decode(netloc_node_type_t node_type) { + if( NETLOC_NODE_TYPE_SWITCH == node_type ) { + return "SW"; + } + else if( NETLOC_NODE_TYPE_HOST == node_type ) { + return "CA"; + } + else { + return NULL; + } +} + +ssize_t netloc_line_get(char **lineptr, size_t *n, FILE *stream); + +char *netloc_line_get_next_token(char **string, char c); + +int netloc_build_comm_mat(char *filename, int *pn, double ***pmat); + +#define STRDUP_IF_NOT_NULL(str) (NULL == str ? NULL : strdup(str)) +#define STR_EMPTY_IF_NULL(str) (NULL == str ? "" : str) + + +#endif // _NETLOC_PRIVATE_H_ diff --git a/src/3rdparty/hwloc/include/private/private.h b/src/3rdparty/hwloc/include/private/private.h new file mode 100644 index 00000000..8e3964ab --- /dev/null +++ b/src/3rdparty/hwloc/include/private/private.h @@ -0,0 +1,417 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2019 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * + * See COPYING in top-level directory. + */ + +/* Internal types and helpers. */ + + +#ifdef HWLOC_INSIDE_PLUGIN +/* + * these declarations are internal only, they are not available to plugins + * (many functions below are internal static symbols). 
+ */ +#error This file should not be used in plugins +#endif + + +#ifndef HWLOC_PRIVATE_H +#define HWLOC_PRIVATE_H + +#include +#include +#include +#include +#include +#include +#ifdef HAVE_UNISTD_H +#include +#endif +#ifdef HAVE_STDINT_H +#include +#endif +#ifdef HAVE_SYS_UTSNAME_H +#include +#endif +#include + +#define HWLOC_TOPOLOGY_ABI 0x20000 /* version of the layout of struct topology */ + +/***************************************************** + * WARNING: + * changes below in this structure (and its children) + * should cause a bump of HWLOC_TOPOLOGY_ABI. + *****************************************************/ + +struct hwloc_topology { + unsigned topology_abi; + + unsigned nb_levels; /* Number of horizontal levels */ + unsigned nb_levels_allocated; /* Number of levels allocated and zeroed in level_nbobjects and levels below */ + unsigned *level_nbobjects; /* Number of objects on each horizontal level */ + struct hwloc_obj ***levels; /* Direct access to levels, levels[l = 0 .. nblevels-1][0..level_nbobjects[l]] */ + unsigned long flags; + int type_depth[HWLOC_OBJ_TYPE_MAX]; + enum hwloc_type_filter_e type_filter[HWLOC_OBJ_TYPE_MAX]; + int is_thissystem; + int is_loaded; + int modified; /* >0 if objects were added/removed recently, which means a reconnect is needed */ + hwloc_pid_t pid; /* Process ID the topology is view from, 0 for self */ + void *userdata; + uint64_t next_gp_index; + + void *adopted_shmem_addr; + size_t adopted_shmem_length; + +#define HWLOC_NR_SLEVELS 5 +#define HWLOC_SLEVEL_NUMANODE 0 +#define HWLOC_SLEVEL_BRIDGE 1 +#define HWLOC_SLEVEL_PCIDEV 2 +#define HWLOC_SLEVEL_OSDEV 3 +#define HWLOC_SLEVEL_MISC 4 + /* order must match negative depth, it's asserted in setup_defaults() */ +#define HWLOC_SLEVEL_FROM_DEPTH(x) (HWLOC_TYPE_DEPTH_NUMANODE-(x)) +#define HWLOC_SLEVEL_TO_DEPTH(x) (HWLOC_TYPE_DEPTH_NUMANODE-(x)) + struct hwloc_special_level_s { + unsigned nbobjs; + struct hwloc_obj **objs; + struct hwloc_obj *first, *last; /* Temporarily 
used while listing object before building the objs array */ + } slevels[HWLOC_NR_SLEVELS]; + + hwloc_bitmap_t allowed_cpuset; + hwloc_bitmap_t allowed_nodeset; + + struct hwloc_binding_hooks { + int (*set_thisproc_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags); + int (*get_thisproc_cpubind)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); + int (*set_thisthread_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags); + int (*get_thisthread_cpubind)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); + int (*set_proc_cpubind)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_cpuset_t set, int flags); + int (*get_proc_cpubind)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_cpuset_t set, int flags); +#ifdef hwloc_thread_t + int (*set_thread_cpubind)(hwloc_topology_t topology, hwloc_thread_t tid, hwloc_const_cpuset_t set, int flags); + int (*get_thread_cpubind)(hwloc_topology_t topology, hwloc_thread_t tid, hwloc_cpuset_t set, int flags); +#endif + + int (*get_thisproc_last_cpu_location)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); + int (*get_thisthread_last_cpu_location)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags); + int (*get_proc_last_cpu_location)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_cpuset_t set, int flags); + + int (*set_thisproc_membind)(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); + int (*get_thisproc_membind)(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags); + int (*set_thisthread_membind)(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); + int (*get_thisthread_membind)(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags); + int (*set_proc_membind)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int 
flags); + int (*get_proc_membind)(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags); + int (*set_area_membind)(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); + int (*get_area_membind)(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags); + int (*get_area_memlocation)(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, int flags); + /* This has to return the same kind of pointer as alloc_membind, so that free_membind can be used on it */ + void *(*alloc)(hwloc_topology_t topology, size_t len); + /* alloc_membind has to always succeed if !(flags & HWLOC_MEMBIND_STRICT). + * see hwloc_alloc_or_fail which is convenient for that. */ + void *(*alloc_membind)(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags); + int (*free_membind)(hwloc_topology_t topology, void *addr, size_t len); + + int (*get_allowed_resources)(hwloc_topology_t topology); + } binding_hooks; + + struct hwloc_topology_support support; + + void (*userdata_export_cb)(void *reserved, struct hwloc_topology *topology, struct hwloc_obj *obj); + void (*userdata_import_cb)(struct hwloc_topology *topology, struct hwloc_obj *obj, const char *name, const void *buffer, size_t length); + int userdata_not_decoded; + + struct hwloc_internal_distances_s { + hwloc_obj_type_t type; + /* add union hwloc_obj_attr_u if we ever support groups */ + unsigned nbobjs; + uint64_t *indexes; /* array of OS or GP indexes before we can convert them into objs. */ + uint64_t *values; /* distance matrices, ordered according to the above indexes/objs array. + * distance from i to j is stored in slot i*nbnodes+j. 
+ */ + unsigned long kind; + + /* objects are currently stored in physical_index order */ + hwloc_obj_t *objs; /* array of objects */ + int objs_are_valid; /* set to 1 if the array objs is still valid, 0 if needs refresh */ + + unsigned id; /* to match the container id field of public distances structure */ + struct hwloc_internal_distances_s *prev, *next; + } *first_dist, *last_dist; + unsigned next_dist_id; + + int grouping; + int grouping_verbose; + unsigned grouping_nbaccuracies; + float grouping_accuracies[5]; + unsigned grouping_next_subkind; + + /* list of enabled backends. */ + struct hwloc_backend * backends; + struct hwloc_backend * get_pci_busid_cpuset_backend; + unsigned backend_excludes; + + /* memory allocator for topology objects */ + struct hwloc_tma * tma; + +/***************************************************** + * WARNING: + * changes above in this structure (and its children) + * should cause a bump of HWLOC_TOPOLOGY_ABI. + *****************************************************/ + + /* + * temporary variables during discovery + */ + + /* machine-wide memory. + * temporarily stored there by OSes that only provide this without NUMA information, + * and actually used later by the core. 
+ */ + struct hwloc_numanode_attr_s machine_memory; + + /* pci stuff */ + int need_pci_belowroot_apply_locality; + int pci_has_forced_locality; + unsigned pci_forced_locality_nr; + struct hwloc_pci_forced_locality_s { + unsigned domain; + unsigned bus_first, bus_last; + hwloc_bitmap_t cpuset; + } * pci_forced_locality; + +}; + +extern void hwloc_alloc_root_sets(hwloc_obj_t root); +extern void hwloc_setup_pu_level(struct hwloc_topology *topology, unsigned nb_pus); +extern int hwloc_get_sysctlbyname(const char *name, int64_t *n); +extern int hwloc_get_sysctl(int name[], unsigned namelen, int *n); +extern int hwloc_fallback_nbprocessors(struct hwloc_topology *topology); + +extern int hwloc__object_cpusets_compare_first(hwloc_obj_t obj1, hwloc_obj_t obj2); +extern void hwloc__reorder_children(hwloc_obj_t parent); + +extern void hwloc_topology_setup_defaults(struct hwloc_topology *topology); +extern void hwloc_topology_clear(struct hwloc_topology *topology); + +/* insert memory object as memory child of normal parent */ +extern struct hwloc_obj * hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent, + hwloc_obj_t obj, + hwloc_report_error_t report_error); + +extern void hwloc_pci_discovery_init(struct hwloc_topology *topology); +extern void hwloc_pci_discovery_prepare(struct hwloc_topology *topology); +extern void hwloc_pci_discovery_exit(struct hwloc_topology *topology); + +/* Look for an object matching complete cpuset exactly, or insert one. + * Return NULL on failure. + * Return a good fallback (object above) on failure to insert. + */ +extern hwloc_obj_t hwloc_find_insert_io_parent_by_complete_cpuset(struct hwloc_topology *topology, hwloc_cpuset_t cpuset); + +/* Move PCI objects currently attached to the root object to their actual location. + * Called by the core at the end of hwloc_topology_load(). + * Prior to this call, all PCI objects may be found below the root object. 
+ * After this call and a reconnect of levels, all PCI objects are available through levels. + */ +extern int hwloc_pci_belowroot_apply_locality(struct hwloc_topology *topology); + +extern int hwloc__add_info(struct hwloc_info_s **infosp, unsigned *countp, const char *name, const char *value); +extern int hwloc__add_info_nodup(struct hwloc_info_s **infosp, unsigned *countp, const char *name, const char *value, int replace); +extern int hwloc__move_infos(struct hwloc_info_s **dst_infosp, unsigned *dst_countp, struct hwloc_info_s **src_infosp, unsigned *src_countp); +extern void hwloc__free_infos(struct hwloc_info_s *infos, unsigned count); + +/* set native OS binding hooks */ +extern void hwloc_set_native_binding_hooks(struct hwloc_binding_hooks *hooks, struct hwloc_topology_support *support); +/* set either native OS binding hooks (if thissystem), or dummy ones */ +extern void hwloc_set_binding_hooks(struct hwloc_topology *topology); + +#if defined(HWLOC_LINUX_SYS) +extern void hwloc_set_linuxfs_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* HWLOC_LINUX_SYS */ + +#if defined(HWLOC_BGQ_SYS) +extern void hwloc_set_bgq_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* HWLOC_BGQ_SYS */ + +#ifdef HWLOC_SOLARIS_SYS +extern void hwloc_set_solaris_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* HWLOC_SOLARIS_SYS */ + +#ifdef HWLOC_AIX_SYS +extern void hwloc_set_aix_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* HWLOC_AIX_SYS */ + +#ifdef HWLOC_WIN_SYS +extern void hwloc_set_windows_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* HWLOC_WIN_SYS */ + +#ifdef HWLOC_DARWIN_SYS +extern void hwloc_set_darwin_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* 
HWLOC_DARWIN_SYS */ + +#ifdef HWLOC_FREEBSD_SYS +extern void hwloc_set_freebsd_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* HWLOC_FREEBSD_SYS */ + +#ifdef HWLOC_NETBSD_SYS +extern void hwloc_set_netbsd_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* HWLOC_NETBSD_SYS */ + +#ifdef HWLOC_HPUX_SYS +extern void hwloc_set_hpux_hooks(struct hwloc_binding_hooks *binding_hooks, struct hwloc_topology_support *support); +#endif /* HWLOC_HPUX_SYS */ + +extern int hwloc_look_hardwired_fujitsu_k(struct hwloc_topology *topology); +extern int hwloc_look_hardwired_fujitsu_fx10(struct hwloc_topology *topology); +extern int hwloc_look_hardwired_fujitsu_fx100(struct hwloc_topology *topology); + +/* Insert uname-specific names/values in the object infos array. + * If cached_uname isn't NULL, it is used as a struct utsname instead of recalling uname. + * Any field that starts with \0 is ignored. + */ +extern void hwloc_add_uname_info(struct hwloc_topology *topology, void *cached_uname); + +/* Free obj and its attributes assuming it's not linked to a parent and doesn't have any child */ +extern void hwloc_free_unlinked_object(hwloc_obj_t obj); + +/* Free obj and its children, assuming it's not linked to a parent */ +extern void hwloc_free_object_and_children(hwloc_obj_t obj); + +/* Free obj, its next siblings, and their children, assuming they're not linked to a parent */ +extern void hwloc_free_object_siblings_and_children(hwloc_obj_t obj); + +/* This can be used for the alloc field to get allocated data that can be freed by free() */ +void *hwloc_alloc_heap(hwloc_topology_t topology, size_t len); + +/* This can be used for the alloc field to get allocated data that can be freed by munmap() */ +void *hwloc_alloc_mmap(hwloc_topology_t topology, size_t len); + +/* This can be used for the free_membind field to free data using free() */ +int hwloc_free_heap(hwloc_topology_t 
topology, void *addr, size_t len); + +/* This can be used for the free_membind field to free data using munmap() */ +int hwloc_free_mmap(hwloc_topology_t topology, void *addr, size_t len); + +/* Allocates unbound memory or fail, depending on whether STRICT is requested + * or not */ +static __hwloc_inline void * +hwloc_alloc_or_fail(hwloc_topology_t topology, size_t len, int flags) +{ + if (flags & HWLOC_MEMBIND_STRICT) + return NULL; + return hwloc_alloc(topology, len); +} + +extern void hwloc_internal_distances_init(hwloc_topology_t topology); +extern void hwloc_internal_distances_prepare(hwloc_topology_t topology); +extern void hwloc_internal_distances_destroy(hwloc_topology_t topology); +extern int hwloc_internal_distances_dup(hwloc_topology_t new, hwloc_topology_t old); +extern void hwloc_internal_distances_refresh(hwloc_topology_t topology); +extern int hwloc_internal_distances_add(hwloc_topology_t topology, unsigned nbobjs, hwloc_obj_t *objs, uint64_t *values, unsigned long kind, unsigned long flags); +extern int hwloc_internal_distances_add_by_index(hwloc_topology_t topology, hwloc_obj_type_t type, unsigned nbobjs, uint64_t *indexes, uint64_t *values, unsigned long kind, unsigned long flags); +extern void hwloc_internal_distances_invalidate_cached_objs(hwloc_topology_t topology); + +/* encode src buffer into target buffer. + * targsize must be at least 4*((srclength+2)/3)+1. + * target will be 0-terminated. + */ +extern int hwloc_encode_to_base64(const char *src, size_t srclength, char *target, size_t targsize); +/* decode src buffer into target buffer. + * src is 0-terminated. + * targsize must be at least srclength*3/4+1 (srclength not including \0) + * but only srclength*3/4 characters will be meaningful + * (the next one may be partially written during decoding, but it should be ignored). 
+ */ +extern int hwloc_decode_from_base64(char const *src, char *target, size_t targsize); + +/* Check whether needle matches the beginning of haystack, at least n, and up + * to a colon or \0 */ +extern int hwloc_namecoloncmp(const char *haystack, const char *needle, size_t n); + +/* On some systems, snprintf returns the size of written data, not the actually + * required size. hwloc_snprintf always report the actually required size. */ +extern int hwloc_snprintf(char *str, size_t size, const char *format, ...) __hwloc_attribute_format(printf, 3, 4); + +/* Return the name of the currently running program, if supported. + * If not NULL, must be freed by the caller. + */ +extern char * hwloc_progname(struct hwloc_topology *topology); + +/* obj->attr->group.kind internal values. + * the core will keep the smallest ones when merging two groups, + * that's why user-given kinds are first. + */ +/* first, user-given groups, should remain as long as possible */ +#define HWLOC_GROUP_KIND_USER 0 /* user-given, user may use subkind too */ +#define HWLOC_GROUP_KIND_SYNTHETIC 10 /* subkind is group depth within synthetic description */ +/* then, hardware-specific groups */ +#define HWLOC_GROUP_KIND_INTEL_KNL_SUBNUMA_CLUSTER 100 /* no subkind */ +#define HWLOC_GROUP_KIND_INTEL_EXTTOPOENUM_UNKNOWN 101 /* subkind is unknown level */ +#define HWLOC_GROUP_KIND_INTEL_MODULE 102 /* no subkind */ +#define HWLOC_GROUP_KIND_INTEL_TILE 103 /* no subkind */ +#define HWLOC_GROUP_KIND_INTEL_DIE 104 /* no subkind */ +#define HWLOC_GROUP_KIND_S390_BOOK 110 /* no subkind */ +#define HWLOC_GROUP_KIND_AMD_COMPUTE_UNIT 120 /* no subkind */ +/* then, OS-specific groups */ +#define HWLOC_GROUP_KIND_SOLARIS_PG_HW_PERF 200 /* subkind is group width */ +#define HWLOC_GROUP_KIND_AIX_SDL_UNKNOWN 210 /* subkind is SDL level */ +#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220 /* no subkind */ +#define HWLOC_GROUP_KIND_WINDOWS_RELATIONSHIP_UNKNOWN 221 /* no subkind */ +/* distance groups */ +#define 
HWLOC_GROUP_KIND_DISTANCE 900 /* subkind is round of adding these groups during distance based grouping */ +/* finally, hwloc-specific groups required to insert something else, should disappear as soon as possible */ +#define HWLOC_GROUP_KIND_IO 1000 /* no subkind */ +#define HWLOC_GROUP_KIND_MEMORY 1001 /* no subkind */ + +/* memory allocator for topology objects */ +struct hwloc_tma { + void * (*malloc)(struct hwloc_tma *, size_t); + void *data; + int dontfree; /* when set, free() or realloc() cannot be used, and tma->malloc() cannot fail */ +}; + +static __hwloc_inline void * +hwloc_tma_malloc(struct hwloc_tma *tma, + size_t size) +{ + if (tma) { + return tma->malloc(tma, size); + } else { + return malloc(size); + } +} + +static __hwloc_inline void * +hwloc_tma_calloc(struct hwloc_tma *tma, + size_t size) +{ + char *ptr = hwloc_tma_malloc(tma, size); + if (ptr) + memset(ptr, 0, size); + return ptr; +} + +static __hwloc_inline char * +hwloc_tma_strdup(struct hwloc_tma *tma, + const char *src) +{ + size_t len = strlen(src); + char *ptr = hwloc_tma_malloc(tma, len+1); + if (ptr) + memcpy(ptr, src, len+1); + return ptr; +} + +/* bitmap allocator to be used inside hwloc */ +extern hwloc_bitmap_t hwloc_bitmap_tma_dup(struct hwloc_tma *tma, hwloc_const_bitmap_t old); + +extern int hwloc__topology_dup(hwloc_topology_t *newp, hwloc_topology_t old, struct hwloc_tma *tma); +extern void hwloc__topology_disadopt(hwloc_topology_t topology); + +#endif /* HWLOC_PRIVATE_H */ diff --git a/src/3rdparty/hwloc/include/private/solaris-chiptype.h b/src/3rdparty/hwloc/include/private/solaris-chiptype.h new file mode 100644 index 00000000..4ad2130a --- /dev/null +++ b/src/3rdparty/hwloc/include/private/solaris-chiptype.h @@ -0,0 +1,43 @@ +/* + * Copyright © 2009-2010 Oracle and/or its affiliates. All rights reserved. + * + * Copyright © 2017 Inria. All rights reserved. 
+ * $COPYRIGHT$ + * + * Additional copyrights may follow + * + * $HEADER$ + */ + + +#ifdef HWLOC_INSIDE_PLUGIN +/* + * these declarations are internal only, they are not available to plugins + * (functions below are internal static symbols). + */ +#error This file should not be used in plugins +#endif + + +#ifndef HWLOC_PRIVATE_SOLARIS_CHIPTYPE_H +#define HWLOC_PRIVATE_SOLARIS_CHIPTYPE_H + +struct hwloc_solaris_chip_info_s { + char *model; + char *type; + /* L1i, L1d, L2, L3 */ +#define HWLOC_SOLARIS_CHIP_INFO_L1I 0 +#define HWLOC_SOLARIS_CHIP_INFO_L1D 1 +#define HWLOC_SOLARIS_CHIP_INFO_L2I 2 +#define HWLOC_SOLARIS_CHIP_INFO_L2D 3 +#define HWLOC_SOLARIS_CHIP_INFO_L3 4 + long cache_size[5]; /* cleared to -1 if we don't want of that cache */ + unsigned cache_linesize[5]; + unsigned cache_associativity[5]; + int l2_unified; +}; + +/* fills the structure with 0 on error */ +extern void hwloc_solaris_get_chip_info(struct hwloc_solaris_chip_info_s *info); + +#endif /* HWLOC_PRIVATE_SOLARIS_CHIPTYPE_H */ diff --git a/src/3rdparty/hwloc/include/private/xml.h b/src/3rdparty/hwloc/include/private/xml.h new file mode 100644 index 00000000..7c73384d --- /dev/null +++ b/src/3rdparty/hwloc/include/private/xml.h @@ -0,0 +1,108 @@ +/* + * Copyright © 2009-2019 Inria. All rights reserved. + * See COPYING in top-level directory. + */ + +#ifndef PRIVATE_XML_H +#define PRIVATE_XML_H 1 + +#include + +#include + +HWLOC_DECLSPEC int hwloc__xml_verbose(void); + +/************** + * XML import * + **************/ + +typedef struct hwloc__xml_import_state_s { + struct hwloc__xml_import_state_s *parent; + + /* globals shared because the entire stack of states during import */ + struct hwloc_xml_backend_data_s *global; + + /* opaque data used to store backend-specific data. + * statically allocated to allow stack-allocation by the common code without knowing actual backend needs. 
+ */ + char data[32]; +} * hwloc__xml_import_state_t; + +struct hwloc__xml_imported_v1distances_s { + unsigned long kind; + unsigned nbobjs; + float *floats; + struct hwloc__xml_imported_v1distances_s *prev, *next; +}; + +HWLOC_DECLSPEC int hwloc__xml_import_diff(hwloc__xml_import_state_t state, hwloc_topology_diff_t *firstdiffp); + +struct hwloc_xml_backend_data_s { + /* xml backend parameters */ + int (*look_init)(struct hwloc_xml_backend_data_s *bdata, struct hwloc__xml_import_state_s *state); + void (*look_done)(struct hwloc_xml_backend_data_s *bdata, int result); + void (*backend_exit)(struct hwloc_xml_backend_data_s *bdata); + int (*next_attr)(struct hwloc__xml_import_state_s * state, char **namep, char **valuep); + int (*find_child)(struct hwloc__xml_import_state_s * state, struct hwloc__xml_import_state_s * childstate, char **tagp); + int (*close_tag)(struct hwloc__xml_import_state_s * state); /* look for an explicit closing tag */ + void (*close_child)(struct hwloc__xml_import_state_s * state); + int (*get_content)(struct hwloc__xml_import_state_s * state, char **beginp, size_t expected_length); /* return 0 on empty content (and sets beginp to empty string), 1 on actual content, -1 on error or unexpected content length */ + void (*close_content)(struct hwloc__xml_import_state_s * state); + char * msgprefix; + void *data; /* libxml2 doc, or nolibxml buffer */ + unsigned version_major, version_minor; + unsigned nbnumanodes; + hwloc_obj_t first_numanode, last_numanode; /* temporary cousin-list for handling v1distances */ + struct hwloc__xml_imported_v1distances_s *first_v1dist, *last_v1dist; + int dont_merge_die_groups; +}; + +/************** + * XML export * + **************/ + +typedef struct hwloc__xml_export_state_s { + struct hwloc__xml_export_state_s *parent; + + void (*new_child)(struct hwloc__xml_export_state_s *parentstate, struct hwloc__xml_export_state_s *state, const char *name); + void (*new_prop)(struct hwloc__xml_export_state_s *state, const 
char *name, const char *value); + void (*add_content)(struct hwloc__xml_export_state_s *state, const char *buffer, size_t length); + void (*end_object)(struct hwloc__xml_export_state_s *state, const char *name); + + struct hwloc__xml_export_data_s { + hwloc_obj_t v1_memory_group; /* if we need to insert intermediate group above memory children when exporting to v1 */ + } *global; + + /* opaque data used to store backend-specific data. + * statically allocated to allow stack-allocation by the common code without knowing actual backend needs. + */ + char data[40]; +} * hwloc__xml_export_state_t; + +HWLOC_DECLSPEC void hwloc__xml_export_topology(hwloc__xml_export_state_t parentstate, hwloc_topology_t topology, unsigned long flags); + +HWLOC_DECLSPEC void hwloc__xml_export_diff(hwloc__xml_export_state_t parentstate, hwloc_topology_diff_t diff); + +/****************** + * XML components * + ******************/ + +struct hwloc_xml_callbacks { + int (*backend_init)(struct hwloc_xml_backend_data_s *bdata, const char *xmlpath, const char *xmlbuffer, int xmlbuflen); + int (*export_file)(struct hwloc_topology *topology, struct hwloc__xml_export_data_s *edata, const char *filename, unsigned long flags); + int (*export_buffer)(struct hwloc_topology *topology, struct hwloc__xml_export_data_s *edata, char **xmlbuffer, int *buflen, unsigned long flags); + void (*free_buffer)(void *xmlbuffer); + int (*import_diff)(struct hwloc__xml_import_state_s *state, const char *xmlpath, const char *xmlbuffer, int xmlbuflen, hwloc_topology_diff_t *diff, char **refnamep); + int (*export_diff_file)(union hwloc_topology_diff_u *diff, const char *refname, const char *filename); + int (*export_diff_buffer)(union hwloc_topology_diff_u *diff, const char *refname, char **xmlbuffer, int *buflen); +}; + +struct hwloc_xml_component { + struct hwloc_xml_callbacks *nolibxml_callbacks; + struct hwloc_xml_callbacks *libxml_callbacks; +}; + +HWLOC_DECLSPEC void hwloc_xml_callbacks_register(struct 
hwloc_xml_component *component); +HWLOC_DECLSPEC void hwloc_xml_callbacks_reset(void); + +#endif /* PRIVATE_XML_H */ diff --git a/src/3rdparty/hwloc/src/base64.c b/src/3rdparty/hwloc/src/base64.c new file mode 100644 index 00000000..7b3e1210 --- /dev/null +++ b/src/3rdparty/hwloc/src/base64.c @@ -0,0 +1,309 @@ +/* + * Copyright © 2012-2018 Inria. All rights reserved. + * See COPYING in top-level directory. + * + * Modifications after import: + * - removed all #if + * - updated prototypes + * - updated #include + */ + +/* include hwloc's config before anything else + * so that extensions and features are properly enabled + */ +#include + +/* $OpenBSD: base64.c,v 1.5 2006/10/21 09:55:03 otto Exp $ */ + +/* + * Copyright (c) 1996 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS + * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE + * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. + */ + +/* + * Portions Copyright (c) 1995 by International Business Machines, Inc. + * + * International Business Machines, Inc. 
(hereinafter called IBM) grants + * permission under its copyrights to use, copy, modify, and distribute this + * Software with or without fee, provided that the above copyright notice and + * all paragraphs of this notice appear in all copies, and that the name of IBM + * not be used in connection with the marketing of any product incorporating + * the Software or modifications thereof, without specific, written prior + * permission. + * + * To the extent it has a right to do so, IBM grants an immunity from suit + * under its patents, if any, for the use, sale or manufacture of products to + * the extent that such products are used for performing Domain Name System + * dynamic updates in TCP/IP networks by means of the Software. No immunity is + * granted for any product per se or for any other function of any product. + * + * THE SOFTWARE IS PROVIDED "AS IS", AND IBM DISCLAIMS ALL WARRANTIES, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE. IN NO EVENT SHALL IBM BE LIABLE FOR ANY SPECIAL, + * DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING + * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE, EVEN + * IF IBM IS APPRISED OF THE POSSIBILITY OF SUCH DAMAGES. + */ + +/* OPENBSD ORIGINAL: lib/libc/net/base64.c */ + +static const char Base64[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +static const char Pad64 = '='; + +/* (From RFC1521 and draft-ietf-dnssec-secext-03.txt) + The following encoding technique is taken from RFC 1521 by Borenstein + and Freed. It is reproduced here in a slightly edited form for + convenience. + + A 65-character subset of US-ASCII is used, enabling 6 bits to be + represented per printable character. (The extra 65th character, "=", + is used to signify a special processing function.) + + The encoding process represents 24-bit groups of input bits as output + strings of 4 encoded characters. 
Proceeding from left to right, a + 24-bit input group is formed by concatenating 3 8-bit input groups. + These 24 bits are then treated as 4 concatenated 6-bit groups, each + of which is translated into a single digit in the base64 alphabet. + + Each 6-bit group is used as an index into an array of 64 printable + characters. The character referenced by the index is placed in the + output string. + + Table 1: The Base64 Alphabet + + Value Encoding Value Encoding Value Encoding Value Encoding + 0 A 17 R 34 i 51 z + 1 B 18 S 35 j 52 0 + 2 C 19 T 36 k 53 1 + 3 D 20 U 37 l 54 2 + 4 E 21 V 38 m 55 3 + 5 F 22 W 39 n 56 4 + 6 G 23 X 40 o 57 5 + 7 H 24 Y 41 p 58 6 + 8 I 25 Z 42 q 59 7 + 9 J 26 a 43 r 60 8 + 10 K 27 b 44 s 61 9 + 11 L 28 c 45 t 62 + + 12 M 29 d 46 u 63 / + 13 N 30 e 47 v + 14 O 31 f 48 w (pad) = + 15 P 32 g 49 x + 16 Q 33 h 50 y + + Special processing is performed if fewer than 24 bits are available + at the end of the data being encoded. A full encoding quantum is + always completed at the end of a quantity. When fewer than 24 input + bits are available in an input group, zero bits are added (on the + right) to form an integral number of 6-bit groups. Padding at the + end of the data is performed using the '=' character. + + Since all base64 input is an integral number of octets, only the + ------------------------------------------------- + following cases can arise: + + (1) the final quantum of encoding input is an integral + multiple of 24 bits; here, the final unit of encoded + output will be an integral multiple of 4 characters + with no "=" padding, + (2) the final quantum of encoding input is exactly 8 bits; + here, the final unit of encoded output will be two + characters followed by two "=" padding characters, or + (3) the final quantum of encoding input is exactly 16 bits; + here, the final unit of encoded output will be three + characters followed by one "=" padding character. 
+ */ + +#include +#include +#include + +int +hwloc_encode_to_base64(const char *src, size_t srclength, char *target, size_t targsize) +{ + size_t datalength = 0; + unsigned char input[3]; + unsigned char output[4]; + unsigned int i; + + while (2 < srclength) { + input[0] = *src++; + input[1] = *src++; + input[2] = *src++; + srclength -= 3; + + output[0] = input[0] >> 2; + output[1] = ((input[0] & 0x03) << 4) + (input[1] >> 4); + output[2] = ((input[1] & 0x0f) << 2) + (input[2] >> 6); + output[3] = input[2] & 0x3f; + + if (datalength + 4 > targsize) + return (-1); + target[datalength++] = Base64[output[0]]; + target[datalength++] = Base64[output[1]]; + target[datalength++] = Base64[output[2]]; + target[datalength++] = Base64[output[3]]; + } + + /* Now we worry about padding. */ + if (0 != srclength) { + /* Get what's left. */ + input[0] = input[1] = input[2] = '\0'; + for (i = 0; i < srclength; i++) + input[i] = *src++; + + output[0] = input[0] >> 2; + output[1] = ((input[0] & 0x03) << 4) + (input[1] >> 4); + output[2] = ((input[1] & 0x0f) << 2) + (input[2] >> 6); + + if (datalength + 4 > targsize) + return (-1); + target[datalength++] = Base64[output[0]]; + target[datalength++] = Base64[output[1]]; + if (srclength == 1) + target[datalength++] = Pad64; + else + target[datalength++] = Base64[output[2]]; + target[datalength++] = Pad64; + } + if (datalength >= targsize) + return (-1); + target[datalength] = '\0'; /* Returned value doesn't count \0. */ + return (int)(datalength); +} + +/* skips all whitespace anywhere. + converts characters, four at a time, starting at (or after) + src from base - 64 numbers into three 8 bit bytes in the target area. + it returns the number of data bytes stored at the target, or -1 on error. 
+ */ + +int +hwloc_decode_from_base64(char const *src, char *target, size_t targsize) +{ + unsigned int tarindex, state; + int ch; + char *pos; + + state = 0; + tarindex = 0; + + while ((ch = *src++) != '\0') { + if (isspace(ch)) /* Skip whitespace anywhere. */ + continue; + + if (ch == Pad64) + break; + + pos = strchr(Base64, ch); + if (pos == 0) /* A non-base64 character. */ + return (-1); + + switch (state) { + case 0: + if (target) { + if (tarindex >= targsize) + return (-1); + target[tarindex] = (char)(pos - Base64) << 2; + } + state = 1; + break; + case 1: + if (target) { + if (tarindex + 1 >= targsize) + return (-1); + target[tarindex] |= (pos - Base64) >> 4; + target[tarindex+1] = ((pos - Base64) & 0x0f) + << 4 ; + } + tarindex++; + state = 2; + break; + case 2: + if (target) { + if (tarindex + 1 >= targsize) + return (-1); + target[tarindex] |= (pos - Base64) >> 2; + target[tarindex+1] = ((pos - Base64) & 0x03) + << 6; + } + tarindex++; + state = 3; + break; + case 3: + if (target) { + if (tarindex >= targsize) + return (-1); + target[tarindex] |= (pos - Base64); + } + tarindex++; + state = 0; + break; + } + } + + /* + * We are done decoding Base-64 chars. Let's see if we ended + * on a byte boundary, and/or with erroneous trailing characters. + */ + + if (ch == Pad64) { /* We got a pad char. */ + ch = *src++; /* Skip it, get next. */ + switch (state) { + case 0: /* Invalid = in first position */ + case 1: /* Invalid = in second position */ + return (-1); + + case 2: /* Valid, means one byte of info */ + /* Skip any number of spaces. */ + for (; ch != '\0'; ch = *src++) + if (!isspace(ch)) + break; + /* Make sure there is another trailing = sign. */ + if (ch != Pad64) + return (-1); + ch = *src++; /* Skip the = */ + /* Fall through to "single trailing =" case. */ + /* FALLTHROUGH */ + + case 3: /* Valid, means two bytes of info */ + /* + * We know this char is an =. Is there anything but + * whitespace after it? 
+ */ + for (; ch != '\0'; ch = *src++) + if (!isspace(ch)) + return (-1); + + /* + * Now make sure for cases 2 and 3 that the "extra" + * bits that slopped past the last full byte were + * zeros. If we don't check them, they become a + * subliminal channel. + */ + if (target && target[tarindex] != 0) + return (-1); + } + } else { + /* + * We ended by seeing the end of the string. Make sure we + * have no partial bytes lying around. + */ + if (state != 0) + return (-1); + } + + return (tarindex); +} diff --git a/src/3rdparty/hwloc/src/bind.c b/src/3rdparty/hwloc/src/bind.c new file mode 100644 index 00000000..b3457bc7 --- /dev/null +++ b/src/3rdparty/hwloc/src/bind.c @@ -0,0 +1,922 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009-2010, 2012 Université Bordeaux + * Copyright © 2011-2015 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +#include +#include +#include +#include +#ifdef HAVE_SYS_MMAN_H +# include +#endif +/* is only needed if we don't have posix_memalign() */ +#if defined(hwloc_getpagesize) && !defined(HAVE_POSIX_MEMALIGN) && defined(HAVE_MEMALIGN) && defined(HAVE_MALLOC_H) +#include +#endif +#ifdef HAVE_UNISTD_H +#include +#endif +#include +#include + +/* TODO: HWLOC_GNU_SYS, + * + * We could use glibc's sched_setaffinity generically when it is available + * + * Darwin and OpenBSD don't seem to have binding facilities. 
+ */ + +#define HWLOC_CPUBIND_ALLFLAGS (HWLOC_CPUBIND_PROCESS|HWLOC_CPUBIND_THREAD|HWLOC_CPUBIND_STRICT|HWLOC_CPUBIND_NOMEMBIND) + +static hwloc_const_bitmap_t +hwloc_fix_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t set) +{ + hwloc_const_bitmap_t topology_set = hwloc_topology_get_topology_cpuset(topology); + hwloc_const_bitmap_t complete_set = hwloc_topology_get_complete_cpuset(topology); + + if (hwloc_bitmap_iszero(set)) { + errno = EINVAL; + return NULL; + } + + if (!hwloc_bitmap_isincluded(set, complete_set)) { + errno = EINVAL; + return NULL; + } + + if (hwloc_bitmap_isincluded(topology_set, set)) + set = complete_set; + + return set; +} + +int +hwloc_set_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t set, int flags) +{ + if (flags & ~HWLOC_CPUBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + set = hwloc_fix_cpubind(topology, set); + if (!set) + return -1; + + if (flags & HWLOC_CPUBIND_PROCESS) { + if (topology->binding_hooks.set_thisproc_cpubind) + return topology->binding_hooks.set_thisproc_cpubind(topology, set, flags); + } else if (flags & HWLOC_CPUBIND_THREAD) { + if (topology->binding_hooks.set_thisthread_cpubind) + return topology->binding_hooks.set_thisthread_cpubind(topology, set, flags); + } else { + if (topology->binding_hooks.set_thisproc_cpubind) { + int err = topology->binding_hooks.set_thisproc_cpubind(topology, set, flags); + if (err >= 0 || errno != ENOSYS) + return err; + /* ENOSYS, fallback */ + } + if (topology->binding_hooks.set_thisthread_cpubind) + return topology->binding_hooks.set_thisthread_cpubind(topology, set, flags); + } + + errno = ENOSYS; + return -1; +} + +int +hwloc_get_cpubind(hwloc_topology_t topology, hwloc_bitmap_t set, int flags) +{ + if (flags & ~HWLOC_CPUBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if (flags & HWLOC_CPUBIND_PROCESS) { + if (topology->binding_hooks.get_thisproc_cpubind) + return topology->binding_hooks.get_thisproc_cpubind(topology, set, flags); + } else if (flags & 
HWLOC_CPUBIND_THREAD) { + if (topology->binding_hooks.get_thisthread_cpubind) + return topology->binding_hooks.get_thisthread_cpubind(topology, set, flags); + } else { + if (topology->binding_hooks.get_thisproc_cpubind) { + int err = topology->binding_hooks.get_thisproc_cpubind(topology, set, flags); + if (err >= 0 || errno != ENOSYS) + return err; + /* ENOSYS, fallback */ + } + if (topology->binding_hooks.get_thisthread_cpubind) + return topology->binding_hooks.get_thisthread_cpubind(topology, set, flags); + } + + errno = ENOSYS; + return -1; +} + +int +hwloc_set_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_bitmap_t set, int flags) +{ + if (flags & ~HWLOC_CPUBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + set = hwloc_fix_cpubind(topology, set); + if (!set) + return -1; + + if (topology->binding_hooks.set_proc_cpubind) + return topology->binding_hooks.set_proc_cpubind(topology, pid, set, flags); + + errno = ENOSYS; + return -1; +} + +int +hwloc_get_proc_cpubind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t set, int flags) +{ + if (flags & ~HWLOC_CPUBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if (topology->binding_hooks.get_proc_cpubind) + return topology->binding_hooks.get_proc_cpubind(topology, pid, set, flags); + + errno = ENOSYS; + return -1; +} + +#ifdef hwloc_thread_t +int +hwloc_set_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t tid, hwloc_const_bitmap_t set, int flags) +{ + if (flags & ~HWLOC_CPUBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + set = hwloc_fix_cpubind(topology, set); + if (!set) + return -1; + + if (topology->binding_hooks.set_thread_cpubind) + return topology->binding_hooks.set_thread_cpubind(topology, tid, set, flags); + + errno = ENOSYS; + return -1; +} + +int +hwloc_get_thread_cpubind(hwloc_topology_t topology, hwloc_thread_t tid, hwloc_bitmap_t set, int flags) +{ + if (flags & ~HWLOC_CPUBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if 
(topology->binding_hooks.get_thread_cpubind) + return topology->binding_hooks.get_thread_cpubind(topology, tid, set, flags); + + errno = ENOSYS; + return -1; +} +#endif + +int +hwloc_get_last_cpu_location(hwloc_topology_t topology, hwloc_bitmap_t set, int flags) +{ + if (flags & ~HWLOC_CPUBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if (flags & HWLOC_CPUBIND_PROCESS) { + if (topology->binding_hooks.get_thisproc_last_cpu_location) + return topology->binding_hooks.get_thisproc_last_cpu_location(topology, set, flags); + } else if (flags & HWLOC_CPUBIND_THREAD) { + if (topology->binding_hooks.get_thisthread_last_cpu_location) + return topology->binding_hooks.get_thisthread_last_cpu_location(topology, set, flags); + } else { + if (topology->binding_hooks.get_thisproc_last_cpu_location) { + int err = topology->binding_hooks.get_thisproc_last_cpu_location(topology, set, flags); + if (err >= 0 || errno != ENOSYS) + return err; + /* ENOSYS, fallback */ + } + if (topology->binding_hooks.get_thisthread_last_cpu_location) + return topology->binding_hooks.get_thisthread_last_cpu_location(topology, set, flags); + } + + errno = ENOSYS; + return -1; +} + +int +hwloc_get_proc_last_cpu_location(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t set, int flags) +{ + if (flags & ~HWLOC_CPUBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if (topology->binding_hooks.get_proc_last_cpu_location) + return topology->binding_hooks.get_proc_last_cpu_location(topology, pid, set, flags); + + errno = ENOSYS; + return -1; +} + +#define HWLOC_MEMBIND_ALLFLAGS (HWLOC_MEMBIND_PROCESS|HWLOC_MEMBIND_THREAD|HWLOC_MEMBIND_STRICT|HWLOC_MEMBIND_MIGRATE|HWLOC_MEMBIND_NOCPUBIND|HWLOC_MEMBIND_BYNODESET) + +static hwloc_const_nodeset_t +hwloc_fix_membind(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset) +{ + hwloc_const_bitmap_t topology_nodeset = hwloc_topology_get_topology_nodeset(topology); + hwloc_const_bitmap_t complete_nodeset = 
hwloc_topology_get_complete_nodeset(topology); + + if (hwloc_bitmap_iszero(nodeset)) { + errno = EINVAL; + return NULL; + } + + if (!hwloc_bitmap_isincluded(nodeset, complete_nodeset)) { + errno = EINVAL; + return NULL; + } + + if (hwloc_bitmap_isincluded(topology_nodeset, nodeset)) + return complete_nodeset; + + return nodeset; +} + +static int +hwloc_fix_membind_cpuset(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_const_cpuset_t cpuset) +{ + hwloc_const_bitmap_t topology_set = hwloc_topology_get_topology_cpuset(topology); + hwloc_const_bitmap_t complete_set = hwloc_topology_get_complete_cpuset(topology); + hwloc_const_bitmap_t complete_nodeset = hwloc_topology_get_complete_nodeset(topology); + + if (hwloc_bitmap_iszero(cpuset)) { + errno = EINVAL; + return -1; + } + + if (!hwloc_bitmap_isincluded(cpuset, complete_set)) { + errno = EINVAL; + return -1; + } + + if (hwloc_bitmap_isincluded(topology_set, cpuset)) { + hwloc_bitmap_copy(nodeset, complete_nodeset); + return 0; + } + + hwloc_cpuset_to_nodeset(topology, cpuset, nodeset); + return 0; +} + +static __hwloc_inline int hwloc__check_membind_policy(hwloc_membind_policy_t policy) +{ + if (policy == HWLOC_MEMBIND_DEFAULT + || policy == HWLOC_MEMBIND_FIRSTTOUCH + || policy == HWLOC_MEMBIND_BIND + || policy == HWLOC_MEMBIND_INTERLEAVE + || policy == HWLOC_MEMBIND_NEXTTOUCH) + return 0; + return -1; +} + +static int +hwloc_set_membind_by_nodeset(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + if ((flags & ~HWLOC_MEMBIND_ALLFLAGS) || hwloc__check_membind_policy(policy) < 0) { + errno = EINVAL; + return -1; + } + + nodeset = hwloc_fix_membind(topology, nodeset); + if (!nodeset) + return -1; + + if (flags & HWLOC_MEMBIND_PROCESS) { + if (topology->binding_hooks.set_thisproc_membind) + return topology->binding_hooks.set_thisproc_membind(topology, nodeset, policy, flags); + } else if (flags & HWLOC_MEMBIND_THREAD) { + if 
(topology->binding_hooks.set_thisthread_membind) + return topology->binding_hooks.set_thisthread_membind(topology, nodeset, policy, flags); + } else { + if (topology->binding_hooks.set_thisproc_membind) { + int err = topology->binding_hooks.set_thisproc_membind(topology, nodeset, policy, flags); + if (err >= 0 || errno != ENOSYS) + return err; + /* ENOSYS, fallback */ + } + if (topology->binding_hooks.set_thisthread_membind) + return topology->binding_hooks.set_thisthread_membind(topology, nodeset, policy, flags); + } + + errno = ENOSYS; + return -1; +} + +int +hwloc_set_membind(hwloc_topology_t topology, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags) +{ + int ret; + + if (flags & HWLOC_MEMBIND_BYNODESET) { + ret = hwloc_set_membind_by_nodeset(topology, set, policy, flags); + } else { + hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); + if (hwloc_fix_membind_cpuset(topology, nodeset, set)) + ret = -1; + else + ret = hwloc_set_membind_by_nodeset(topology, nodeset, policy, flags); + hwloc_bitmap_free(nodeset); + } + return ret; +} + +static int +hwloc_get_membind_by_nodeset(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + if (flags & ~HWLOC_MEMBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if (flags & HWLOC_MEMBIND_PROCESS) { + if (topology->binding_hooks.get_thisproc_membind) + return topology->binding_hooks.get_thisproc_membind(topology, nodeset, policy, flags); + } else if (flags & HWLOC_MEMBIND_THREAD) { + if (topology->binding_hooks.get_thisthread_membind) + return topology->binding_hooks.get_thisthread_membind(topology, nodeset, policy, flags); + } else { + if (topology->binding_hooks.get_thisproc_membind) { + int err = topology->binding_hooks.get_thisproc_membind(topology, nodeset, policy, flags); + if (err >= 0 || errno != ENOSYS) + return err; + /* ENOSYS, fallback */ + } + if (topology->binding_hooks.get_thisthread_membind) + return 
topology->binding_hooks.get_thisthread_membind(topology, nodeset, policy, flags); + } + + errno = ENOSYS; + return -1; +} + +int +hwloc_get_membind(hwloc_topology_t topology, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags) +{ + int ret; + + if (flags & HWLOC_MEMBIND_BYNODESET) { + ret = hwloc_get_membind_by_nodeset(topology, set, policy, flags); + } else { + hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); + ret = hwloc_get_membind_by_nodeset(topology, nodeset, policy, flags); + if (!ret) + hwloc_cpuset_from_nodeset(topology, set, nodeset); + hwloc_bitmap_free(nodeset); + } + + return ret; +} + +static int +hwloc_set_proc_membind_by_nodeset(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + if ((flags & ~HWLOC_MEMBIND_ALLFLAGS) || hwloc__check_membind_policy(policy) < 0) { + errno = EINVAL; + return -1; + } + + nodeset = hwloc_fix_membind(topology, nodeset); + if (!nodeset) + return -1; + + if (topology->binding_hooks.set_proc_membind) + return topology->binding_hooks.set_proc_membind(topology, pid, nodeset, policy, flags); + + errno = ENOSYS; + return -1; +} + + +int +hwloc_set_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags) +{ + int ret; + + if (flags & HWLOC_MEMBIND_BYNODESET) { + ret = hwloc_set_proc_membind_by_nodeset(topology, pid, set, policy, flags); + } else { + hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); + if (hwloc_fix_membind_cpuset(topology, nodeset, set)) + ret = -1; + else + ret = hwloc_set_proc_membind_by_nodeset(topology, pid, nodeset, policy, flags); + hwloc_bitmap_free(nodeset); + } + + return ret; +} + +static int +hwloc_get_proc_membind_by_nodeset(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + if (flags & ~HWLOC_MEMBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if 
(topology->binding_hooks.get_proc_membind) + return topology->binding_hooks.get_proc_membind(topology, pid, nodeset, policy, flags); + + errno = ENOSYS; + return -1; +} + +int +hwloc_get_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags) +{ + int ret; + + if (flags & HWLOC_MEMBIND_BYNODESET) { + ret = hwloc_get_proc_membind_by_nodeset(topology, pid, set, policy, flags); + } else { + hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); + ret = hwloc_get_proc_membind_by_nodeset(topology, pid, nodeset, policy, flags); + if (!ret) + hwloc_cpuset_from_nodeset(topology, set, nodeset); + hwloc_bitmap_free(nodeset); + } + + return ret; +} + +static int +hwloc_set_area_membind_by_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + if ((flags & ~HWLOC_MEMBIND_ALLFLAGS) || hwloc__check_membind_policy(policy) < 0) { + errno = EINVAL; + return -1; + } + + if (!len) + /* nothing to do */ + return 0; + + nodeset = hwloc_fix_membind(topology, nodeset); + if (!nodeset) + return -1; + + if (topology->binding_hooks.set_area_membind) + return topology->binding_hooks.set_area_membind(topology, addr, len, nodeset, policy, flags); + + errno = ENOSYS; + return -1; +} + +int +hwloc_set_area_membind(hwloc_topology_t topology, const void *addr, size_t len, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags) +{ + int ret; + + if (flags & HWLOC_MEMBIND_BYNODESET) { + ret = hwloc_set_area_membind_by_nodeset(topology, addr, len, set, policy, flags); + } else { + hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); + if (hwloc_fix_membind_cpuset(topology, nodeset, set)) + ret = -1; + else + ret = hwloc_set_area_membind_by_nodeset(topology, addr, len, nodeset, policy, flags); + hwloc_bitmap_free(nodeset); + } + + return ret; +} + +static int +hwloc_get_area_membind_by_nodeset(hwloc_topology_t topology, const void *addr, size_t len, 
hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + if (flags & ~HWLOC_MEMBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if (!len) { + /* nothing to query */ + errno = EINVAL; + return -1; + } + + if (topology->binding_hooks.get_area_membind) + return topology->binding_hooks.get_area_membind(topology, addr, len, nodeset, policy, flags); + + errno = ENOSYS; + return -1; +} + +int +hwloc_get_area_membind(hwloc_topology_t topology, const void *addr, size_t len, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags) +{ + int ret; + + if (flags & HWLOC_MEMBIND_BYNODESET) { + ret = hwloc_get_area_membind_by_nodeset(topology, addr, len, set, policy, flags); + } else { + hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); + ret = hwloc_get_area_membind_by_nodeset(topology, addr, len, nodeset, policy, flags); + if (!ret) + hwloc_cpuset_from_nodeset(topology, set, nodeset); + hwloc_bitmap_free(nodeset); + } + + return ret; +} + +static int +hwloc_get_area_memlocation_by_nodeset(hwloc_topology_t topology, const void *addr, size_t len, hwloc_nodeset_t nodeset, int flags) +{ + if (flags & ~HWLOC_MEMBIND_ALLFLAGS) { + errno = EINVAL; + return -1; + } + + if (!len) + /* nothing to do */ + return 0; + + if (topology->binding_hooks.get_area_memlocation) + return topology->binding_hooks.get_area_memlocation(topology, addr, len, nodeset, flags); + + errno = ENOSYS; + return -1; +} + +int +hwloc_get_area_memlocation(hwloc_topology_t topology, const void *addr, size_t len, hwloc_cpuset_t set, int flags) +{ + int ret; + + if (flags & HWLOC_MEMBIND_BYNODESET) { + ret = hwloc_get_area_memlocation_by_nodeset(topology, addr, len, set, flags); + } else { + hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); + ret = hwloc_get_area_memlocation_by_nodeset(topology, addr, len, nodeset, flags); + if (!ret) + hwloc_cpuset_from_nodeset(topology, set, nodeset); + hwloc_bitmap_free(nodeset); + } + + return ret; +} + +void * +hwloc_alloc_heap(hwloc_topology_t topology 
__hwloc_attribute_unused, size_t len) +{ + void *p = NULL; +#if defined(hwloc_getpagesize) && defined(HAVE_POSIX_MEMALIGN) + errno = posix_memalign(&p, hwloc_getpagesize(), len); + if (errno) + p = NULL; +#elif defined(hwloc_getpagesize) && defined(HAVE_MEMALIGN) + p = memalign(hwloc_getpagesize(), len); +#else + p = malloc(len); +#endif + return p; +} + +#ifdef MAP_ANONYMOUS +void * +hwloc_alloc_mmap(hwloc_topology_t topology __hwloc_attribute_unused, size_t len) +{ + void * buffer = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + return buffer == MAP_FAILED ? NULL : buffer; +} +#endif + +int +hwloc_free_heap(hwloc_topology_t topology __hwloc_attribute_unused, void *addr, size_t len __hwloc_attribute_unused) +{ + free(addr); + return 0; +} + +#ifdef MAP_ANONYMOUS +int +hwloc_free_mmap(hwloc_topology_t topology __hwloc_attribute_unused, void *addr, size_t len) +{ + if (!addr) + return 0; + return munmap(addr, len); +} +#endif + +void * +hwloc_alloc(hwloc_topology_t topology, size_t len) +{ + if (topology->binding_hooks.alloc) + return topology->binding_hooks.alloc(topology, len); + return hwloc_alloc_heap(topology, len); +} + +static void * +hwloc_alloc_membind_by_nodeset(hwloc_topology_t topology, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + void *p; + + if ((flags & ~HWLOC_MEMBIND_ALLFLAGS) || hwloc__check_membind_policy(policy) < 0) { + errno = EINVAL; + return NULL; + } + + nodeset = hwloc_fix_membind(topology, nodeset); + if (!nodeset) + goto fallback; + if (flags & HWLOC_MEMBIND_MIGRATE) { + errno = EINVAL; + goto fallback; + } + + if (topology->binding_hooks.alloc_membind) + return topology->binding_hooks.alloc_membind(topology, len, nodeset, policy, flags); + else if (topology->binding_hooks.set_area_membind) { + p = hwloc_alloc(topology, len); + if (!p) + return NULL; + if (topology->binding_hooks.set_area_membind(topology, p, len, nodeset, policy, flags) && flags & 
HWLOC_MEMBIND_STRICT) { + int error = errno; + free(p); + errno = error; + return NULL; + } + return p; + } else { + errno = ENOSYS; + } + +fallback: + if (flags & HWLOC_MEMBIND_STRICT) + /* Report error */ + return NULL; + /* Never mind, allocate anyway */ + return hwloc_alloc(topology, len); +} + +void * +hwloc_alloc_membind(hwloc_topology_t topology, size_t len, hwloc_const_bitmap_t set, hwloc_membind_policy_t policy, int flags) +{ + void *ret; + + if (flags & HWLOC_MEMBIND_BYNODESET) { + ret = hwloc_alloc_membind_by_nodeset(topology, len, set, policy, flags); + } else { + hwloc_nodeset_t nodeset = hwloc_bitmap_alloc(); + if (hwloc_fix_membind_cpuset(topology, nodeset, set)) { + if (flags & HWLOC_MEMBIND_STRICT) + ret = NULL; + else + ret = hwloc_alloc(topology, len); + } else + ret = hwloc_alloc_membind_by_nodeset(topology, len, nodeset, policy, flags); + hwloc_bitmap_free(nodeset); + } + + return ret; +} + +int +hwloc_free(hwloc_topology_t topology, void *addr, size_t len) +{ + if (topology->binding_hooks.free_membind) + return topology->binding_hooks.free_membind(topology, addr, len); + return hwloc_free_heap(topology, addr, len); +} + +/* + * Empty binding hooks always returning success + */ + +static int dontset_return_complete_cpuset(hwloc_topology_t topology, hwloc_cpuset_t set) +{ + hwloc_bitmap_copy(set, hwloc_topology_get_complete_cpuset(topology)); + return 0; +} + +static int dontset_thisthread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return 0; +} +static int dontget_thisthread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t set, int flags __hwloc_attribute_unused) +{ + return dontset_return_complete_cpuset(topology, set); +} +static int dontset_thisproc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return 0; 
+} +static int dontget_thisproc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t set, int flags __hwloc_attribute_unused) +{ + return dontset_return_complete_cpuset(topology, set); +} +static int dontset_proc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t pid __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return 0; +} +static int dontget_proc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t pid __hwloc_attribute_unused, hwloc_bitmap_t cpuset, int flags __hwloc_attribute_unused) +{ + return dontset_return_complete_cpuset(topology, cpuset); +} +#ifdef hwloc_thread_t +static int dontset_thread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_thread_t tid __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return 0; +} +static int dontget_thread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_thread_t tid __hwloc_attribute_unused, hwloc_bitmap_t cpuset, int flags __hwloc_attribute_unused) +{ + return dontset_return_complete_cpuset(topology, cpuset); +} +#endif + +static int dontset_return_complete_nodeset(hwloc_topology_t topology, hwloc_nodeset_t set, hwloc_membind_policy_t *policy) +{ + hwloc_bitmap_copy(set, hwloc_topology_get_complete_nodeset(topology)); + *policy = HWLOC_MEMBIND_MIXED; + return 0; +} + +static int dontset_thisproc_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return 0; +} +static int dontget_thisproc_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags __hwloc_attribute_unused) +{ + return dontset_return_complete_nodeset(topology, set, policy); +} + +static int 
dontset_thisthread_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return 0; +} +static int dontget_thisthread_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags __hwloc_attribute_unused) +{ + return dontset_return_complete_nodeset(topology, set, policy); +} + +static int dontset_proc_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t pid __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return 0; +} +static int dontget_proc_membind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t pid __hwloc_attribute_unused, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags __hwloc_attribute_unused) +{ + return dontset_return_complete_nodeset(topology, set, policy); +} + +static int dontset_area_membind(hwloc_topology_t topology __hwloc_attribute_unused, const void *addr __hwloc_attribute_unused, size_t size __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return 0; +} +static int dontget_area_membind(hwloc_topology_t topology __hwloc_attribute_unused, const void *addr __hwloc_attribute_unused, size_t size __hwloc_attribute_unused, hwloc_bitmap_t set, hwloc_membind_policy_t * policy, int flags __hwloc_attribute_unused) +{ + return dontset_return_complete_nodeset(topology, set, policy); +} +static int dontget_area_memlocation(hwloc_topology_t topology __hwloc_attribute_unused, const void *addr __hwloc_attribute_unused, size_t size __hwloc_attribute_unused, hwloc_bitmap_t set, int flags __hwloc_attribute_unused) +{ + hwloc_membind_policy_t policy; + return 
dontset_return_complete_nodeset(topology, set, &policy); +} + +static void * dontalloc_membind(hwloc_topology_t topology __hwloc_attribute_unused, size_t size __hwloc_attribute_unused, hwloc_const_bitmap_t set __hwloc_attribute_unused, hwloc_membind_policy_t policy __hwloc_attribute_unused, int flags __hwloc_attribute_unused) +{ + return malloc(size); +} +static int dontfree_membind(hwloc_topology_t topology __hwloc_attribute_unused, void *addr __hwloc_attribute_unused, size_t size __hwloc_attribute_unused) +{ + free(addr); + return 0; +} + +static void hwloc_set_dummy_hooks(struct hwloc_binding_hooks *hooks, + struct hwloc_topology_support *support __hwloc_attribute_unused) +{ + hooks->set_thisproc_cpubind = dontset_thisproc_cpubind; + hooks->get_thisproc_cpubind = dontget_thisproc_cpubind; + hooks->set_thisthread_cpubind = dontset_thisthread_cpubind; + hooks->get_thisthread_cpubind = dontget_thisthread_cpubind; + hooks->set_proc_cpubind = dontset_proc_cpubind; + hooks->get_proc_cpubind = dontget_proc_cpubind; +#ifdef hwloc_thread_t + hooks->set_thread_cpubind = dontset_thread_cpubind; + hooks->get_thread_cpubind = dontget_thread_cpubind; +#endif + hooks->get_thisproc_last_cpu_location = dontget_thisproc_cpubind; /* cpubind instead of last_cpu_location is ok */ + hooks->get_thisthread_last_cpu_location = dontget_thisthread_cpubind; /* cpubind instead of last_cpu_location is ok */ + hooks->get_proc_last_cpu_location = dontget_proc_cpubind; /* cpubind instead of last_cpu_location is ok */ + /* TODO: get_thread_last_cpu_location */ + hooks->set_thisproc_membind = dontset_thisproc_membind; + hooks->get_thisproc_membind = dontget_thisproc_membind; + hooks->set_thisthread_membind = dontset_thisthread_membind; + hooks->get_thisthread_membind = dontget_thisthread_membind; + hooks->set_proc_membind = dontset_proc_membind; + hooks->get_proc_membind = dontget_proc_membind; + hooks->set_area_membind = dontset_area_membind; + hooks->get_area_membind = dontget_area_membind; + 
hooks->get_area_memlocation = dontget_area_memlocation; + hooks->alloc_membind = dontalloc_membind; + hooks->free_membind = dontfree_membind; +} + +void +hwloc_set_native_binding_hooks(struct hwloc_binding_hooks *hooks, struct hwloc_topology_support *support) +{ +# ifdef HWLOC_LINUX_SYS + hwloc_set_linuxfs_hooks(hooks, support); +# endif /* HWLOC_LINUX_SYS */ + +# ifdef HWLOC_BGQ_SYS + hwloc_set_bgq_hooks(hooks, support); +# endif /* HWLOC_BGQ_SYS */ + +# ifdef HWLOC_AIX_SYS + hwloc_set_aix_hooks(hooks, support); +# endif /* HWLOC_AIX_SYS */ + +# ifdef HWLOC_SOLARIS_SYS + hwloc_set_solaris_hooks(hooks, support); +# endif /* HWLOC_SOLARIS_SYS */ + +# ifdef HWLOC_WIN_SYS + hwloc_set_windows_hooks(hooks, support); +# endif /* HWLOC_WIN_SYS */ + +# ifdef HWLOC_DARWIN_SYS + hwloc_set_darwin_hooks(hooks, support); +# endif /* HWLOC_DARWIN_SYS */ + +# ifdef HWLOC_FREEBSD_SYS + hwloc_set_freebsd_hooks(hooks, support); +# endif /* HWLOC_FREEBSD_SYS */ + +# ifdef HWLOC_NETBSD_SYS + hwloc_set_netbsd_hooks(hooks, support); +# endif /* HWLOC_NETBSD_SYS */ + +# ifdef HWLOC_HPUX_SYS + hwloc_set_hpux_hooks(hooks, support); +# endif /* HWLOC_HPUX_SYS */ +} + +/* If the represented system is actually not this system, use dummy binding hooks. 
*/ +void +hwloc_set_binding_hooks(struct hwloc_topology *topology) +{ + if (topology->is_thissystem) { + hwloc_set_native_binding_hooks(&topology->binding_hooks, &topology->support); + /* every hook not set above will return ENOSYS */ + } else { + /* not this system, use dummy binding hooks that do nothing (but don't return ENOSYS) */ + hwloc_set_dummy_hooks(&topology->binding_hooks, &topology->support); + } + + /* if not is_thissystem, set_cpubind is fake + * and get_cpubind returns the whole system cpuset, + * so don't report that set/get_cpubind as supported + */ + if (topology->is_thissystem) { +#define DO(which,kind) \ + if (topology->binding_hooks.kind) \ + topology->support.which##bind->kind = 1; + DO(cpu,set_thisproc_cpubind); + DO(cpu,get_thisproc_cpubind); + DO(cpu,set_proc_cpubind); + DO(cpu,get_proc_cpubind); + DO(cpu,set_thisthread_cpubind); + DO(cpu,get_thisthread_cpubind); +#ifdef hwloc_thread_t + DO(cpu,set_thread_cpubind); + DO(cpu,get_thread_cpubind); +#endif + DO(cpu,get_thisproc_last_cpu_location); + DO(cpu,get_proc_last_cpu_location); + DO(cpu,get_thisthread_last_cpu_location); + DO(mem,set_thisproc_membind); + DO(mem,get_thisproc_membind); + DO(mem,set_thisthread_membind); + DO(mem,get_thisthread_membind); + DO(mem,set_proc_membind); + DO(mem,get_proc_membind); + DO(mem,set_area_membind); + DO(mem,get_area_membind); + DO(mem,get_area_memlocation); + DO(mem,alloc_membind); + } +} diff --git a/src/3rdparty/hwloc/src/bitmap.c b/src/3rdparty/hwloc/src/bitmap.c new file mode 100644 index 00000000..ea1264af --- /dev/null +++ b/src/3rdparty/hwloc/src/bitmap.c @@ -0,0 +1,1676 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2017 Inria. All rights reserved. + * Copyright © 2009-2011 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * possible improvements: + * - have a way to change the initial allocation size: + * add hwloc_bitmap_set_foo() to changes a global here, + * and make the hwloc core call based on the early number of PUs + * - make HWLOC_BITMAP_PREALLOC_BITS configurable, and detectable + * by parsing /proc/cpuinfo during configure on Linux. + * - preallocate inside the bitmap structure (so that the whole structure is a cacheline for instance) + * and allocate a dedicated array only later when reallocating larger + * - add a bitmap->ulongs_empty_first which guarantees that some first ulongs are empty, + * making tests much faster for big bitmaps since there's no need to look at first ulongs. + * no need for ulongs_empty_first to be exactly the max number of empty ulongs, + * clearing bits that were set earlier isn't very common. + */ + +/* magic number */ +#define HWLOC_BITMAP_MAGIC 0x20091007 + +/* preallocated bits in every bitmap */ +#define HWLOC_BITMAP_PREALLOC_BITS 512 +#define HWLOC_BITMAP_PREALLOC_ULONGS (HWLOC_BITMAP_PREALLOC_BITS/HWLOC_BITS_PER_LONG) + +/* actual opaque type internals */ +struct hwloc_bitmap_s { + unsigned ulongs_count; /* how many ulong bitmasks are valid, >= 1 */ + unsigned ulongs_allocated; /* how many ulong bitmasks are allocated, >= ulongs_count */ + unsigned long *ulongs; + int infinite; /* set to 1 if all bits beyond ulongs are set */ +#ifdef HWLOC_DEBUG + int magic; +#endif +}; + +/* overzealous check in debug-mode, not as powerful as valgrind but still useful */ +#ifdef HWLOC_DEBUG +#define HWLOC__BITMAP_CHECK(set) do { \ + assert((set)->magic == HWLOC_BITMAP_MAGIC); \ + assert((set)->ulongs_count >= 1); \ + assert((set)->ulongs_allocated >= (set)->ulongs_count); \ +} while (0) +#else +#define HWLOC__BITMAP_CHECK(set) +#endif + +/* extract a subset from a set using an index or a cpu */ +#define 
HWLOC_SUBBITMAP_INDEX(cpu) ((cpu)/(HWLOC_BITS_PER_LONG)) +#define HWLOC_SUBBITMAP_CPU_ULBIT(cpu) ((cpu)%(HWLOC_BITS_PER_LONG)) +/* Read from a bitmap ulong without knowing whether x is valid. + * Writers should make sure that x is valid and modify set->ulongs[x] directly. + */ +#define HWLOC_SUBBITMAP_READULONG(set,x) ((x) < (set)->ulongs_count ? (set)->ulongs[x] : (set)->infinite ? HWLOC_SUBBITMAP_FULL : HWLOC_SUBBITMAP_ZERO) + +/* predefined subset values */ +#define HWLOC_SUBBITMAP_ZERO 0UL +#define HWLOC_SUBBITMAP_FULL (~0UL) +#define HWLOC_SUBBITMAP_ULBIT(bit) (1UL<<(bit)) +#define HWLOC_SUBBITMAP_CPU(cpu) HWLOC_SUBBITMAP_ULBIT(HWLOC_SUBBITMAP_CPU_ULBIT(cpu)) +#define HWLOC_SUBBITMAP_ULBIT_TO(bit) (HWLOC_SUBBITMAP_FULL>>(HWLOC_BITS_PER_LONG-1-(bit))) +#define HWLOC_SUBBITMAP_ULBIT_FROM(bit) (HWLOC_SUBBITMAP_FULL<<(bit)) +#define HWLOC_SUBBITMAP_ULBIT_FROMTO(begin,end) (HWLOC_SUBBITMAP_ULBIT_TO(end) & HWLOC_SUBBITMAP_ULBIT_FROM(begin)) + +struct hwloc_bitmap_s * hwloc_bitmap_alloc(void) +{ + struct hwloc_bitmap_s * set; + + set = malloc(sizeof(struct hwloc_bitmap_s)); + if (!set) + return NULL; + + set->ulongs_count = 1; + set->ulongs_allocated = HWLOC_BITMAP_PREALLOC_ULONGS; + set->ulongs = malloc(HWLOC_BITMAP_PREALLOC_ULONGS * sizeof(unsigned long)); + if (!set->ulongs) { + free(set); + return NULL; + } + + set->ulongs[0] = HWLOC_SUBBITMAP_ZERO; + set->infinite = 0; +#ifdef HWLOC_DEBUG + set->magic = HWLOC_BITMAP_MAGIC; +#endif + return set; +} + +struct hwloc_bitmap_s * hwloc_bitmap_alloc_full(void) +{ + struct hwloc_bitmap_s * set = hwloc_bitmap_alloc(); + if (set) { + set->infinite = 1; + set->ulongs[0] = HWLOC_SUBBITMAP_FULL; + } + return set; +} + +void hwloc_bitmap_free(struct hwloc_bitmap_s * set) +{ + if (!set) + return; + + HWLOC__BITMAP_CHECK(set); +#ifdef HWLOC_DEBUG + set->magic = 0; +#endif + + free(set->ulongs); + free(set); +} + +/* enlarge until it contains at least needed_count ulongs. 
+ */ +static int +hwloc_bitmap_enlarge_by_ulongs(struct hwloc_bitmap_s * set, unsigned needed_count) __hwloc_attribute_warn_unused_result; +static int +hwloc_bitmap_enlarge_by_ulongs(struct hwloc_bitmap_s * set, unsigned needed_count) +{ + unsigned tmp = 1U << hwloc_flsl((unsigned long) needed_count - 1); + if (tmp > set->ulongs_allocated) { + unsigned long *tmpulongs; + tmpulongs = realloc(set->ulongs, tmp * sizeof(unsigned long)); + if (!tmpulongs) + return -1; + set->ulongs = tmpulongs; + set->ulongs_allocated = tmp; + } + return 0; +} + +/* enlarge until it contains at least needed_count ulongs, + * and update new ulongs according to the infinite field. + */ +static int +hwloc_bitmap_realloc_by_ulongs(struct hwloc_bitmap_s * set, unsigned needed_count) __hwloc_attribute_warn_unused_result; +static int +hwloc_bitmap_realloc_by_ulongs(struct hwloc_bitmap_s * set, unsigned needed_count) +{ + unsigned i; + + HWLOC__BITMAP_CHECK(set); + + if (needed_count <= set->ulongs_count) + return 0; + + /* realloc larger if needed */ + if (hwloc_bitmap_enlarge_by_ulongs(set, needed_count) < 0) + return -1; + + /* fill the newly allocated subset depending on the infinite flag */ + for(i=set->ulongs_count; iulongs[i] = set->infinite ? HWLOC_SUBBITMAP_FULL : HWLOC_SUBBITMAP_ZERO; + set->ulongs_count = needed_count; + return 0; +} + +/* realloc until it contains at least cpu+1 bits */ +#define hwloc_bitmap_realloc_by_cpu_index(set, cpu) hwloc_bitmap_realloc_by_ulongs(set, ((cpu)/HWLOC_BITS_PER_LONG)+1) + +/* reset a bitmap to exactely the needed size. + * the caller must reinitialize all ulongs and the infinite flag later. 
+ */ +static int +hwloc_bitmap_reset_by_ulongs(struct hwloc_bitmap_s * set, unsigned needed_count) __hwloc_attribute_warn_unused_result; +static int +hwloc_bitmap_reset_by_ulongs(struct hwloc_bitmap_s * set, unsigned needed_count) +{ + if (hwloc_bitmap_enlarge_by_ulongs(set, needed_count)) + return -1; + set->ulongs_count = needed_count; + return 0; +} + +/* reset until it contains exactly cpu+1 bits (roundup to a ulong). + * the caller must reinitialize all ulongs and the infinite flag later. + */ +#define hwloc_bitmap_reset_by_cpu_index(set, cpu) hwloc_bitmap_reset_by_ulongs(set, ((cpu)/HWLOC_BITS_PER_LONG)+1) + +struct hwloc_bitmap_s * hwloc_bitmap_tma_dup(struct hwloc_tma *tma, const struct hwloc_bitmap_s * old) +{ + struct hwloc_bitmap_s * new; + + if (!old) + return NULL; + + HWLOC__BITMAP_CHECK(old); + + new = hwloc_tma_malloc(tma, sizeof(struct hwloc_bitmap_s)); + if (!new) + return NULL; + + new->ulongs = hwloc_tma_malloc(tma, old->ulongs_allocated * sizeof(unsigned long)); + if (!new->ulongs) { + free(new); + return NULL; + } + new->ulongs_allocated = old->ulongs_allocated; + new->ulongs_count = old->ulongs_count; + memcpy(new->ulongs, old->ulongs, new->ulongs_count * sizeof(unsigned long)); + new->infinite = old->infinite; +#ifdef HWLOC_DEBUG + new->magic = HWLOC_BITMAP_MAGIC; +#endif + return new; +} + +struct hwloc_bitmap_s * hwloc_bitmap_dup(const struct hwloc_bitmap_s * old) +{ + return hwloc_bitmap_tma_dup(NULL, old); +} + +int hwloc_bitmap_copy(struct hwloc_bitmap_s * dst, const struct hwloc_bitmap_s * src) +{ + HWLOC__BITMAP_CHECK(dst); + HWLOC__BITMAP_CHECK(src); + + if (hwloc_bitmap_reset_by_ulongs(dst, src->ulongs_count) < 0) + return -1; + + memcpy(dst->ulongs, src->ulongs, src->ulongs_count * sizeof(unsigned long)); + dst->infinite = src->infinite; + return 0; +} + +/* Strings always use 32bit groups */ +#define HWLOC_PRIxSUBBITMAP "%08lx" +#define HWLOC_BITMAP_SUBSTRING_SIZE 32 +#define HWLOC_BITMAP_SUBSTRING_LENGTH 
(HWLOC_BITMAP_SUBSTRING_SIZE/4) +#define HWLOC_BITMAP_STRING_PER_LONG (HWLOC_BITS_PER_LONG/HWLOC_BITMAP_SUBSTRING_SIZE) + +int hwloc_bitmap_snprintf(char * __hwloc_restrict buf, size_t buflen, const struct hwloc_bitmap_s * __hwloc_restrict set) +{ + ssize_t size = buflen; + char *tmp = buf; + int res, ret = 0; + int needcomma = 0; + int i; + unsigned long accum = 0; + int accumed = 0; +#if HWLOC_BITS_PER_LONG == HWLOC_BITMAP_SUBSTRING_SIZE + const unsigned long accum_mask = ~0UL; +#else /* HWLOC_BITS_PER_LONG != HWLOC_BITMAP_SUBSTRING_SIZE */ + const unsigned long accum_mask = ((1UL << HWLOC_BITMAP_SUBSTRING_SIZE) - 1) << (HWLOC_BITS_PER_LONG - HWLOC_BITMAP_SUBSTRING_SIZE); +#endif /* HWLOC_BITS_PER_LONG != HWLOC_BITMAP_SUBSTRING_SIZE */ + + HWLOC__BITMAP_CHECK(set); + + /* mark the end in case we do nothing later */ + if (buflen > 0) + tmp[0] = '\0'; + + if (set->infinite) { + res = hwloc_snprintf(tmp, size, "0xf...f"); + needcomma = 1; + if (res < 0) + return -1; + ret += res; + if (res >= size) + res = size>0 ? (int)size - 1 : 0; + tmp += res; + size -= res; + } + + i=(int) set->ulongs_count-1; + + if (set->infinite) { + /* ignore starting FULL since we have 0xf...f already */ + while (i>=0 && set->ulongs[i] == HWLOC_SUBBITMAP_FULL) + i--; + } else { + /* ignore starting ZERO except the last one */ + while (i>=0 && set->ulongs[i] == HWLOC_SUBBITMAP_ZERO) + i--; + } + + while (i>=0 || accumed) { + /* Refill accumulator */ + if (!accumed) { + accum = set->ulongs[i--]; + accumed = HWLOC_BITS_PER_LONG; + } + + if (accum & accum_mask) { + /* print the whole subset if not empty */ + res = hwloc_snprintf(tmp, size, needcomma ? ",0x" HWLOC_PRIxSUBBITMAP : "0x" HWLOC_PRIxSUBBITMAP, + (accum & accum_mask) >> (HWLOC_BITS_PER_LONG - HWLOC_BITMAP_SUBSTRING_SIZE)); + needcomma = 1; + } else if (i == -1 && accumed == HWLOC_BITMAP_SUBSTRING_SIZE) { + /* print a single 0 to mark the last subset */ + res = hwloc_snprintf(tmp, size, needcomma ? 
",0x0" : "0x0"); + } else if (needcomma) { + res = hwloc_snprintf(tmp, size, ","); + } else { + res = 0; + } + if (res < 0) + return -1; + ret += res; + +#if HWLOC_BITS_PER_LONG == HWLOC_BITMAP_SUBSTRING_SIZE + accum = 0; + accumed = 0; +#else + accum <<= HWLOC_BITMAP_SUBSTRING_SIZE; + accumed -= HWLOC_BITMAP_SUBSTRING_SIZE; +#endif + + if (res >= size) + res = size>0 ? (int)size - 1 : 0; + + tmp += res; + size -= res; + } + + /* if didn't display anything, display 0x0 */ + if (!ret) { + res = hwloc_snprintf(tmp, size, "0x0"); + if (res < 0) + return -1; + ret += res; + } + + return ret; +} + +int hwloc_bitmap_asprintf(char ** strp, const struct hwloc_bitmap_s * __hwloc_restrict set) +{ + int len; + char *buf; + + HWLOC__BITMAP_CHECK(set); + + len = hwloc_bitmap_snprintf(NULL, 0, set); + buf = malloc(len+1); + if (!buf) + return -1; + *strp = buf; + return hwloc_bitmap_snprintf(buf, len+1, set); +} + +int hwloc_bitmap_sscanf(struct hwloc_bitmap_s *set, const char * __hwloc_restrict string) +{ + const char * current = string; + unsigned long accum = 0; + int count=0; + int infinite = 0; + + /* count how many substrings there are */ + count++; + while ((current = strchr(current+1, ',')) != NULL) + count++; + + current = string; + if (!strncmp("0xf...f", current, 7)) { + current += 7; + if (*current != ',') { + /* special case for infinite/full bitmap */ + hwloc_bitmap_fill(set); + return 0; + } + current++; + infinite = 1; + count--; + } + + if (hwloc_bitmap_reset_by_ulongs(set, (count + HWLOC_BITMAP_STRING_PER_LONG - 1) / HWLOC_BITMAP_STRING_PER_LONG) < 0) + return -1; + set->infinite = 0; + + while (*current != '\0') { + unsigned long val; + char *next; + val = strtoul(current, &next, 16); + + assert(count > 0); + count--; + + accum |= (val << ((count * HWLOC_BITMAP_SUBSTRING_SIZE) % HWLOC_BITS_PER_LONG)); + if (!(count % HWLOC_BITMAP_STRING_PER_LONG)) { + set->ulongs[count / HWLOC_BITMAP_STRING_PER_LONG] = accum; + accum = 0; + } + + if (*next != ',') { + if 
(*next || count > 0) + goto failed; + else + break; + } + current = (const char*) next+1; + } + + set->infinite = infinite; /* set at the end, to avoid spurious realloc with filled new ulongs */ + + return 0; + + failed: + /* failure to parse */ + hwloc_bitmap_zero(set); + return -1; +} + +int hwloc_bitmap_list_snprintf(char * __hwloc_restrict buf, size_t buflen, const struct hwloc_bitmap_s * __hwloc_restrict set) +{ + int prev = -1; + ssize_t size = buflen; + char *tmp = buf; + int res, ret = 0; + int needcomma = 0; + + HWLOC__BITMAP_CHECK(set); + + /* mark the end in case we do nothing later */ + if (buflen > 0) + tmp[0] = '\0'; + + while (1) { + int begin, end; + + begin = hwloc_bitmap_next(set, prev); + if (begin == -1) + break; + end = hwloc_bitmap_next_unset(set, begin); + + if (end == begin+1) { + res = hwloc_snprintf(tmp, size, needcomma ? ",%d" : "%d", begin); + } else if (end == -1) { + res = hwloc_snprintf(tmp, size, needcomma ? ",%d-" : "%d-", begin); + } else { + res = hwloc_snprintf(tmp, size, needcomma ? ",%d-%d" : "%d-%d", begin, end-1); + } + if (res < 0) + return -1; + ret += res; + + if (res >= size) + res = size>0 ? 
(int)size - 1 : 0; + + tmp += res; + size -= res; + needcomma = 1; + + if (end == -1) + break; + else + prev = end - 1; + } + + return ret; +} + +int hwloc_bitmap_list_asprintf(char ** strp, const struct hwloc_bitmap_s * __hwloc_restrict set) +{ + int len; + char *buf; + + HWLOC__BITMAP_CHECK(set); + + len = hwloc_bitmap_list_snprintf(NULL, 0, set); + buf = malloc(len+1); + if (!buf) + return -1; + *strp = buf; + return hwloc_bitmap_list_snprintf(buf, len+1, set); +} + +int hwloc_bitmap_list_sscanf(struct hwloc_bitmap_s *set, const char * __hwloc_restrict string) +{ + const char * current = string; + char *next; + long begin = -1, val; + + hwloc_bitmap_zero(set); + + while (*current != '\0') { + + /* ignore empty ranges */ + while (*current == ',' || *current == ' ') + current++; + + val = strtoul(current, &next, 0); + /* make sure we got at least one digit */ + if (next == current) + goto failed; + + if (begin != -1) { + /* finishing a range */ + hwloc_bitmap_set_range(set, begin, val); + begin = -1; + + } else if (*next == '-') { + /* starting a new range */ + if (*(next+1) == '\0') { + /* infinite range */ + hwloc_bitmap_set_range(set, val, -1); + break; + } else { + /* normal range */ + begin = val; + } + + } else if (*next == ',' || *next == ' ' || *next == '\0') { + /* single digit */ + hwloc_bitmap_set(set, val); + } + + if (*next == '\0') + break; + current = next+1; + } + + return 0; + + failed: + /* failure to parse */ + hwloc_bitmap_zero(set); + return -1; +} + +int hwloc_bitmap_taskset_snprintf(char * __hwloc_restrict buf, size_t buflen, const struct hwloc_bitmap_s * __hwloc_restrict set) +{ + ssize_t size = buflen; + char *tmp = buf; + int res, ret = 0; + int started = 0; + int i; + + HWLOC__BITMAP_CHECK(set); + + /* mark the end in case we do nothing later */ + if (buflen > 0) + tmp[0] = '\0'; + + if (set->infinite) { + res = hwloc_snprintf(tmp, size, "0xf...f"); + started = 1; + if (res < 0) + return -1; + ret += res; + if (res >= size) + res = 
size>0 ? (int)size - 1 : 0; + tmp += res; + size -= res; + } + + i=set->ulongs_count-1; + + if (set->infinite) { + /* ignore starting FULL since we have 0xf...f already */ + while (i>=0 && set->ulongs[i] == HWLOC_SUBBITMAP_FULL) + i--; + } else { + /* ignore starting ZERO except the last one */ + while (i>=1 && set->ulongs[i] == HWLOC_SUBBITMAP_ZERO) + i--; + } + + while (i>=0) { + unsigned long val = set->ulongs[i--]; + if (started) { + /* print the whole subset */ +#if HWLOC_BITS_PER_LONG == 64 + res = hwloc_snprintf(tmp, size, "%016lx", val); +#else + res = hwloc_snprintf(tmp, size, "%08lx", val); +#endif + } else if (val || i == -1) { + res = hwloc_snprintf(tmp, size, "0x%lx", val); + started = 1; + } else { + res = 0; + } + if (res < 0) + return -1; + ret += res; + if (res >= size) + res = size>0 ? (int)size - 1 : 0; + tmp += res; + size -= res; + } + + /* if didn't display anything, display 0x0 */ + if (!ret) { + res = hwloc_snprintf(tmp, size, "0x0"); + if (res < 0) + return -1; + ret += res; + } + + return ret; +} + +int hwloc_bitmap_taskset_asprintf(char ** strp, const struct hwloc_bitmap_s * __hwloc_restrict set) +{ + int len; + char *buf; + + HWLOC__BITMAP_CHECK(set); + + len = hwloc_bitmap_taskset_snprintf(NULL, 0, set); + buf = malloc(len+1); + if (!buf) + return -1; + *strp = buf; + return hwloc_bitmap_taskset_snprintf(buf, len+1, set); +} + +int hwloc_bitmap_taskset_sscanf(struct hwloc_bitmap_s *set, const char * __hwloc_restrict string) +{ + const char * current = string; + int chars; + int count; + int infinite = 0; + + if (!strncmp("0xf...f", current, 7)) { + /* infinite bitmap */ + infinite = 1; + current += 7; + if (*current == '\0') { + /* special case for infinite/full bitmap */ + hwloc_bitmap_fill(set); + return 0; + } + } else { + /* finite bitmap */ + if (!strncmp("0x", current, 2)) + current += 2; + if (*current == '\0') { + /* special case for empty bitmap */ + hwloc_bitmap_zero(set); + return 0; + } + } + /* we know there are other 
characters now */ + + chars = (int)strlen(current); + count = (chars * 4 + HWLOC_BITS_PER_LONG - 1) / HWLOC_BITS_PER_LONG; + + if (hwloc_bitmap_reset_by_ulongs(set, count) < 0) + return -1; + set->infinite = 0; + + while (*current != '\0') { + int tmpchars; + char ustr[17]; + unsigned long val; + char *next; + + tmpchars = chars % (HWLOC_BITS_PER_LONG/4); + if (!tmpchars) + tmpchars = (HWLOC_BITS_PER_LONG/4); + + memcpy(ustr, current, tmpchars); + ustr[tmpchars] = '\0'; + val = strtoul(ustr, &next, 16); + if (*next != '\0') + goto failed; + + set->ulongs[count-1] = val; + + current += tmpchars; + chars -= tmpchars; + count--; + } + + set->infinite = infinite; /* set at the end, to avoid spurious realloc with filled new ulongs */ + + return 0; + + failed: + /* failure to parse */ + hwloc_bitmap_zero(set); + return -1; +} + +static void hwloc_bitmap__zero(struct hwloc_bitmap_s *set) +{ + unsigned i; + for(i=0; iulongs_count; i++) + set->ulongs[i] = HWLOC_SUBBITMAP_ZERO; + set->infinite = 0; +} + +void hwloc_bitmap_zero(struct hwloc_bitmap_s * set) +{ + HWLOC__BITMAP_CHECK(set); + + HWLOC_BUILD_ASSERT(HWLOC_BITMAP_PREALLOC_ULONGS >= 1); + if (hwloc_bitmap_reset_by_ulongs(set, 1) < 0) { + /* cannot fail since we preallocate some ulongs. + * if we ever preallocate nothing, we'll reset to 0 ulongs. + */ + } + hwloc_bitmap__zero(set); +} + +static void hwloc_bitmap__fill(struct hwloc_bitmap_s * set) +{ + unsigned i; + for(i=0; iulongs_count; i++) + set->ulongs[i] = HWLOC_SUBBITMAP_FULL; + set->infinite = 1; +} + +void hwloc_bitmap_fill(struct hwloc_bitmap_s * set) +{ + HWLOC__BITMAP_CHECK(set); + + HWLOC_BUILD_ASSERT(HWLOC_BITMAP_PREALLOC_ULONGS >= 1); + if (hwloc_bitmap_reset_by_ulongs(set, 1) < 0) { + /* cannot fail since we pre-allocate some ulongs. + * if we ever pre-allocate nothing, we'll reset to 0 ulongs. 
+ */ + } + hwloc_bitmap__fill(set); +} + +int hwloc_bitmap_from_ulong(struct hwloc_bitmap_s *set, unsigned long mask) +{ + HWLOC__BITMAP_CHECK(set); + + HWLOC_BUILD_ASSERT(HWLOC_BITMAP_PREALLOC_ULONGS >= 1); + if (hwloc_bitmap_reset_by_ulongs(set, 1) < 0) { + /* cannot fail since we pre-allocate some ulongs. + * if ever pre-allocate nothing, we may have to return a failure. + */ + } + set->ulongs[0] = mask; /* there's always at least one ulong allocated */ + set->infinite = 0; + return 0; +} + +int hwloc_bitmap_from_ith_ulong(struct hwloc_bitmap_s *set, unsigned i, unsigned long mask) +{ + unsigned j; + + HWLOC__BITMAP_CHECK(set); + + if (hwloc_bitmap_reset_by_ulongs(set, i+1) < 0) + return -1; + + set->ulongs[i] = mask; + for(j=0; julongs[j] = HWLOC_SUBBITMAP_ZERO; + set->infinite = 0; + return 0; +} + +unsigned long hwloc_bitmap_to_ulong(const struct hwloc_bitmap_s *set) +{ + HWLOC__BITMAP_CHECK(set); + + return set->ulongs[0]; /* there's always at least one ulong allocated */ +} + +unsigned long hwloc_bitmap_to_ith_ulong(const struct hwloc_bitmap_s *set, unsigned i) +{ + HWLOC__BITMAP_CHECK(set); + + return HWLOC_SUBBITMAP_READULONG(set, i); +} + +int hwloc_bitmap_only(struct hwloc_bitmap_s * set, unsigned cpu) +{ + unsigned index_ = HWLOC_SUBBITMAP_INDEX(cpu); + + HWLOC__BITMAP_CHECK(set); + + if (hwloc_bitmap_reset_by_cpu_index(set, cpu) < 0) + return -1; + + hwloc_bitmap__zero(set); + set->ulongs[index_] |= HWLOC_SUBBITMAP_CPU(cpu); + return 0; +} + +int hwloc_bitmap_allbut(struct hwloc_bitmap_s * set, unsigned cpu) +{ + unsigned index_ = HWLOC_SUBBITMAP_INDEX(cpu); + + HWLOC__BITMAP_CHECK(set); + + if (hwloc_bitmap_reset_by_cpu_index(set, cpu) < 0) + return -1; + + hwloc_bitmap__fill(set); + set->ulongs[index_] &= ~HWLOC_SUBBITMAP_CPU(cpu); + return 0; +} + +int hwloc_bitmap_set(struct hwloc_bitmap_s * set, unsigned cpu) +{ + unsigned index_ = HWLOC_SUBBITMAP_INDEX(cpu); + + HWLOC__BITMAP_CHECK(set); + + /* nothing to do if setting inside the infinite part 
of the bitmap */ + if (set->infinite && cpu >= set->ulongs_count * HWLOC_BITS_PER_LONG) + return 0; + + if (hwloc_bitmap_realloc_by_cpu_index(set, cpu) < 0) + return -1; + + set->ulongs[index_] |= HWLOC_SUBBITMAP_CPU(cpu); + return 0; +} + +int hwloc_bitmap_set_range(struct hwloc_bitmap_s * set, unsigned begincpu, int _endcpu) +{ + unsigned i; + unsigned beginset,endset; + unsigned endcpu = (unsigned) _endcpu; + + HWLOC__BITMAP_CHECK(set); + + if (endcpu < begincpu) + return 0; + if (set->infinite && begincpu >= set->ulongs_count * HWLOC_BITS_PER_LONG) + /* setting only in the already-set infinite part, nothing to do */ + return 0; + + if (_endcpu == -1) { + /* infinite range */ + + /* make sure we can play with the ulong that contains begincpu */ + if (hwloc_bitmap_realloc_by_cpu_index(set, begincpu) < 0) + return -1; + + /* update the ulong that contains begincpu */ + beginset = HWLOC_SUBBITMAP_INDEX(begincpu); + set->ulongs[beginset] |= HWLOC_SUBBITMAP_ULBIT_FROM(HWLOC_SUBBITMAP_CPU_ULBIT(begincpu)); + /* set ulongs after begincpu if any already allocated */ + for(i=beginset+1; iulongs_count; i++) + set->ulongs[i] = HWLOC_SUBBITMAP_FULL; + /* mark the infinity as set */ + set->infinite = 1; + } else { + /* finite range */ + + /* ignore the part of the range that overlaps with the already-set infinite part */ + if (set->infinite && endcpu >= set->ulongs_count * HWLOC_BITS_PER_LONG) + endcpu = set->ulongs_count * HWLOC_BITS_PER_LONG - 1; + /* make sure we can play with the ulongs that contain begincpu and endcpu */ + if (hwloc_bitmap_realloc_by_cpu_index(set, endcpu) < 0) + return -1; + + /* update first and last ulongs */ + beginset = HWLOC_SUBBITMAP_INDEX(begincpu); + endset = HWLOC_SUBBITMAP_INDEX(endcpu); + if (beginset == endset) { + set->ulongs[beginset] |= HWLOC_SUBBITMAP_ULBIT_FROMTO(HWLOC_SUBBITMAP_CPU_ULBIT(begincpu), HWLOC_SUBBITMAP_CPU_ULBIT(endcpu)); + } else { + set->ulongs[beginset] |= HWLOC_SUBBITMAP_ULBIT_FROM(HWLOC_SUBBITMAP_CPU_ULBIT(begincpu)); 
+ set->ulongs[endset] |= HWLOC_SUBBITMAP_ULBIT_TO(HWLOC_SUBBITMAP_CPU_ULBIT(endcpu)); + } + /* set ulongs in the middle of the range */ + for(i=beginset+1; iulongs[i] = HWLOC_SUBBITMAP_FULL; + } + + return 0; +} + +int hwloc_bitmap_set_ith_ulong(struct hwloc_bitmap_s *set, unsigned i, unsigned long mask) +{ + HWLOC__BITMAP_CHECK(set); + + if (hwloc_bitmap_realloc_by_ulongs(set, i+1) < 0) + return -1; + + set->ulongs[i] = mask; + return 0; +} + +int hwloc_bitmap_clr(struct hwloc_bitmap_s * set, unsigned cpu) +{ + unsigned index_ = HWLOC_SUBBITMAP_INDEX(cpu); + + HWLOC__BITMAP_CHECK(set); + + /* nothing to do if clearing inside the infinitely-unset part of the bitmap */ + if (!set->infinite && cpu >= set->ulongs_count * HWLOC_BITS_PER_LONG) + return 0; + + if (hwloc_bitmap_realloc_by_cpu_index(set, cpu) < 0) + return -1; + + set->ulongs[index_] &= ~HWLOC_SUBBITMAP_CPU(cpu); + return 0; +} + +int hwloc_bitmap_clr_range(struct hwloc_bitmap_s * set, unsigned begincpu, int _endcpu) +{ + unsigned i; + unsigned beginset,endset; + unsigned endcpu = (unsigned) _endcpu; + + HWLOC__BITMAP_CHECK(set); + + if (endcpu < begincpu) + return 0; + + if (!set->infinite && begincpu >= set->ulongs_count * HWLOC_BITS_PER_LONG) + /* clearing only in the already-unset infinite part, nothing to do */ + return 0; + + if (_endcpu == -1) { + /* infinite range */ + + /* make sure we can play with the ulong that contains begincpu */ + if (hwloc_bitmap_realloc_by_cpu_index(set, begincpu) < 0) + return -1; + + /* update the ulong that contains begincpu */ + beginset = HWLOC_SUBBITMAP_INDEX(begincpu); + set->ulongs[beginset] &= ~HWLOC_SUBBITMAP_ULBIT_FROM(HWLOC_SUBBITMAP_CPU_ULBIT(begincpu)); + /* clear ulong after begincpu if any already allocated */ + for(i=beginset+1; iulongs_count; i++) + set->ulongs[i] = HWLOC_SUBBITMAP_ZERO; + /* mark the infinity as unset */ + set->infinite = 0; + } else { + /* finite range */ + + /* ignore the part of the range that overlaps with the already-unset infinite 
part */ + if (!set->infinite && endcpu >= set->ulongs_count * HWLOC_BITS_PER_LONG) + endcpu = set->ulongs_count * HWLOC_BITS_PER_LONG - 1; + /* make sure we can play with the ulongs that contain begincpu and endcpu */ + if (hwloc_bitmap_realloc_by_cpu_index(set, endcpu) < 0) + return -1; + + /* update first and last ulongs */ + beginset = HWLOC_SUBBITMAP_INDEX(begincpu); + endset = HWLOC_SUBBITMAP_INDEX(endcpu); + if (beginset == endset) { + set->ulongs[beginset] &= ~HWLOC_SUBBITMAP_ULBIT_FROMTO(HWLOC_SUBBITMAP_CPU_ULBIT(begincpu), HWLOC_SUBBITMAP_CPU_ULBIT(endcpu)); + } else { + set->ulongs[beginset] &= ~HWLOC_SUBBITMAP_ULBIT_FROM(HWLOC_SUBBITMAP_CPU_ULBIT(begincpu)); + set->ulongs[endset] &= ~HWLOC_SUBBITMAP_ULBIT_TO(HWLOC_SUBBITMAP_CPU_ULBIT(endcpu)); + } + /* clear ulongs in the middle of the range */ + for(i=beginset+1; iulongs[i] = HWLOC_SUBBITMAP_ZERO; + } + + return 0; +} + +int hwloc_bitmap_isset(const struct hwloc_bitmap_s * set, unsigned cpu) +{ + unsigned index_ = HWLOC_SUBBITMAP_INDEX(cpu); + + HWLOC__BITMAP_CHECK(set); + + return (HWLOC_SUBBITMAP_READULONG(set, index_) & HWLOC_SUBBITMAP_CPU(cpu)) != 0; +} + +int hwloc_bitmap_iszero(const struct hwloc_bitmap_s *set) +{ + unsigned i; + + HWLOC__BITMAP_CHECK(set); + + if (set->infinite) + return 0; + for(i=0; iulongs_count; i++) + if (set->ulongs[i] != HWLOC_SUBBITMAP_ZERO) + return 0; + return 1; +} + +int hwloc_bitmap_isfull(const struct hwloc_bitmap_s *set) +{ + unsigned i; + + HWLOC__BITMAP_CHECK(set); + + if (!set->infinite) + return 0; + for(i=0; iulongs_count; i++) + if (set->ulongs[i] != HWLOC_SUBBITMAP_FULL) + return 0; + return 1; +} + +int hwloc_bitmap_isequal (const struct hwloc_bitmap_s *set1, const struct hwloc_bitmap_s *set2) +{ + unsigned count1 = set1->ulongs_count; + unsigned count2 = set2->ulongs_count; + unsigned min_count = count1 < count2 ? 
count1 : count2; + unsigned i; + + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + for(i=0; iulongs[i] != set2->ulongs[i]) + return 0; + + if (count1 != count2) { + unsigned long w1 = set1->infinite ? HWLOC_SUBBITMAP_FULL : HWLOC_SUBBITMAP_ZERO; + unsigned long w2 = set2->infinite ? HWLOC_SUBBITMAP_FULL : HWLOC_SUBBITMAP_ZERO; + for(i=min_count; iulongs[i] != w2) + return 0; + } + for(i=min_count; iulongs[i] != w1) + return 0; + } + } + + if (set1->infinite != set2->infinite) + return 0; + + return 1; +} + +int hwloc_bitmap_intersects (const struct hwloc_bitmap_s *set1, const struct hwloc_bitmap_s *set2) +{ + unsigned count1 = set1->ulongs_count; + unsigned count2 = set2->ulongs_count; + unsigned min_count = count1 < count2 ? count1 : count2; + unsigned i; + + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + for(i=0; iulongs[i] & set2->ulongs[i]) + return 1; + + if (count1 != count2) { + if (set2->infinite) { + for(i=min_count; iulongs_count; i++) + if (set1->ulongs[i]) + return 1; + } + if (set1->infinite) { + for(i=min_count; iulongs_count; i++) + if (set2->ulongs[i]) + return 1; + } + } + + if (set1->infinite && set2->infinite) + return 1; + + return 0; +} + +int hwloc_bitmap_isincluded (const struct hwloc_bitmap_s *sub_set, const struct hwloc_bitmap_s *super_set) +{ + unsigned super_count = super_set->ulongs_count; + unsigned sub_count = sub_set->ulongs_count; + unsigned min_count = super_count < sub_count ? 
super_count : sub_count; + unsigned i; + + HWLOC__BITMAP_CHECK(sub_set); + HWLOC__BITMAP_CHECK(super_set); + + for(i=0; iulongs[i] != (super_set->ulongs[i] | sub_set->ulongs[i])) + return 0; + + if (super_count != sub_count) { + if (!super_set->infinite) + for(i=min_count; iulongs[i]) + return 0; + if (sub_set->infinite) + for(i=min_count; iulongs[i] != HWLOC_SUBBITMAP_FULL) + return 0; + } + + if (sub_set->infinite && !super_set->infinite) + return 0; + + return 1; +} + +int hwloc_bitmap_or (struct hwloc_bitmap_s *res, const struct hwloc_bitmap_s *set1, const struct hwloc_bitmap_s *set2) +{ + /* cache counts so that we can reset res even if it's also set1 or set2 */ + unsigned count1 = set1->ulongs_count; + unsigned count2 = set2->ulongs_count; + unsigned max_count = count1 > count2 ? count1 : count2; + unsigned min_count = count1 + count2 - max_count; + unsigned i; + + HWLOC__BITMAP_CHECK(res); + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + if (hwloc_bitmap_reset_by_ulongs(res, max_count) < 0) + return -1; + + for(i=0; iulongs[i] = set1->ulongs[i] | set2->ulongs[i]; + + if (count1 != count2) { + if (min_count < count1) { + if (set2->infinite) { + res->ulongs_count = min_count; + } else { + for(i=min_count; iulongs[i] = set1->ulongs[i]; + } + } else { + if (set1->infinite) { + res->ulongs_count = min_count; + } else { + for(i=min_count; iulongs[i] = set2->ulongs[i]; + } + } + } + + res->infinite = set1->infinite || set2->infinite; + return 0; +} + +int hwloc_bitmap_and (struct hwloc_bitmap_s *res, const struct hwloc_bitmap_s *set1, const struct hwloc_bitmap_s *set2) +{ + /* cache counts so that we can reset res even if it's also set1 or set2 */ + unsigned count1 = set1->ulongs_count; + unsigned count2 = set2->ulongs_count; + unsigned max_count = count1 > count2 ? 
count1 : count2; + unsigned min_count = count1 + count2 - max_count; + unsigned i; + + HWLOC__BITMAP_CHECK(res); + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + if (hwloc_bitmap_reset_by_ulongs(res, max_count) < 0) + return -1; + + for(i=0; iulongs[i] = set1->ulongs[i] & set2->ulongs[i]; + + if (count1 != count2) { + if (min_count < count1) { + if (set2->infinite) { + for(i=min_count; iulongs[i] = set1->ulongs[i]; + } else { + res->ulongs_count = min_count; + } + } else { + if (set1->infinite) { + for(i=min_count; iulongs[i] = set2->ulongs[i]; + } else { + res->ulongs_count = min_count; + } + } + } + + res->infinite = set1->infinite && set2->infinite; + return 0; +} + +int hwloc_bitmap_andnot (struct hwloc_bitmap_s *res, const struct hwloc_bitmap_s *set1, const struct hwloc_bitmap_s *set2) +{ + /* cache counts so that we can reset res even if it's also set1 or set2 */ + unsigned count1 = set1->ulongs_count; + unsigned count2 = set2->ulongs_count; + unsigned max_count = count1 > count2 ? 
count1 : count2; + unsigned min_count = count1 + count2 - max_count; + unsigned i; + + HWLOC__BITMAP_CHECK(res); + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + if (hwloc_bitmap_reset_by_ulongs(res, max_count) < 0) + return -1; + + for(i=0; iulongs[i] = set1->ulongs[i] & ~set2->ulongs[i]; + + if (count1 != count2) { + if (min_count < count1) { + if (!set2->infinite) { + for(i=min_count; iulongs[i] = set1->ulongs[i]; + } else { + res->ulongs_count = min_count; + } + } else { + if (set1->infinite) { + for(i=min_count; iulongs[i] = ~set2->ulongs[i]; + } else { + res->ulongs_count = min_count; + } + } + } + + res->infinite = set1->infinite && !set2->infinite; + return 0; +} + +int hwloc_bitmap_xor (struct hwloc_bitmap_s *res, const struct hwloc_bitmap_s *set1, const struct hwloc_bitmap_s *set2) +{ + /* cache counts so that we can reset res even if it's also set1 or set2 */ + unsigned count1 = set1->ulongs_count; + unsigned count2 = set2->ulongs_count; + unsigned max_count = count1 > count2 ? count1 : count2; + unsigned min_count = count1 + count2 - max_count; + unsigned i; + + HWLOC__BITMAP_CHECK(res); + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + if (hwloc_bitmap_reset_by_ulongs(res, max_count) < 0) + return -1; + + for(i=0; iulongs[i] = set1->ulongs[i] ^ set2->ulongs[i]; + + if (count1 != count2) { + if (min_count < count1) { + unsigned long w2 = set2->infinite ? HWLOC_SUBBITMAP_FULL : HWLOC_SUBBITMAP_ZERO; + for(i=min_count; iulongs[i] = set1->ulongs[i] ^ w2; + } else { + unsigned long w1 = set1->infinite ? 
HWLOC_SUBBITMAP_FULL : HWLOC_SUBBITMAP_ZERO; + for(i=min_count; iulongs[i] = set2->ulongs[i] ^ w1; + } + } + + res->infinite = (!set1->infinite) != (!set2->infinite); + return 0; +} + +int hwloc_bitmap_not (struct hwloc_bitmap_s *res, const struct hwloc_bitmap_s *set) +{ + unsigned count = set->ulongs_count; + unsigned i; + + HWLOC__BITMAP_CHECK(res); + HWLOC__BITMAP_CHECK(set); + + if (hwloc_bitmap_reset_by_ulongs(res, count) < 0) + return -1; + + for(i=0; iulongs[i] = ~set->ulongs[i]; + + res->infinite = !set->infinite; + return 0; +} + +int hwloc_bitmap_first(const struct hwloc_bitmap_s * set) +{ + unsigned i; + + HWLOC__BITMAP_CHECK(set); + + for(i=0; iulongs_count; i++) { + /* subsets are unsigned longs, use ffsl */ + unsigned long w = set->ulongs[i]; + if (w) + return hwloc_ffsl(w) - 1 + HWLOC_BITS_PER_LONG*i; + } + + if (set->infinite) + return set->ulongs_count * HWLOC_BITS_PER_LONG; + + return -1; +} + +int hwloc_bitmap_first_unset(const struct hwloc_bitmap_s * set) +{ + unsigned i; + + HWLOC__BITMAP_CHECK(set); + + for(i=0; iulongs_count; i++) { + /* subsets are unsigned longs, use ffsl */ + unsigned long w = ~set->ulongs[i]; + if (w) + return hwloc_ffsl(w) - 1 + HWLOC_BITS_PER_LONG*i; + } + + if (!set->infinite) + return set->ulongs_count * HWLOC_BITS_PER_LONG; + + return -1; +} + +int hwloc_bitmap_last(const struct hwloc_bitmap_s * set) +{ + int i; + + HWLOC__BITMAP_CHECK(set); + + if (set->infinite) + return -1; + + for(i=(int)set->ulongs_count-1; i>=0; i--) { + /* subsets are unsigned longs, use flsl */ + unsigned long w = set->ulongs[i]; + if (w) + return hwloc_flsl(w) - 1 + HWLOC_BITS_PER_LONG*i; + } + + return -1; +} + +int hwloc_bitmap_last_unset(const struct hwloc_bitmap_s * set) +{ + int i; + + HWLOC__BITMAP_CHECK(set); + + if (!set->infinite) + return -1; + + for(i=(int)set->ulongs_count-1; i>=0; i--) { + /* subsets are unsigned longs, use flsl */ + unsigned long w = ~set->ulongs[i]; + if (w) + return hwloc_flsl(w) - 1 + HWLOC_BITS_PER_LONG*i; 
+ } + + return -1; +} + +int hwloc_bitmap_next(const struct hwloc_bitmap_s * set, int prev_cpu) +{ + unsigned i = HWLOC_SUBBITMAP_INDEX(prev_cpu + 1); + + HWLOC__BITMAP_CHECK(set); + + if (i >= set->ulongs_count) { + if (set->infinite) + return prev_cpu + 1; + else + return -1; + } + + for(; iulongs_count; i++) { + /* subsets are unsigned longs, use ffsl */ + unsigned long w = set->ulongs[i]; + + /* if the prev cpu is in the same word as the possible next one, + we need to mask out previous cpus */ + if (prev_cpu >= 0 && HWLOC_SUBBITMAP_INDEX((unsigned) prev_cpu) == i) + w &= ~HWLOC_SUBBITMAP_ULBIT_TO(HWLOC_SUBBITMAP_CPU_ULBIT(prev_cpu)); + + if (w) + return hwloc_ffsl(w) - 1 + HWLOC_BITS_PER_LONG*i; + } + + if (set->infinite) + return set->ulongs_count * HWLOC_BITS_PER_LONG; + + return -1; +} + +int hwloc_bitmap_next_unset(const struct hwloc_bitmap_s * set, int prev_cpu) +{ + unsigned i = HWLOC_SUBBITMAP_INDEX(prev_cpu + 1); + + HWLOC__BITMAP_CHECK(set); + + if (i >= set->ulongs_count) { + if (!set->infinite) + return prev_cpu + 1; + else + return -1; + } + + for(; iulongs_count; i++) { + /* subsets are unsigned longs, use ffsl */ + unsigned long w = ~set->ulongs[i]; + + /* if the prev cpu is in the same word as the possible next one, + we need to mask out previous cpus */ + if (prev_cpu >= 0 && HWLOC_SUBBITMAP_INDEX((unsigned) prev_cpu) == i) + w &= ~HWLOC_SUBBITMAP_ULBIT_TO(HWLOC_SUBBITMAP_CPU_ULBIT(prev_cpu)); + + if (w) + return hwloc_ffsl(w) - 1 + HWLOC_BITS_PER_LONG*i; + } + + if (!set->infinite) + return set->ulongs_count * HWLOC_BITS_PER_LONG; + + return -1; +} + +int hwloc_bitmap_singlify(struct hwloc_bitmap_s * set) +{ + unsigned i; + int found = 0; + + HWLOC__BITMAP_CHECK(set); + + for(i=0; iulongs_count; i++) { + if (found) { + set->ulongs[i] = HWLOC_SUBBITMAP_ZERO; + continue; + } else { + /* subsets are unsigned longs, use ffsl */ + unsigned long w = set->ulongs[i]; + if (w) { + int _ffs = hwloc_ffsl(w); + set->ulongs[i] = 
HWLOC_SUBBITMAP_CPU(_ffs-1); + found = 1; + } + } + } + + if (set->infinite) { + if (found) { + set->infinite = 0; + } else { + /* set the first non allocated bit */ + unsigned first = set->ulongs_count * HWLOC_BITS_PER_LONG; + set->infinite = 0; /* do not let realloc fill the newly allocated sets */ + return hwloc_bitmap_set(set, first); + } + } + + return 0; +} + +int hwloc_bitmap_compare_first(const struct hwloc_bitmap_s * set1, const struct hwloc_bitmap_s * set2) +{ + unsigned count1 = set1->ulongs_count; + unsigned count2 = set2->ulongs_count; + unsigned max_count = count1 > count2 ? count1 : count2; + unsigned min_count = count1 + count2 - max_count; + unsigned i; + + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + for(i=0; iulongs[i]; + unsigned long w2 = set2->ulongs[i]; + if (w1 || w2) { + int _ffs1 = hwloc_ffsl(w1); + int _ffs2 = hwloc_ffsl(w2); + /* if both have a bit set, compare for real */ + if (_ffs1 && _ffs2) + return _ffs1-_ffs2; + /* one is empty, and it is considered higher, so reverse-compare them */ + return _ffs2-_ffs1; + } + } + + if (count1 != count2) { + if (min_count < count2) { + for(i=min_count; iulongs[i]; + if (set1->infinite) + return -!(w2 & 1); + else if (w2) + return 1; + } + } else { + for(i=min_count; iulongs[i]; + if (set2->infinite) + return !(w1 & 1); + else if (w1) + return -1; + } + } + } + + return !!set1->infinite - !!set2->infinite; +} + +int hwloc_bitmap_compare(const struct hwloc_bitmap_s * set1, const struct hwloc_bitmap_s * set2) +{ + unsigned count1 = set1->ulongs_count; + unsigned count2 = set2->ulongs_count; + unsigned max_count = count1 > count2 ? count1 : count2; + unsigned min_count = count1 + count2 - max_count; + int i; + + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + if ((!set1->infinite) != (!set2->infinite)) + return !!set1->infinite - !!set2->infinite; + + if (count1 != count2) { + if (min_count < count2) { + unsigned long val1 = set1->infinite ? 
HWLOC_SUBBITMAP_FULL : HWLOC_SUBBITMAP_ZERO; + for(i=(int)max_count-1; i>=(int) min_count; i--) { + unsigned long val2 = set2->ulongs[i]; + if (val1 == val2) + continue; + return val1 < val2 ? -1 : 1; + } + } else { + unsigned long val2 = set2->infinite ? HWLOC_SUBBITMAP_FULL : HWLOC_SUBBITMAP_ZERO; + for(i=(int)max_count-1; i>=(int) min_count; i--) { + unsigned long val1 = set1->ulongs[i]; + if (val1 == val2) + continue; + return val1 < val2 ? -1 : 1; + } + } + } + + for(i=(int)min_count-1; i>=0; i--) { + unsigned long val1 = set1->ulongs[i]; + unsigned long val2 = set2->ulongs[i]; + if (val1 == val2) + continue; + return val1 < val2 ? -1 : 1; + } + + return 0; +} + +int hwloc_bitmap_weight(const struct hwloc_bitmap_s * set) +{ + int weight = 0; + unsigned i; + + HWLOC__BITMAP_CHECK(set); + + if (set->infinite) + return -1; + + for(i=0; iulongs_count; i++) + weight += hwloc_weight_long(set->ulongs[i]); + return weight; +} + +int hwloc_bitmap_compare_inclusion(const struct hwloc_bitmap_s * set1, const struct hwloc_bitmap_s * set2) +{ + unsigned max_count = set1->ulongs_count > set2->ulongs_count ? 
set1->ulongs_count : set2->ulongs_count; + int result = HWLOC_BITMAP_EQUAL; /* means empty sets return equal */ + int empty1 = 1; + int empty2 = 1; + unsigned i; + + HWLOC__BITMAP_CHECK(set1); + HWLOC__BITMAP_CHECK(set2); + + for(i=0; iinfinite) { + if (set2->infinite) { + /* set2 infinite only */ + if (result == HWLOC_BITMAP_CONTAINS) { + if (!empty2) + return HWLOC_BITMAP_INTERSECTS; + result = HWLOC_BITMAP_DIFFERENT; + } else if (result == HWLOC_BITMAP_EQUAL) { + result = HWLOC_BITMAP_INCLUDED; + } + /* no change otherwise */ + } + } else if (!set2->infinite) { + /* set1 infinite only */ + if (result == HWLOC_BITMAP_INCLUDED) { + if (!empty1) + return HWLOC_BITMAP_INTERSECTS; + result = HWLOC_BITMAP_DIFFERENT; + } else if (result == HWLOC_BITMAP_EQUAL) { + result = HWLOC_BITMAP_CONTAINS; + } + /* no change otherwise */ + } else { + /* both infinite */ + if (result == HWLOC_BITMAP_DIFFERENT) + return HWLOC_BITMAP_INTERSECTS; + /* equal/contains/included unchanged */ + } + + return result; +} diff --git a/src/3rdparty/hwloc/src/components.c b/src/3rdparty/hwloc/src/components.c new file mode 100644 index 00000000..bd7c00e3 --- /dev/null +++ b/src/3rdparty/hwloc/src/components.c @@ -0,0 +1,785 @@ +/* + * Copyright © 2009-2017 Inria. All rights reserved. + * Copyright © 2012 Université Bordeaux + * See COPYING in top-level directory. + */ + +#include +#include +#include +#include +#include + +#define HWLOC_COMPONENT_STOP_NAME "stop" +#define HWLOC_COMPONENT_EXCLUDE_CHAR '-' +#define HWLOC_COMPONENT_SEPS "," + +/* list of all registered discovery components, sorted by priority, higher priority first. + * noos is last because its priority is 0. + * others' priority is 10. 
+ */ +static struct hwloc_disc_component * hwloc_disc_components = NULL; + +static unsigned hwloc_components_users = 0; /* first one initializes, last ones destroys */ + +static int hwloc_components_verbose = 0; +#ifdef HWLOC_HAVE_PLUGINS +static int hwloc_plugins_verbose = 0; +static const char * hwloc_plugins_blacklist = NULL; +#endif + +/* hwloc_components_mutex serializes: + * - loading/unloading plugins, and modifications of the hwloc_plugins list + * - calls to ltdl, including in hwloc_check_plugin_namespace() + * - registration of components with hwloc_disc_component_register() + * and hwloc_xml_callbacks_register() + */ +#ifdef HWLOC_WIN_SYS +/* Basic mutex on top of InterlockedCompareExchange() on windows, + * Far from perfect, but easy to maintain, and way enough given that this code will never be needed for real. */ +#include +static LONG hwloc_components_mutex = 0; +#define HWLOC_COMPONENTS_LOCK() do { \ + while (InterlockedCompareExchange(&hwloc_components_mutex, 1, 0) != 0) \ + SwitchToThread(); \ +} while (0) +#define HWLOC_COMPONENTS_UNLOCK() do { \ + assert(hwloc_components_mutex == 1); \ + hwloc_components_mutex = 0; \ +} while (0) + +#elif defined HWLOC_HAVE_PTHREAD_MUTEX +/* pthread mutex if available (except on windows) */ +#include +static pthread_mutex_t hwloc_components_mutex = PTHREAD_MUTEX_INITIALIZER; +#define HWLOC_COMPONENTS_LOCK() pthread_mutex_lock(&hwloc_components_mutex) +#define HWLOC_COMPONENTS_UNLOCK() pthread_mutex_unlock(&hwloc_components_mutex) + +#else /* HWLOC_WIN_SYS || HWLOC_HAVE_PTHREAD_MUTEX */ +#error No mutex implementation available +#endif + + +#ifdef HWLOC_HAVE_PLUGINS + +#include + +/* array of pointers to dynamically loaded plugins */ +static struct hwloc__plugin_desc { + char *name; + struct hwloc_component *component; + char *filename; + lt_dlhandle handle; + struct hwloc__plugin_desc *next; +} *hwloc_plugins = NULL; + +static int +hwloc__dlforeach_cb(const char *filename, void *_data __hwloc_attribute_unused) 
+{ + const char *basename; + lt_dlhandle handle; + struct hwloc_component *component; + struct hwloc__plugin_desc *desc, **prevdesc; + + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin dlforeach found `%s'\n", filename); + + basename = strrchr(filename, '/'); + if (!basename) + basename = filename; + else + basename++; + + if (hwloc_plugins_blacklist && strstr(hwloc_plugins_blacklist, basename)) { + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin `%s' is blacklisted in the environment\n", basename); + goto out; + } + + /* dlopen and get the component structure */ + handle = lt_dlopenext(filename); + if (!handle) { + if (hwloc_plugins_verbose) + fprintf(stderr, "Failed to load plugin: %s\n", lt_dlerror()); + goto out; + } + +{ + char componentsymbolname[strlen(basename)+10+1]; + sprintf(componentsymbolname, "%s_component", basename); + component = lt_dlsym(handle, componentsymbolname); + if (!component) { + if (hwloc_plugins_verbose) + fprintf(stderr, "Failed to find component symbol `%s'\n", + componentsymbolname); + goto out_with_handle; + } + if (component->abi != HWLOC_COMPONENT_ABI) { + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin symbol ABI %u instead of %d\n", + component->abi, HWLOC_COMPONENT_ABI); + goto out_with_handle; + } + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin contains expected symbol `%s'\n", + componentsymbolname); +} + + if (HWLOC_COMPONENT_TYPE_DISC == component->type) { + if (strncmp(basename, "hwloc_", 6)) { + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin name `%s' doesn't match its type DISCOVERY\n", basename); + goto out_with_handle; + } + } else if (HWLOC_COMPONENT_TYPE_XML == component->type) { + if (strncmp(basename, "hwloc_xml_", 10)) { + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin name `%s' doesn't match its type XML\n", basename); + goto out_with_handle; + } + } else { + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin name `%s' has invalid type %u\n", + basename, (unsigned) 
component->type); + goto out_with_handle; + } + + /* allocate a plugin_desc and queue it */ + desc = malloc(sizeof(*desc)); + if (!desc) + goto out_with_handle; + desc->name = strdup(basename); + desc->filename = strdup(filename); + desc->component = component; + desc->handle = handle; + desc->next = NULL; + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin descriptor `%s' ready\n", basename); + + /* append to the list */ + prevdesc = &hwloc_plugins; + while (*prevdesc) + prevdesc = &((*prevdesc)->next); + *prevdesc = desc; + if (hwloc_plugins_verbose) + fprintf(stderr, "Plugin descriptor `%s' queued\n", basename); + return 0; + + out_with_handle: + lt_dlclose(handle); + out: + return 0; +} + +static void +hwloc_plugins_exit(void) +{ + struct hwloc__plugin_desc *desc, *next; + + if (hwloc_plugins_verbose) + fprintf(stderr, "Closing all plugins\n"); + + desc = hwloc_plugins; + while (desc) { + next = desc->next; + lt_dlclose(desc->handle); + free(desc->name); + free(desc->filename); + free(desc); + desc = next; + } + hwloc_plugins = NULL; + + lt_dlexit(); +} + +static int +hwloc_plugins_init(void) +{ + const char *verboseenv; + const char *path = HWLOC_PLUGINS_PATH; + const char *env; + int err; + + verboseenv = getenv("HWLOC_PLUGINS_VERBOSE"); + hwloc_plugins_verbose = verboseenv ? 
atoi(verboseenv) : 0; + + hwloc_plugins_blacklist = getenv("HWLOC_PLUGINS_BLACKLIST"); + + err = lt_dlinit(); + if (err) + goto out; + + env = getenv("HWLOC_PLUGINS_PATH"); + if (env) + path = env; + + hwloc_plugins = NULL; + + if (hwloc_plugins_verbose) + fprintf(stderr, "Starting plugin dlforeach in %s\n", path); + err = lt_dlforeachfile(path, hwloc__dlforeach_cb, NULL); + if (err) + goto out_with_init; + + return 0; + + out_with_init: + hwloc_plugins_exit(); + out: + return -1; +} + +#endif /* HWLOC_HAVE_PLUGINS */ + +static const char * +hwloc_disc_component_type_string(hwloc_disc_component_type_t type) +{ + switch (type) { + case HWLOC_DISC_COMPONENT_TYPE_CPU: return "cpu"; + case HWLOC_DISC_COMPONENT_TYPE_GLOBAL: return "global"; + case HWLOC_DISC_COMPONENT_TYPE_MISC: return "misc"; + default: return "**unknown**"; + } +} + +static int +hwloc_disc_component_register(struct hwloc_disc_component *component, + const char *filename) +{ + struct hwloc_disc_component **prev; + + /* check that the component name is valid */ + if (!strcmp(component->name, HWLOC_COMPONENT_STOP_NAME)) { + if (hwloc_components_verbose) + fprintf(stderr, "Cannot register discovery component with reserved name `" HWLOC_COMPONENT_STOP_NAME "'\n"); + return -1; + } + if (strchr(component->name, HWLOC_COMPONENT_EXCLUDE_CHAR) + || strcspn(component->name, HWLOC_COMPONENT_SEPS) != strlen(component->name)) { + if (hwloc_components_verbose) + fprintf(stderr, "Cannot register discovery component with name `%s' containing reserved characters `%c" HWLOC_COMPONENT_SEPS "'\n", + component->name, HWLOC_COMPONENT_EXCLUDE_CHAR); + return -1; + } + /* check that the component type is valid */ + switch ((unsigned) component->type) { + case HWLOC_DISC_COMPONENT_TYPE_CPU: + case HWLOC_DISC_COMPONENT_TYPE_GLOBAL: + case HWLOC_DISC_COMPONENT_TYPE_MISC: + break; + default: + fprintf(stderr, "Cannot register discovery component `%s' with unknown type %u\n", + component->name, (unsigned) component->type); + 
return -1; + } + + prev = &hwloc_disc_components; + while (NULL != *prev) { + if (!strcmp((*prev)->name, component->name)) { + /* if two components have the same name, only keep the highest priority one */ + if ((*prev)->priority < component->priority) { + /* drop the existing component */ + if (hwloc_components_verbose) + fprintf(stderr, "Dropping previously registered discovery component `%s', priority %u lower than new one %u\n", + (*prev)->name, (*prev)->priority, component->priority); + *prev = (*prev)->next; + } else { + /* drop the new one */ + if (hwloc_components_verbose) + fprintf(stderr, "Ignoring new discovery component `%s', priority %u lower than previously registered one %u\n", + component->name, component->priority, (*prev)->priority); + return -1; + } + } + prev = &((*prev)->next); + } + if (hwloc_components_verbose) + fprintf(stderr, "Registered %s discovery component `%s' with priority %u (%s%s)\n", + hwloc_disc_component_type_string(component->type), component->name, component->priority, + filename ? "from plugin " : "statically build", filename ? filename : ""); + + prev = &hwloc_disc_components; + while (NULL != *prev) { + if ((*prev)->priority < component->priority) + break; + prev = &((*prev)->next); + } + component->next = *prev; + *prev = component; + return 0; +} + +#include + +static void (**hwloc_component_finalize_cbs)(unsigned long); +static unsigned hwloc_component_finalize_cb_count; + +void +hwloc_components_init(void) +{ +#ifdef HWLOC_HAVE_PLUGINS + struct hwloc__plugin_desc *desc; +#endif + const char *verboseenv; + unsigned i; + + HWLOC_COMPONENTS_LOCK(); + assert((unsigned) -1 != hwloc_components_users); + if (0 != hwloc_components_users++) { + HWLOC_COMPONENTS_UNLOCK(); + return; + } + + verboseenv = getenv("HWLOC_COMPONENTS_VERBOSE"); + hwloc_components_verbose = verboseenv ? 
atoi(verboseenv) : 0; + +#ifdef HWLOC_HAVE_PLUGINS + hwloc_plugins_init(); +#endif + + hwloc_component_finalize_cbs = NULL; + hwloc_component_finalize_cb_count = 0; + /* count the max number of finalize callbacks */ + for(i=0; NULL != hwloc_static_components[i]; i++) + hwloc_component_finalize_cb_count++; +#ifdef HWLOC_HAVE_PLUGINS + for(desc = hwloc_plugins; NULL != desc; desc = desc->next) + hwloc_component_finalize_cb_count++; +#endif + if (hwloc_component_finalize_cb_count) { + hwloc_component_finalize_cbs = calloc(hwloc_component_finalize_cb_count, + sizeof(*hwloc_component_finalize_cbs)); + assert(hwloc_component_finalize_cbs); + /* forget that max number and recompute the real one below */ + hwloc_component_finalize_cb_count = 0; + } + + /* hwloc_static_components is created by configure in static-components.h */ + for(i=0; NULL != hwloc_static_components[i]; i++) { + if (hwloc_static_components[i]->flags) { + fprintf(stderr, "Ignoring static component with invalid flags %lx\n", + hwloc_static_components[i]->flags); + continue; + } + + /* initialize the component */ + if (hwloc_static_components[i]->init && hwloc_static_components[i]->init(0) < 0) { + if (hwloc_components_verbose) + fprintf(stderr, "Ignoring static component, failed to initialize\n"); + continue; + } + /* queue ->finalize() callback if any */ + if (hwloc_static_components[i]->finalize) + hwloc_component_finalize_cbs[hwloc_component_finalize_cb_count++] = hwloc_static_components[i]->finalize; + + /* register for real now */ + if (HWLOC_COMPONENT_TYPE_DISC == hwloc_static_components[i]->type) + hwloc_disc_component_register(hwloc_static_components[i]->data, NULL); + else if (HWLOC_COMPONENT_TYPE_XML == hwloc_static_components[i]->type) + hwloc_xml_callbacks_register(hwloc_static_components[i]->data); + else + assert(0); + } + + /* dynamic plugins */ +#ifdef HWLOC_HAVE_PLUGINS + for(desc = hwloc_plugins; NULL != desc; desc = desc->next) { + if (desc->component->flags) { + fprintf(stderr, 
"Ignoring plugin `%s' component with invalid flags %lx\n", + desc->name, desc->component->flags); + continue; + } + + /* initialize the component */ + if (desc->component->init && desc->component->init(0) < 0) { + if (hwloc_components_verbose) + fprintf(stderr, "Ignoring plugin `%s', failed to initialize\n", desc->name); + continue; + } + /* queue ->finalize() callback if any */ + if (desc->component->finalize) + hwloc_component_finalize_cbs[hwloc_component_finalize_cb_count++] = desc->component->finalize; + + /* register for real now */ + if (HWLOC_COMPONENT_TYPE_DISC == desc->component->type) + hwloc_disc_component_register(desc->component->data, desc->filename); + else if (HWLOC_COMPONENT_TYPE_XML == desc->component->type) + hwloc_xml_callbacks_register(desc->component->data); + else + assert(0); + } +#endif + + HWLOC_COMPONENTS_UNLOCK(); +} + +void +hwloc_backends_init(struct hwloc_topology *topology) +{ + topology->backends = NULL; + topology->backend_excludes = 0; +} + +static struct hwloc_disc_component * +hwloc_disc_component_find(int type /* hwloc_disc_component_type_t or -1 if any */, + const char *name /* name of NULL if any */) +{ + struct hwloc_disc_component *comp = hwloc_disc_components; + while (NULL != comp) { + if ((-1 == type || type == (int) comp->type) + && (NULL == name || !strcmp(name, comp->name))) + return comp; + comp = comp->next; + } + return NULL; +} + +/* used by set_xml(), set_synthetic(), ... environment variables, ... 
to force the first backend */ +int +hwloc_disc_component_force_enable(struct hwloc_topology *topology, + int envvar_forced, + int type, const char *name, + const void *data1, const void *data2, const void *data3) +{ + struct hwloc_disc_component *comp; + struct hwloc_backend *backend; + + if (topology->is_loaded) { + errno = EBUSY; + return -1; + } + + comp = hwloc_disc_component_find(type, name); + if (!comp) { + errno = ENOSYS; + return -1; + } + + backend = comp->instantiate(comp, data1, data2, data3); + if (backend) { + backend->envvar_forced = envvar_forced; + if (topology->backends) + hwloc_backends_disable_all(topology); + return hwloc_backend_enable(topology, backend); + } else + return -1; +} + +static int +hwloc_disc_component_try_enable(struct hwloc_topology *topology, + struct hwloc_disc_component *comp, + const char *comparg, + int envvar_forced) +{ + struct hwloc_backend *backend; + + if (topology->backend_excludes & comp->type) { + if (hwloc_components_verbose) + /* do not warn if envvar_forced since system-wide HWLOC_COMPONENTS must be silently ignored after set_xml() etc. + */ + fprintf(stderr, "Excluding %s discovery component `%s', conflicts with excludes 0x%x\n", + hwloc_disc_component_type_string(comp->type), comp->name, topology->backend_excludes); + return -1; + } + + backend = comp->instantiate(comp, comparg, NULL, NULL); + if (!backend) { + if (hwloc_components_verbose || envvar_forced) + fprintf(stderr, "Failed to instantiate discovery component `%s'\n", comp->name); + return -1; + } + + backend->envvar_forced = envvar_forced; + return hwloc_backend_enable(topology, backend); +} + +void +hwloc_disc_components_enable_others(struct hwloc_topology *topology) +{ + struct hwloc_disc_component *comp; + struct hwloc_backend *backend; + int tryall = 1; + const char *_env; + char *env; /* we'll to modify the env value, so duplicate it */ + + _env = getenv("HWLOC_COMPONENTS"); + env = _env ? 
strdup(_env) : NULL; + + /* enable explicitly listed components */ + if (env) { + char *curenv = env; + size_t s; + + while (*curenv) { + s = strcspn(curenv, HWLOC_COMPONENT_SEPS); + if (s) { + char c; + + /* replace linuxpci with linuxio for backward compatibility with pre-v2.0 */ + if (!strncmp(curenv, "linuxpci", 8) && s == 8) { + curenv[5] = 'i'; + curenv[6] = 'o'; + curenv[7] = *HWLOC_COMPONENT_SEPS; + } else if (curenv[0] == HWLOC_COMPONENT_EXCLUDE_CHAR && !strncmp(curenv+1, "linuxpci", 8) && s == 9) { + curenv[6] = 'i'; + curenv[7] = 'o'; + curenv[8] = *HWLOC_COMPONENT_SEPS; + /* skip this name, it's a negated one */ + goto nextname; + } + + if (curenv[0] == HWLOC_COMPONENT_EXCLUDE_CHAR) + goto nextname; + + if (!strncmp(curenv, HWLOC_COMPONENT_STOP_NAME, s)) { + tryall = 0; + break; + } + + /* save the last char and replace with \0 */ + c = curenv[s]; + curenv[s] = '\0'; + + comp = hwloc_disc_component_find(-1, curenv); + if (comp) { + hwloc_disc_component_try_enable(topology, comp, NULL, 1 /* envvar forced */); + } else { + fprintf(stderr, "Cannot find discovery component `%s'\n", curenv); + } + + /* restore chars (the second loop below needs env to be unmodified) */ + curenv[s] = c; + } + +nextname: + curenv += s; + if (*curenv) + /* Skip comma */ + curenv++; + } + } + + /* env is still the same, the above loop didn't modify it */ + + /* now enable remaining components (except the explicitly '-'-listed ones) */ + if (tryall) { + comp = hwloc_disc_components; + while (NULL != comp) { + if (!comp->enabled_by_default) + goto nextcomp; + /* check if this component was explicitly excluded in env */ + if (env) { + char *curenv = env; + while (*curenv) { + size_t s = strcspn(curenv, HWLOC_COMPONENT_SEPS); + if (curenv[0] == HWLOC_COMPONENT_EXCLUDE_CHAR && !strncmp(curenv+1, comp->name, s-1) && strlen(comp->name) == s-1) { + if (hwloc_components_verbose) + fprintf(stderr, "Excluding %s discovery component `%s' because of HWLOC_COMPONENTS environment variable\n", 
+ hwloc_disc_component_type_string(comp->type), comp->name); + goto nextcomp; + } + curenv += s; + if (*curenv) + /* Skip comma */ + curenv++; + } + } + hwloc_disc_component_try_enable(topology, comp, NULL, 0 /* defaults, not envvar forced */); +nextcomp: + comp = comp->next; + } + } + + if (hwloc_components_verbose) { + /* print a summary */ + int first = 1; + backend = topology->backends; + fprintf(stderr, "Final list of enabled discovery components: "); + while (backend != NULL) { + fprintf(stderr, "%s%s", first ? "" : ",", backend->component->name); + backend = backend->next; + first = 0; + } + fprintf(stderr, "\n"); + } + + free(env); +} + +void +hwloc_components_fini(void) +{ + unsigned i; + + HWLOC_COMPONENTS_LOCK(); + assert(0 != hwloc_components_users); + if (0 != --hwloc_components_users) { + HWLOC_COMPONENTS_UNLOCK(); + return; + } + + for(i=0; icomponent = component; + backend->flags = 0; + backend->discover = NULL; + backend->get_pci_busid_cpuset = NULL; + backend->disable = NULL; + backend->is_thissystem = -1; + backend->next = NULL; + backend->envvar_forced = 0; + return backend; +} + +static void +hwloc_backend_disable(struct hwloc_backend *backend) +{ + if (backend->disable) + backend->disable(backend); + free(backend); +} + +int +hwloc_backend_enable(struct hwloc_topology *topology, struct hwloc_backend *backend) +{ + struct hwloc_backend **pprev; + + /* check backend flags */ + if (backend->flags) { + fprintf(stderr, "Cannot enable %s discovery component `%s' with unknown flags %lx\n", + hwloc_disc_component_type_string(backend->component->type), backend->component->name, backend->flags); + return -1; + } + + /* make sure we didn't already enable this backend, we don't want duplicates */ + pprev = &topology->backends; + while (NULL != *pprev) { + if ((*pprev)->component == backend->component) { + if (hwloc_components_verbose) + fprintf(stderr, "Cannot enable %s discovery component `%s' twice\n", + 
hwloc_disc_component_type_string(backend->component->type), backend->component->name); + hwloc_backend_disable(backend); + errno = EBUSY; + return -1; + } + pprev = &((*pprev)->next); + } + + if (hwloc_components_verbose) + fprintf(stderr, "Enabling %s discovery component `%s'\n", + hwloc_disc_component_type_string(backend->component->type), backend->component->name); + + /* enqueue at the end */ + pprev = &topology->backends; + while (NULL != *pprev) + pprev = &((*pprev)->next); + backend->next = *pprev; + *pprev = backend; + + backend->topology = topology; + topology->backend_excludes |= backend->component->excludes; + return 0; +} + +void +hwloc_backends_is_thissystem(struct hwloc_topology *topology) +{ + struct hwloc_backend *backend; + const char *local_env; + + /* Apply is_thissystem topology flag before we enforce envvar backends. + * If the application changed the backend with set_foo(), + * it may use set_flags() update the is_thissystem flag here. + * If it changes the backend with environment variables below, + * it may use HWLOC_THISSYSTEM envvar below as well. 
+ */ + + topology->is_thissystem = 1; + + /* apply thissystem from normally-given backends (envvar_forced=0, either set_foo() or defaults) */ + backend = topology->backends; + while (backend != NULL) { + if (backend->envvar_forced == 0 && backend->is_thissystem != -1) { + assert(backend->is_thissystem == 0); + topology->is_thissystem = 0; + } + backend = backend->next; + } + + /* override set_foo() with flags */ + if (topology->flags & HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM) + topology->is_thissystem = 1; + + /* now apply envvar-forced backend (envvar_forced=1) */ + backend = topology->backends; + while (backend != NULL) { + if (backend->envvar_forced == 1 && backend->is_thissystem != -1) { + assert(backend->is_thissystem == 0); + topology->is_thissystem = 0; + } + backend = backend->next; + } + + /* override with envvar-given flag */ + local_env = getenv("HWLOC_THISSYSTEM"); + if (local_env) + topology->is_thissystem = atoi(local_env); +} + +void +hwloc_backends_find_callbacks(struct hwloc_topology *topology) +{ + struct hwloc_backend *backend = topology->backends; + /* use the first backend's get_pci_busid_cpuset callback */ + topology->get_pci_busid_cpuset_backend = NULL; + while (backend != NULL) { + if (backend->get_pci_busid_cpuset) { + topology->get_pci_busid_cpuset_backend = backend; + return; + } + backend = backend->next; + } + return; +} + +void +hwloc_backends_disable_all(struct hwloc_topology *topology) +{ + struct hwloc_backend *backend; + + while (NULL != (backend = topology->backends)) { + struct hwloc_backend *next = backend->next; + if (hwloc_components_verbose) + fprintf(stderr, "Disabling %s discovery component `%s'\n", + hwloc_disc_component_type_string(backend->component->type), backend->component->name); + hwloc_backend_disable(backend); + topology->backends = next; + } + topology->backends = NULL; + topology->backend_excludes = 0; +} diff --git a/src/3rdparty/hwloc/src/diff.c b/src/3rdparty/hwloc/src/diff.c new file mode 100644 index 
00000000..00811a7b --- /dev/null +++ b/src/3rdparty/hwloc/src/diff.c @@ -0,0 +1,492 @@ +/* + * Copyright © 2013-2018 Inria. All rights reserved. + * See COPYING in top-level directory. + */ + +#include +#include +#include + +int hwloc_topology_diff_destroy(hwloc_topology_diff_t diff) +{ + hwloc_topology_diff_t next; + while (diff) { + next = diff->generic.next; + switch (diff->generic.type) { + default: + break; + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR: + switch (diff->obj_attr.diff.generic.type) { + default: + break; + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_NAME: + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO: + free(diff->obj_attr.diff.string.name); + free(diff->obj_attr.diff.string.oldvalue); + free(diff->obj_attr.diff.string.newvalue); + break; + } + break; + } + free(diff); + diff = next; + } + return 0; +} + +/************************ + * Computing diffs + */ + +static void hwloc_append_diff(hwloc_topology_diff_t newdiff, + hwloc_topology_diff_t *firstdiffp, + hwloc_topology_diff_t *lastdiffp) +{ + if (*firstdiffp) + (*lastdiffp)->generic.next = newdiff; + else + *firstdiffp = newdiff; + *lastdiffp = newdiff; + newdiff->generic.next = NULL; +} + +static int hwloc_append_diff_too_complex(hwloc_obj_t obj1, + hwloc_topology_diff_t *firstdiffp, + hwloc_topology_diff_t *lastdiffp) +{ + hwloc_topology_diff_t newdiff; + newdiff = malloc(sizeof(*newdiff)); + if (!newdiff) + return -1; + + newdiff->too_complex.type = HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX; + newdiff->too_complex.obj_depth = obj1->depth; + newdiff->too_complex.obj_index = obj1->logical_index; + hwloc_append_diff(newdiff, firstdiffp, lastdiffp); + return 0; +} + +static int hwloc_append_diff_obj_attr_string(hwloc_obj_t obj, + hwloc_topology_diff_obj_attr_type_t type, + const char *name, + const char *oldvalue, + const char *newvalue, + hwloc_topology_diff_t *firstdiffp, + hwloc_topology_diff_t *lastdiffp) +{ + hwloc_topology_diff_t newdiff; + newdiff = malloc(sizeof(*newdiff)); + if (!newdiff) + return -1; + + 
newdiff->obj_attr.type = HWLOC_TOPOLOGY_DIFF_OBJ_ATTR; + newdiff->obj_attr.obj_depth = obj->depth; + newdiff->obj_attr.obj_index = obj->logical_index; + newdiff->obj_attr.diff.string.type = type; + newdiff->obj_attr.diff.string.name = name ? strdup(name) : NULL; + newdiff->obj_attr.diff.string.oldvalue = oldvalue ? strdup(oldvalue) : NULL; + newdiff->obj_attr.diff.string.newvalue = newvalue ? strdup(newvalue) : NULL; + hwloc_append_diff(newdiff, firstdiffp, lastdiffp); + return 0; +} + +static int hwloc_append_diff_obj_attr_uint64(hwloc_obj_t obj, + hwloc_topology_diff_obj_attr_type_t type, + hwloc_uint64_t idx, + hwloc_uint64_t oldvalue, + hwloc_uint64_t newvalue, + hwloc_topology_diff_t *firstdiffp, + hwloc_topology_diff_t *lastdiffp) +{ + hwloc_topology_diff_t newdiff; + newdiff = malloc(sizeof(*newdiff)); + if (!newdiff) + return -1; + + newdiff->obj_attr.type = HWLOC_TOPOLOGY_DIFF_OBJ_ATTR; + newdiff->obj_attr.obj_depth = obj->depth; + newdiff->obj_attr.obj_index = obj->logical_index; + newdiff->obj_attr.diff.uint64.type = type; + newdiff->obj_attr.diff.uint64.index = idx; + newdiff->obj_attr.diff.uint64.oldvalue = oldvalue; + newdiff->obj_attr.diff.uint64.newvalue = newvalue; + hwloc_append_diff(newdiff, firstdiffp, lastdiffp); + return 0; +} + +static int +hwloc_diff_trees(hwloc_topology_t topo1, hwloc_obj_t obj1, + hwloc_topology_t topo2, hwloc_obj_t obj2, + unsigned flags, + hwloc_topology_diff_t *firstdiffp, hwloc_topology_diff_t *lastdiffp) +{ + unsigned i; + int err; + hwloc_obj_t child1, child2; + + if (obj1->depth != obj2->depth) + goto out_too_complex; + + if (obj1->type != obj2->type) + goto out_too_complex; + if ((!obj1->subtype) != (!obj2->subtype) + || (obj1->subtype && strcmp(obj1->subtype, obj2->subtype))) + goto out_too_complex; + + if (obj1->os_index != obj2->os_index) + /* we could allow different os_index for non-PU non-NUMAnode objects + * but it's likely useless anyway */ + goto out_too_complex; + +#define _SETS_DIFFERENT(_set1, _set2) \ 
+ ( ( !(_set1) != !(_set2) ) \ + || ( (_set1) && !hwloc_bitmap_isequal(_set1, _set2) ) ) +#define SETS_DIFFERENT(_set, _obj1, _obj2) _SETS_DIFFERENT((_obj1)->_set, (_obj2)->_set) + if (SETS_DIFFERENT(cpuset, obj1, obj2) + || SETS_DIFFERENT(complete_cpuset, obj1, obj2) + || SETS_DIFFERENT(nodeset, obj1, obj2) + || SETS_DIFFERENT(complete_nodeset, obj1, obj2)) + goto out_too_complex; + + /* no need to check logical_index, sibling_rank, symmetric_subtree, + * the parents did it */ + + /* gp_index don't have to be strictly identical */ + + if ((!obj1->name) != (!obj2->name) + || (obj1->name && strcmp(obj1->name, obj2->name))) { + err = hwloc_append_diff_obj_attr_string(obj1, + HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_NAME, + NULL, + obj1->name, + obj2->name, + firstdiffp, lastdiffp); + if (err < 0) + return err; + } + + /* type-specific attrs */ + switch (obj1->type) { + default: + break; + case HWLOC_OBJ_NUMANODE: + if (obj1->attr->numanode.local_memory != obj2->attr->numanode.local_memory) { + err = hwloc_append_diff_obj_attr_uint64(obj1, + HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_SIZE, + 0, + obj1->attr->numanode.local_memory, + obj2->attr->numanode.local_memory, + firstdiffp, lastdiffp); + if (err < 0) + return err; + } + /* ignore memory page_types */ + break; + case HWLOC_OBJ_L1CACHE: + case HWLOC_OBJ_L2CACHE: + case HWLOC_OBJ_L3CACHE: + case HWLOC_OBJ_L4CACHE: + case HWLOC_OBJ_L5CACHE: + case HWLOC_OBJ_L1ICACHE: + case HWLOC_OBJ_L2ICACHE: + case HWLOC_OBJ_L3ICACHE: + if (memcmp(obj1->attr, obj2->attr, sizeof(obj1->attr->cache))) + goto out_too_complex; + break; + case HWLOC_OBJ_GROUP: + if (memcmp(obj1->attr, obj2->attr, sizeof(obj1->attr->group))) + goto out_too_complex; + break; + case HWLOC_OBJ_PCI_DEVICE: + if (memcmp(obj1->attr, obj2->attr, sizeof(obj1->attr->pcidev))) + goto out_too_complex; + break; + case HWLOC_OBJ_BRIDGE: + if (memcmp(obj1->attr, obj2->attr, sizeof(obj1->attr->bridge))) + goto out_too_complex; + break; + case HWLOC_OBJ_OS_DEVICE: + if (memcmp(obj1->attr, 
obj2->attr, sizeof(obj1->attr->osdev))) + goto out_too_complex; + break; + } + + /* infos */ + if (obj1->infos_count != obj2->infos_count) + goto out_too_complex; + for(i=0; iinfos_count; i++) { + struct hwloc_info_s *info1 = &obj1->infos[i], *info2 = &obj2->infos[i]; + if (strcmp(info1->name, info2->name)) + goto out_too_complex; + if (strcmp(obj1->infos[i].value, obj2->infos[i].value)) { + err = hwloc_append_diff_obj_attr_string(obj1, + HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO, + info1->name, + info1->value, + info2->value, + firstdiffp, lastdiffp); + if (err < 0) + return err; + } + } + + /* ignore userdata */ + + /* children */ + for(child1 = obj1->first_child, child2 = obj2->first_child; + child1 != NULL && child2 != NULL; + child1 = child1->next_sibling, child2 = child2->next_sibling) { + err = hwloc_diff_trees(topo1, child1, + topo2, child2, + flags, + firstdiffp, lastdiffp); + if (err < 0) + return err; + } + if (child1 || child2) + goto out_too_complex; + + /* memory children */ + for(child1 = obj1->memory_first_child, child2 = obj2->memory_first_child; + child1 != NULL && child2 != NULL; + child1 = child1->next_sibling, child2 = child2->next_sibling) { + err = hwloc_diff_trees(topo1, child1, + topo2, child2, + flags, + firstdiffp, lastdiffp); + if (err < 0) + return err; + } + if (child1 || child2) + goto out_too_complex; + + /* I/O children */ + for(child1 = obj1->io_first_child, child2 = obj2->io_first_child; + child1 != NULL && child2 != NULL; + child1 = child1->next_sibling, child2 = child2->next_sibling) { + err = hwloc_diff_trees(topo1, child1, + topo2, child2, + flags, + firstdiffp, lastdiffp); + if (err < 0) + return err; + } + if (child1 || child2) + goto out_too_complex; + + /* misc children */ + for(child1 = obj1->misc_first_child, child2 = obj2->misc_first_child; + child1 != NULL && child2 != NULL; + child1 = child1->next_sibling, child2 = child2->next_sibling) { + err = hwloc_diff_trees(topo1, child1, + topo2, child2, + flags, + firstdiffp, 
lastdiffp); + if (err < 0) + return err; + } + if (child1 || child2) + goto out_too_complex; + + return 0; + +out_too_complex: + hwloc_append_diff_too_complex(obj1, firstdiffp, lastdiffp); + return 0; +} + +int hwloc_topology_diff_build(hwloc_topology_t topo1, + hwloc_topology_t topo2, + unsigned long flags, + hwloc_topology_diff_t *diffp) +{ + hwloc_topology_diff_t lastdiff, tmpdiff; + struct hwloc_internal_distances_s *dist1, *dist2; + unsigned i; + int err; + + if (!topo1->is_loaded || !topo2->is_loaded) { + errno = EINVAL; + return -1; + } + + if (flags != 0) { + errno = EINVAL; + return -1; + } + + *diffp = NULL; + err = hwloc_diff_trees(topo1, hwloc_get_root_obj(topo1), + topo2, hwloc_get_root_obj(topo2), + flags, + diffp, &lastdiff); + if (!err) { + tmpdiff = *diffp; + while (tmpdiff) { + if (tmpdiff->generic.type == HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX) { + err = 1; + break; + } + tmpdiff = tmpdiff->generic.next; + } + } + + if (!err) { + if (SETS_DIFFERENT(allowed_cpuset, topo1, topo2) + || SETS_DIFFERENT(allowed_nodeset, topo1, topo2)) { + hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff); + err = 1; + } + } + + if (!err) { + /* distances */ + hwloc_internal_distances_refresh(topo1); + hwloc_internal_distances_refresh(topo2); + dist1 = topo1->first_dist; + dist2 = topo2->first_dist; + while (dist1 || dist2) { + if (!!dist1 != !!dist2) { + hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff); + err = 1; + break; + } + if (dist1->type != dist2->type + || dist1->nbobjs != dist2->nbobjs + || dist1->kind != dist2->kind + || memcmp(dist1->values, dist2->values, dist1->nbobjs * dist1->nbobjs * sizeof(*dist1->values))) { + hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff); + err = 1; + break; + } + for(i=0; inbobjs; i++) + /* gp_index isn't enforced above. so compare logical_index instead, which is enforced. 
requires distances refresh() above */ + if (dist1->objs[i]->logical_index != dist2->objs[i]->logical_index) { + hwloc_append_diff_too_complex(hwloc_get_root_obj(topo1), diffp, &lastdiff); + err = 1; + break; + } + dist1 = dist1->next; + dist2 = dist2->next; + } + } + + return err; +} + +/******************** + * Applying diffs + */ + +static int +hwloc_apply_diff_one(hwloc_topology_t topology, + hwloc_topology_diff_t diff, + unsigned long flags) +{ + int reverse = !!(flags & HWLOC_TOPOLOGY_DIFF_APPLY_REVERSE); + + switch (diff->generic.type) { + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR: { + struct hwloc_topology_diff_obj_attr_s *obj_attr = &diff->obj_attr; + hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, obj_attr->obj_depth, obj_attr->obj_index); + if (!obj) + return -1; + + switch (obj_attr->diff.generic.type) { + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_SIZE: { + hwloc_obj_t tmpobj; + hwloc_uint64_t oldvalue = reverse ? obj_attr->diff.uint64.newvalue : obj_attr->diff.uint64.oldvalue; + hwloc_uint64_t newvalue = reverse ? obj_attr->diff.uint64.oldvalue : obj_attr->diff.uint64.newvalue; + hwloc_uint64_t valuediff = newvalue - oldvalue; + if (obj->type != HWLOC_OBJ_NUMANODE) + return -1; + if (obj->attr->numanode.local_memory != oldvalue) + return -1; + obj->attr->numanode.local_memory = newvalue; + tmpobj = obj; + while (tmpobj) { + tmpobj->total_memory += valuediff; + tmpobj = tmpobj->parent; + } + break; + } + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_NAME: { + const char *oldvalue = reverse ? obj_attr->diff.string.newvalue : obj_attr->diff.string.oldvalue; + const char *newvalue = reverse ? obj_attr->diff.string.oldvalue : obj_attr->diff.string.newvalue; + if (!obj->name || strcmp(obj->name, oldvalue)) + return -1; + free(obj->name); + obj->name = strdup(newvalue); + break; + } + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO: { + const char *name = obj_attr->diff.string.name; + const char *oldvalue = reverse ? 
obj_attr->diff.string.newvalue : obj_attr->diff.string.oldvalue; + const char *newvalue = reverse ? obj_attr->diff.string.oldvalue : obj_attr->diff.string.newvalue; + unsigned i; + int found = 0; + for(i=0; iinfos_count; i++) { + struct hwloc_info_s *info = &obj->infos[i]; + if (!strcmp(info->name, name) + && !strcmp(info->value, oldvalue)) { + free(info->value); + info->value = strdup(newvalue); + found = 1; + break; + } + } + if (!found) + return -1; + break; + } + default: + return -1; + } + + break; + } + default: + return -1; + } + + return 0; +} + +int hwloc_topology_diff_apply(hwloc_topology_t topology, + hwloc_topology_diff_t diff, + unsigned long flags) +{ + hwloc_topology_diff_t tmpdiff, tmpdiff2; + int err, nr; + + if (!topology->is_loaded) { + errno = EINVAL; + return -1; + } + + if (flags & ~HWLOC_TOPOLOGY_DIFF_APPLY_REVERSE) { + errno = EINVAL; + return -1; + } + + tmpdiff = diff; + nr = 0; + while (tmpdiff) { + nr++; + err = hwloc_apply_diff_one(topology, tmpdiff, flags); + if (err < 0) + goto cancel; + tmpdiff = tmpdiff->generic.next; + } + return 0; + +cancel: + tmpdiff2 = tmpdiff; + tmpdiff = diff; + while (tmpdiff != tmpdiff2) { + hwloc_apply_diff_one(topology, tmpdiff, flags ^ HWLOC_TOPOLOGY_DIFF_APPLY_REVERSE); + tmpdiff = tmpdiff->generic.next; + } + errno = EINVAL; + return -nr; /* return the index (starting at 1) of the first element that couldn't be applied */ +} diff --git a/src/3rdparty/hwloc/src/distances.c b/src/3rdparty/hwloc/src/distances.c new file mode 100644 index 00000000..f0b91f01 --- /dev/null +++ b/src/3rdparty/hwloc/src/distances.c @@ -0,0 +1,920 @@ +/* + * Copyright © 2010-2018 Inria. All rights reserved. + * Copyright © 2011-2012 Université Bordeaux + * Copyright © 2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include + +/****************************************************** + * Global init, prepare, destroy, dup + */ + +/* called during topology init() */ +void hwloc_internal_distances_init(struct hwloc_topology *topology) +{ + topology->first_dist = topology->last_dist = NULL; + topology->next_dist_id = 0; +} + +/* called at the beginning of load() */ +void hwloc_internal_distances_prepare(struct hwloc_topology *topology) +{ + char *env; + hwloc_localeswitch_declare; + + topology->grouping = 1; + if (topology->type_filter[HWLOC_OBJ_GROUP] == HWLOC_TYPE_FILTER_KEEP_NONE) + topology->grouping = 0; + env = getenv("HWLOC_GROUPING"); + if (env && !atoi(env)) + topology->grouping = 0; + + if (topology->grouping) { + topology->grouping_next_subkind = 0; + + HWLOC_BUILD_ASSERT(sizeof(topology->grouping_accuracies)/sizeof(*topology->grouping_accuracies) == 5); + topology->grouping_accuracies[0] = 0.0f; + topology->grouping_accuracies[1] = 0.01f; + topology->grouping_accuracies[2] = 0.02f; + topology->grouping_accuracies[3] = 0.05f; + topology->grouping_accuracies[4] = 0.1f; + topology->grouping_nbaccuracies = 5; + + hwloc_localeswitch_init(); + env = getenv("HWLOC_GROUPING_ACCURACY"); + if (!env) { + /* only use 0.0 */ + topology->grouping_nbaccuracies = 1; + } else if (strcmp(env, "try")) { + /* use the given value */ + topology->grouping_nbaccuracies = 1; + topology->grouping_accuracies[0] = (float) atof(env); + } /* otherwise try all values */ + hwloc_localeswitch_fini(); + + topology->grouping_verbose = 0; + env = getenv("HWLOC_GROUPING_VERBOSE"); + if (env) + topology->grouping_verbose = atoi(env); + } +} + +static void hwloc_internal_distances_free(struct hwloc_internal_distances_s *dist) +{ + free(dist->indexes); + free(dist->objs); + free(dist->values); + free(dist); +} + +/* called during topology destroy */ +void hwloc_internal_distances_destroy(struct hwloc_topology * topology) +{ + struct 
hwloc_internal_distances_s *dist, *next = topology->first_dist; + while ((dist = next) != NULL) { + next = dist->next; + hwloc_internal_distances_free(dist); + } + topology->first_dist = topology->last_dist = NULL; +} + +static int hwloc_internal_distances_dup_one(struct hwloc_topology *new, struct hwloc_internal_distances_s *olddist) +{ + struct hwloc_tma *tma = new->tma; + struct hwloc_internal_distances_s *newdist; + unsigned nbobjs = olddist->nbobjs; + + newdist = hwloc_tma_malloc(tma, sizeof(*newdist)); + if (!newdist) + return -1; + + newdist->type = olddist->type; + newdist->nbobjs = nbobjs; + newdist->kind = olddist->kind; + newdist->id = olddist->id; + + newdist->indexes = hwloc_tma_malloc(tma, nbobjs * sizeof(*newdist->indexes)); + newdist->objs = hwloc_tma_calloc(tma, nbobjs * sizeof(*newdist->objs)); + newdist->objs_are_valid = 0; + newdist->values = hwloc_tma_malloc(tma, nbobjs*nbobjs * sizeof(*newdist->values)); + if (!newdist->indexes || !newdist->objs || !newdist->values) { + assert(!tma || !tma->dontfree); /* this tma cannot fail to allocate */ + hwloc_internal_distances_free(newdist); + return -1; + } + + memcpy(newdist->indexes, olddist->indexes, nbobjs * sizeof(*newdist->indexes)); + memcpy(newdist->values, olddist->values, nbobjs*nbobjs * sizeof(*newdist->values)); + + newdist->next = NULL; + newdist->prev = new->last_dist; + if (new->last_dist) + new->last_dist->next = newdist; + else + new->first_dist = newdist; + new->last_dist = newdist; + + return 0; +} + +/* This function may be called with topology->tma set, it cannot free() or realloc() */ +int hwloc_internal_distances_dup(struct hwloc_topology *new, struct hwloc_topology *old) +{ + struct hwloc_internal_distances_s *olddist; + int err; + new->next_dist_id = old->next_dist_id; + for(olddist = old->first_dist; olddist; olddist = olddist->next) { + err = hwloc_internal_distances_dup_one(new, olddist); + if (err < 0) + return err; + } + return 0; +} + 
+/****************************************************** + * Remove distances from the topology + */ + +int hwloc_distances_remove(hwloc_topology_t topology) +{ + if (!topology->is_loaded) { + errno = EINVAL; + return -1; + } + hwloc_internal_distances_destroy(topology); + return 0; +} + +int hwloc_distances_remove_by_depth(hwloc_topology_t topology, int depth) +{ + struct hwloc_internal_distances_s *dist, *next; + hwloc_obj_type_t type; + + if (!topology->is_loaded) { + errno = EINVAL; + return -1; + } + + /* switch back to types since we don't support groups for now */ + type = hwloc_get_depth_type(topology, depth); + if (type == (hwloc_obj_type_t)-1) { + errno = EINVAL; + return -1; + } + + next = topology->first_dist; + while ((dist = next) != NULL) { + next = dist->next; + if (dist->type == type) { + if (next) + next->prev = dist->prev; + else + topology->last_dist = dist->prev; + if (dist->prev) + dist->prev->next = dist->next; + else + topology->first_dist = dist->next; + hwloc_internal_distances_free(dist); + } + } + + return 0; +} + +/****************************************************** + * Add distances to the topology + */ + +static void +hwloc__groups_by_distances(struct hwloc_topology *topology, unsigned nbobjs, struct hwloc_obj **objs, uint64_t *values, unsigned long kind, unsigned nbaccuracies, float *accuracies, int needcheck); + +/* insert a distance matrix in the topology. + * the caller gives us the distances and objs pointers, we'll free them later. 
+ */ +static int +hwloc_internal_distances__add(hwloc_topology_t topology, + hwloc_obj_type_t type, unsigned nbobjs, hwloc_obj_t *objs, uint64_t *indexes, uint64_t *values, + unsigned long kind) +{ + struct hwloc_internal_distances_s *dist = calloc(1, sizeof(*dist)); + if (!dist) + goto err; + + dist->type = type; + dist->nbobjs = nbobjs; + dist->kind = kind; + + if (!objs) { + assert(indexes); + /* we only have indexes, we'll refresh objs from there */ + dist->indexes = indexes; + dist->objs = calloc(nbobjs, sizeof(hwloc_obj_t)); + if (!dist->objs) + goto err_with_dist; + dist->objs_are_valid = 0; + + } else { + unsigned i; + assert(!indexes); + /* we only have objs, generate the indexes arrays so that we can refresh objs later */ + dist->objs = objs; + dist->objs_are_valid = 1; + dist->indexes = malloc(nbobjs * sizeof(*dist->indexes)); + if (!dist->indexes) + goto err_with_dist; + if (dist->type == HWLOC_OBJ_PU || dist->type == HWLOC_OBJ_NUMANODE) { + for(i=0; iindexes[i] = objs[i]->os_index; + } else { + for(i=0; iindexes[i] = objs[i]->gp_index; + } + } + + dist->values = values; + + dist->id = topology->next_dist_id++; + + if (topology->last_dist) + topology->last_dist->next = dist; + else + topology->first_dist = dist; + dist->prev = topology->last_dist; + dist->next = NULL; + topology->last_dist = dist; + return 0; + + err_with_dist: + free(dist); + err: + free(objs); + free(indexes); + free(values); + return -1; +} + +int hwloc_internal_distances_add_by_index(hwloc_topology_t topology, + hwloc_obj_type_t type, unsigned nbobjs, uint64_t *indexes, uint64_t *values, + unsigned long kind, unsigned long flags) +{ + if (nbobjs < 2) { + errno = EINVAL; + goto err; + } + + /* cannot group without objects, + * and we don't group from XML anyway since the hwloc that generated the XML should have grouped already. 
+ */ + if (flags & HWLOC_DISTANCES_ADD_FLAG_GROUP) { + errno = EINVAL; + goto err; + } + + return hwloc_internal_distances__add(topology, type, nbobjs, NULL, indexes, values, kind); + + err: + free(indexes); + free(values); + return -1; +} + +int hwloc_internal_distances_add(hwloc_topology_t topology, + unsigned nbobjs, hwloc_obj_t *objs, uint64_t *values, + unsigned long kind, unsigned long flags) +{ + if (nbobjs < 2) { + errno = EINVAL; + goto err; + } + + if (topology->grouping && (flags & HWLOC_DISTANCES_ADD_FLAG_GROUP)) { + float full_accuracy = 0.f; + float *accuracies; + unsigned nbaccuracies; + + if (flags & HWLOC_DISTANCES_ADD_FLAG_GROUP_INACCURATE) { + accuracies = topology->grouping_accuracies; + nbaccuracies = topology->grouping_nbaccuracies; + } else { + accuracies = &full_accuracy; + nbaccuracies = 1; + } + + if (topology->grouping_verbose) { + unsigned i, j; + int gp = (objs[0]->type != HWLOC_OBJ_NUMANODE && objs[0]->type != HWLOC_OBJ_PU); + fprintf(stderr, "Trying to group objects using distance matrix:\n"); + fprintf(stderr, "%s", gp ? 
"gp_index" : "os_index"); + for(j=0; jgp_index : objs[j]->os_index)); + fprintf(stderr, "\n"); + for(i=0; igp_index : objs[i]->os_index)); + for(j=0; jtype, nbobjs, objs, NULL, values, kind); + + err: + free(objs); + free(values); + return -1; +} + +#define HWLOC_DISTANCES_KIND_FROM_ALL (HWLOC_DISTANCES_KIND_FROM_OS|HWLOC_DISTANCES_KIND_FROM_USER) +#define HWLOC_DISTANCES_KIND_MEANS_ALL (HWLOC_DISTANCES_KIND_MEANS_LATENCY|HWLOC_DISTANCES_KIND_MEANS_BANDWIDTH) +#define HWLOC_DISTANCES_KIND_ALL (HWLOC_DISTANCES_KIND_FROM_ALL|HWLOC_DISTANCES_KIND_MEANS_ALL) +#define HWLOC_DISTANCES_ADD_FLAG_ALL (HWLOC_DISTANCES_ADD_FLAG_GROUP|HWLOC_DISTANCES_ADD_FLAG_GROUP_INACCURATE) + +/* The actual function exported to the user + */ +int hwloc_distances_add(hwloc_topology_t topology, + unsigned nbobjs, hwloc_obj_t *objs, hwloc_uint64_t *values, + unsigned long kind, unsigned long flags) +{ + hwloc_obj_type_t type; + unsigned i; + uint64_t *_values; + hwloc_obj_t *_objs; + int err; + + if (nbobjs < 2 || !objs || !values || !topology->is_loaded) { + errno = EINVAL; + return -1; + } + if ((kind & ~HWLOC_DISTANCES_KIND_ALL) + || hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_FROM_ALL) != 1 + || hwloc_weight_long(kind & HWLOC_DISTANCES_KIND_MEANS_ALL) != 1 + || (flags & ~HWLOC_DISTANCES_ADD_FLAG_ALL)) { + errno = EINVAL; + return -1; + } + + /* no strict need to check for duplicates, things shouldn't break */ + + type = objs[0]->type; + if (type == HWLOC_OBJ_GROUP) { + /* not supported yet, would require we save the subkind together with the type. 
*/ + errno = EINVAL; + return -1; + } + + for(i=1; itype != type) { + errno = EINVAL; + return -1; + } + + /* copy the input arrays and give them to the topology */ + _objs = malloc(nbobjs*sizeof(hwloc_obj_t)); + _values = malloc(nbobjs*nbobjs*sizeof(*_values)); + if (!_objs || !_values) + goto out_with_arrays; + + memcpy(_objs, objs, nbobjs*sizeof(hwloc_obj_t)); + memcpy(_values, values, nbobjs*nbobjs*sizeof(*_values)); + err = hwloc_internal_distances_add(topology, nbobjs, _objs, _values, kind, flags); + if (err < 0) + goto out; /* _objs and _values freed in hwloc_internal_distances_add() */ + + /* in case we added some groups, see if we need to reconnect */ + hwloc_topology_reconnect(topology, 0); + + return 0; + + out_with_arrays: + free(_values); + free(_objs); + out: + return -1; +} + +/****************************************************** + * Refresh objects in distances + */ + +static hwloc_obj_t hwloc_find_obj_by_type_and_gp_index(hwloc_topology_t topology, hwloc_obj_type_t type, uint64_t gp_index) +{ + hwloc_obj_t obj = hwloc_get_obj_by_type(topology, type, 0); + while (obj) { + if (obj->gp_index == gp_index) + return obj; + obj = obj->next_cousin; + } + return NULL; +} + +static void +hwloc_internal_distances_restrict(struct hwloc_internal_distances_s *dist, + hwloc_obj_t *objs, + unsigned disappeared) +{ + unsigned nbobjs = dist->nbobjs; + unsigned i, newi; + unsigned j, newj; + + for(i=0, newi=0; ivalues[newi*(nbobjs-disappeared)+newj] = dist->values[i*nbobjs+j]; + newj++; + } + newi++; + } + + for(i=0, newi=0; iindexes[newi] = dist->indexes[i]; + newi++; + } + + dist->nbobjs -= disappeared; +} + +static int +hwloc_internal_distances_refresh_one(hwloc_topology_t topology, + struct hwloc_internal_distances_s *dist) +{ + hwloc_obj_type_t type = dist->type; + unsigned nbobjs = dist->nbobjs; + hwloc_obj_t *objs = dist->objs; + uint64_t *indexes = dist->indexes; + unsigned disappeared = 0; + unsigned i; + + if (dist->objs_are_valid) + return 0; + + 
for(i=0; iobjs_are_valid = 1; + return 0; +} + +/* This function may be called with topology->tma set, it cannot free() or realloc() */ +void +hwloc_internal_distances_refresh(hwloc_topology_t topology) +{ + struct hwloc_internal_distances_s *dist, *next; + + for(dist = topology->first_dist; dist; dist = next) { + next = dist->next; + + if (hwloc_internal_distances_refresh_one(topology, dist) < 0) { + assert(!topology->tma || !topology->tma->dontfree); /* this tma cannot fail to allocate */ + if (dist->prev) + dist->prev->next = next; + else + topology->first_dist = next; + if (next) + next->prev = dist->prev; + else + topology->last_dist = dist->prev; + hwloc_internal_distances_free(dist); + continue; + } + } +} + +void +hwloc_internal_distances_invalidate_cached_objs(hwloc_topology_t topology) +{ + struct hwloc_internal_distances_s *dist; + for(dist = topology->first_dist; dist; dist = dist->next) + dist->objs_are_valid = 0; +} + +/****************************************************** + * User API for getting distances + */ + +void +hwloc_distances_release(hwloc_topology_t topology __hwloc_attribute_unused, + struct hwloc_distances_s *distances) +{ + free(distances->values); + free(distances->objs); + free(distances); +} + +static struct hwloc_distances_s * +hwloc_distances_get_one(hwloc_topology_t topology __hwloc_attribute_unused, + struct hwloc_internal_distances_s *dist) +{ + struct hwloc_distances_s *distances; + unsigned nbobjs; + + distances = malloc(sizeof(*distances)); + if (!distances) + return NULL; + + nbobjs = distances->nbobjs = dist->nbobjs; + + distances->objs = malloc(nbobjs * sizeof(hwloc_obj_t)); + if (!distances->objs) + goto out; + memcpy(distances->objs, dist->objs, nbobjs * sizeof(hwloc_obj_t)); + + distances->values = malloc(nbobjs * nbobjs * sizeof(*distances->values)); + if (!distances->values) + goto out_with_objs; + memcpy(distances->values, dist->values, nbobjs*nbobjs*sizeof(*distances->values)); + + distances->kind = dist->kind; + 
return distances; + + out_with_objs: + free(distances->objs); + out: + free(distances); + return NULL; +} + +static int +hwloc__distances_get(hwloc_topology_t topology, + hwloc_obj_type_t type, + unsigned *nrp, struct hwloc_distances_s **distancesp, + unsigned long kind, unsigned long flags __hwloc_attribute_unused) +{ + struct hwloc_internal_distances_s *dist; + unsigned nr = 0, i; + + /* We could return the internal arrays (as const), + * but it would require to prevent removing distances between get() and free(). + * Not performance critical anyway. + */ + + if (flags) { + errno = EINVAL; + return -1; + } + + /* we could refresh only the distances that match, but we won't have many distances anyway, + * so performance is totally negligible. + * + * This is also useful in multithreaded apps that modify the topology. + * They can call any valid hwloc_distances_get() to force a refresh after + * changing the topology, so that future concurrent get() won't cause + * concurrent refresh(). 
+ */ + hwloc_internal_distances_refresh(topology); + + for(dist = topology->first_dist; dist; dist = dist->next) { + unsigned long kind_from = kind & HWLOC_DISTANCES_KIND_FROM_ALL; + unsigned long kind_means = kind & HWLOC_DISTANCES_KIND_MEANS_ALL; + + if (type != HWLOC_OBJ_TYPE_NONE && type != dist->type) + continue; + + if (kind_from && !(kind_from & dist->kind)) + continue; + if (kind_means && !(kind_means & dist->kind)) + continue; + + if (nr < *nrp) { + struct hwloc_distances_s *distances = hwloc_distances_get_one(topology, dist); + if (!distances) + goto error; + distancesp[nr] = distances; + } + nr++; + } + + for(i=nr; i<*nrp; i++) + distancesp[i] = NULL; + *nrp = nr; + return 0; + + error: + for(i=0; iis_loaded) { + errno = EINVAL; + return -1; + } + + return hwloc__distances_get(topology, HWLOC_OBJ_TYPE_NONE, nrp, distancesp, kind, flags); +} + +int +hwloc_distances_get_by_depth(hwloc_topology_t topology, int depth, + unsigned *nrp, struct hwloc_distances_s **distancesp, + unsigned long kind, unsigned long flags) +{ + hwloc_obj_type_t type; + + if (flags || !topology->is_loaded) { + errno = EINVAL; + return -1; + } + + /* switch back to types since we don't support groups for now */ + type = hwloc_get_depth_type(topology, depth); + if (type == (hwloc_obj_type_t)-1) { + errno = EINVAL; + return -1; + } + + return hwloc__distances_get(topology, type, nrp, distancesp, kind, flags); +} + +/****************************************************** + * Grouping objects according to distances + */ + +static void hwloc_report_user_distance_error(const char *msg, int line) +{ + static int reported = 0; + + if (!reported && !hwloc_hide_errors()) { + fprintf(stderr, "****************************************************************************\n"); + fprintf(stderr, "* hwloc %s was given invalid distances by the user.\n", HWLOC_VERSION); + fprintf(stderr, "*\n"); + fprintf(stderr, "* %s\n", msg); + fprintf(stderr, "* Error occurred in topology.c line %d\n", line); + 
fprintf(stderr, "*\n"); + fprintf(stderr, "* Please make sure that distances given through the programming API\n"); + fprintf(stderr, "* do not contradict any other topology information.\n"); + fprintf(stderr, "* \n"); + fprintf(stderr, "* hwloc will now ignore this invalid topology information and continue.\n"); + fprintf(stderr, "****************************************************************************\n"); + reported = 1; + } +} + +static int hwloc_compare_values(uint64_t a, uint64_t b, float accuracy) +{ + if (accuracy != 0.0f && fabsf((float)a-(float)b) < (float)a * accuracy) + return 0; + return a < b ? -1 : a == b ? 0 : 1; +} + +/* + * Place objects in groups if they are in a transitive graph of minimal values. + * Return how many groups were created, or 0 if some incomplete distance graphs were found. + */ +static unsigned +hwloc__find_groups_by_min_distance(unsigned nbobjs, + uint64_t *_values, + float accuracy, + unsigned *groupids, + int verbose) +{ + uint64_t min_distance = UINT64_MAX; + unsigned groupid = 1; + unsigned i,j,k; + unsigned skipped = 0; + +#define VALUE(i, j) _values[(i) * nbobjs + (j)] + + memset(groupids, 0, nbobjs*sizeof(*groupids)); + + /* find the minimal distance */ + for(i=0; igrouping_verbose; + + if (nbobjs <= 2) + return; + + if (!(kind & HWLOC_DISTANCES_KIND_MEANS_LATENCY)) + /* don't know use to use those for grouping */ + /* TODO hwloc__find_groups_by_max_distance() for bandwidth */ + return; + + for(i=0; itype), accuracies[i]); + if (needcheck && hwloc__check_grouping_matrix(nbobjs, _values, accuracies[i], verbose) < 0) + continue; + nbgroups = hwloc__find_groups_by_min_distance(nbobjs, _values, accuracies[i], groupids, verbose); + if (nbgroups) + break; + } + if (!nbgroups) + return; + + { + HWLOC_VLA(hwloc_obj_t, groupobjs, nbgroups); + HWLOC_VLA(unsigned, groupsizes, nbgroups); + HWLOC_VLA(uint64_t, groupvalues, nbgroups*nbgroups); + unsigned failed = 0; + + /* create new Group objects and record their size */ + 
memset(&(groupsizes[0]), 0, sizeof(groupsizes[0]) * nbgroups); + for(i=0; icpuset = hwloc_bitmap_alloc(); + group_obj->attr->group.kind = HWLOC_GROUP_KIND_DISTANCE; + group_obj->attr->group.subkind = topology->grouping_next_subkind; + for (j=0; jcpuset); + res_obj = hwloc__insert_object_by_cpuset(topology, NULL, group_obj, + (kind & HWLOC_DISTANCES_KIND_FROM_USER) ? hwloc_report_user_distance_error : hwloc_report_os_error); + /* res_obj may be NULL on failure to insert. */ + if (!res_obj) + failed++; + /* or it may be different from groupobjs if we got groups from XML import before grouping */ + groupobjs[i] = res_obj; + } + topology->grouping_next_subkind++; + + if (failed) + /* don't try to group above if we got a NULL group here, just keep this incomplete level */ + return; + + /* factorize values */ + memset(&(groupvalues[0]), 0, sizeof(groupvalues[0]) * nbgroups * nbgroups); +#undef VALUE +#define VALUE(i, j) _values[(i) * nbobjs + (j)] +#define GROUP_VALUE(i, j) groupvalues[(i) * nbgroups + (j)] + for(i=0; i +#include +#include + +#include +#ifdef HAVE_SYS_UTSNAME_H +#include +#endif +#include +#include +#include +#include +#include + +#ifdef HAVE_PROGRAM_INVOCATION_NAME +#include +extern char *program_invocation_name; +#endif +#ifdef HAVE___PROGNAME +extern char *__progname; +#endif + +int hwloc_snprintf(char *str, size_t size, const char *format, ...) +{ + int ret; + va_list ap; + static char bin; + size_t fakesize; + char *fakestr; + + /* Some systems crash on str == NULL */ + if (!size) { + str = &bin; + size = 1; + } + + va_start(ap, format); + ret = vsnprintf(str, size, format, ap); + va_end(ap); + + if (ret >= 0 && (size_t) ret != size-1) + return ret; + + /* vsnprintf returned size-1 or -1. That could be a system which reports the + * written data and not the actually required room. Try increasing buffer + * size to get the latter. 
*/ + + fakesize = size; + fakestr = NULL; + do { + fakesize *= 2; + free(fakestr); + fakestr = malloc(fakesize); + if (NULL == fakestr) + return -1; + va_start(ap, format); + errno = 0; + ret = vsnprintf(fakestr, fakesize, format, ap); + va_end(ap); + } while ((size_t) ret == fakesize-1 || (ret < 0 && (!errno || errno == ERANGE))); + + if (ret >= 0 && size) { + if (size > (size_t) ret+1) + size = ret+1; + memcpy(str, fakestr, size-1); + str[size-1] = 0; + } + free(fakestr); + + return ret; +} + +int hwloc_namecoloncmp(const char *haystack, const char *needle, size_t n) +{ + size_t i = 0; + while (*haystack && *haystack != ':') { + int ha = *haystack++; + int low_h = tolower(ha); + int ne = *needle++; + int low_n = tolower(ne); + if (low_h != low_n) + return 1; + i++; + } + return i < n; +} + +void hwloc_add_uname_info(struct hwloc_topology *topology __hwloc_attribute_unused, + void *cached_uname __hwloc_attribute_unused) +{ +#ifdef HAVE_UNAME + struct utsname _utsname, *utsname; + + if (hwloc_obj_get_info_by_name(topology->levels[0][0], "OSName")) + /* don't annotate twice */ + return; + + if (cached_uname) + utsname = (struct utsname *) cached_uname; + else { + utsname = &_utsname; + if (uname(utsname) < 0) + return; + } + + if (*utsname->sysname) + hwloc_obj_add_info(topology->levels[0][0], "OSName", utsname->sysname); + if (*utsname->release) + hwloc_obj_add_info(topology->levels[0][0], "OSRelease", utsname->release); + if (*utsname->version) + hwloc_obj_add_info(topology->levels[0][0], "OSVersion", utsname->version); + if (*utsname->nodename) + hwloc_obj_add_info(topology->levels[0][0], "HostName", utsname->nodename); + if (*utsname->machine) + hwloc_obj_add_info(topology->levels[0][0], "Architecture", utsname->machine); +#endif /* HAVE_UNAME */ +} + +char * +hwloc_progname(struct hwloc_topology *topology __hwloc_attribute_unused) +{ +#if HAVE_DECL_GETMODULEFILENAME + char name[256], *local_basename; + unsigned res = GetModuleFileName(NULL, name, sizeof(name)); 
+ if (res == sizeof(name) || !res) + return NULL; + local_basename = strrchr(name, '\\'); + if (!local_basename) + local_basename = name; + else + local_basename++; + return strdup(local_basename); +#else /* !HAVE_GETMODULEFILENAME */ + const char *name, *local_basename; +#if HAVE_DECL_GETPROGNAME + name = getprogname(); /* FreeBSD, NetBSD, some Solaris */ +#elif HAVE_DECL_GETEXECNAME + name = getexecname(); /* Solaris */ +#elif defined HAVE_PROGRAM_INVOCATION_NAME + name = program_invocation_name; /* Glibc. BGQ CNK. */ + /* could use program_invocation_short_name directly, but we have the code to remove the path below anyway */ +#elif defined HAVE___PROGNAME + name = __progname; /* fallback for most unix, used for OpenBSD */ +#else + /* TODO: _NSGetExecutablePath(path, &size) on Darwin */ + /* TODO: AIX, HPUX */ + name = NULL; +#endif + if (!name) + return NULL; + local_basename = strrchr(name, '/'); + if (!local_basename) + local_basename = name; + else + local_basename++; + return strdup(local_basename); +#endif /* !HAVE_GETMODULEFILENAME */ +} diff --git a/src/3rdparty/hwloc/src/pci-common.c b/src/3rdparty/hwloc/src/pci-common.c new file mode 100644 index 00000000..00f08a9e --- /dev/null +++ b/src/3rdparty/hwloc/src/pci-common.c @@ -0,0 +1,941 @@ +/* + * Copyright © 2009-2018 Inria. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#ifdef HAVE_UNISTD_H +#include +#endif +#include + +#if defined(HWLOC_WIN_SYS) && !defined(__CYGWIN__) +#include +#define open _open +#define read _read +#define close _close +#endif + +static void +hwloc_pci_forced_locality_parse_one(struct hwloc_topology *topology, + const char *string /* must contain a ' ' */, + unsigned *allocated) +{ + unsigned nr = topology->pci_forced_locality_nr; + unsigned domain, bus_first, bus_last, dummy; + hwloc_bitmap_t set; + char *tmp; + + if (sscanf(string, "%x:%x-%x %x", &domain, &bus_first, &bus_last, &dummy) == 4) { + /* fine */ + } else if (sscanf(string, "%x:%x %x", &domain, &bus_first, &dummy) == 3) { + bus_last = bus_first; + } else if (sscanf(string, "%x %x", &domain, &dummy) == 2) { + bus_first = 0; + bus_last = 255; + } else + return; + + tmp = strchr(string, ' '); + if (!tmp) + return; + tmp++; + + set = hwloc_bitmap_alloc(); + hwloc_bitmap_sscanf(set, tmp); + + if (!*allocated) { + topology->pci_forced_locality = malloc(sizeof(*topology->pci_forced_locality)); + if (!topology->pci_forced_locality) + goto out_with_set; /* failed to allocate, ignore this forced locality */ + *allocated = 1; + } else if (nr >= *allocated) { + struct hwloc_pci_forced_locality_s *tmplocs; + tmplocs = realloc(topology->pci_forced_locality, + 2 * *allocated * sizeof(*topology->pci_forced_locality)); + if (!tmplocs) + goto out_with_set; /* failed to allocate, ignore this forced locality */ + topology->pci_forced_locality = tmplocs; + *allocated *= 2; + } + + topology->pci_forced_locality[nr].domain = domain; + topology->pci_forced_locality[nr].bus_first = bus_first; + topology->pci_forced_locality[nr].bus_last = bus_last; + topology->pci_forced_locality[nr].cpuset = set; + topology->pci_forced_locality_nr++; + return; + + out_with_set: + hwloc_bitmap_free(set); + return; +} + +static void +hwloc_pci_forced_locality_parse(struct hwloc_topology *topology, const char 
*_env) +{ + char *env = strdup(_env); + unsigned allocated = 0; + char *tmp = env; + + while (1) { + size_t len = strcspn(tmp, ";\r\n"); + char *next = NULL; + + if (tmp[len] != '\0') { + tmp[len] = '\0'; + if (tmp[len+1] != '\0') + next = &tmp[len]+1; + } + + hwloc_pci_forced_locality_parse_one(topology, tmp, &allocated); + + if (next) + tmp = next; + else + break; + } + + free(env); +} + +void +hwloc_pci_discovery_init(struct hwloc_topology *topology) +{ + topology->need_pci_belowroot_apply_locality = 0; + + topology->pci_has_forced_locality = 0; + topology->pci_forced_locality_nr = 0; + topology->pci_forced_locality = NULL; +} + +void +hwloc_pci_discovery_prepare(struct hwloc_topology *topology) +{ + char *env; + + env = getenv("HWLOC_PCI_LOCALITY"); + if (env) { + int fd; + + topology->pci_has_forced_locality = 1; + + fd = open(env, O_RDONLY); + if (fd >= 0) { + struct stat st; + char *buffer; + int err = fstat(fd, &st); + if (!err) { + if (st.st_size <= 64*1024) { /* random limit large enough to store multiple cpusets for thousands of PUs */ + buffer = malloc(st.st_size+1); + if (read(fd, buffer, st.st_size) == st.st_size) { + buffer[st.st_size] = '\0'; + hwloc_pci_forced_locality_parse(topology, buffer); + } + free(buffer); + } else { + fprintf(stderr, "Ignoring HWLOC_PCI_LOCALITY file `%s' too large (%lu bytes)\n", + env, (unsigned long) st.st_size); + } + } + close(fd); + } else + hwloc_pci_forced_locality_parse(topology, env); + } +} + +void +hwloc_pci_discovery_exit(struct hwloc_topology *topology __hwloc_attribute_unused) +{ + unsigned i; + for(i=0; ipci_forced_locality_nr; i++) + hwloc_bitmap_free(topology->pci_forced_locality[i].cpuset); + free(topology->pci_forced_locality); + + hwloc_pci_discovery_init(topology); +} + +#ifdef HWLOC_DEBUG +static void +hwloc_pci_traverse_print_cb(void * cbdata __hwloc_attribute_unused, + struct hwloc_obj *pcidev) +{ + char busid[14]; + hwloc_obj_t parent; + + /* indent */ + parent = pcidev->parent; + while (parent) { 
+ hwloc_debug("%s", " "); + parent = parent->parent; + } + + snprintf(busid, sizeof(busid), "%04x:%02x:%02x.%01x", + pcidev->attr->pcidev.domain, pcidev->attr->pcidev.bus, pcidev->attr->pcidev.dev, pcidev->attr->pcidev.func); + + if (pcidev->type == HWLOC_OBJ_BRIDGE) { + if (pcidev->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_HOST) + hwloc_debug("HostBridge"); + else + hwloc_debug("%s Bridge [%04x:%04x]", busid, + pcidev->attr->pcidev.vendor_id, pcidev->attr->pcidev.device_id); + hwloc_debug(" to %04x:[%02x:%02x]\n", + pcidev->attr->bridge.downstream.pci.domain, pcidev->attr->bridge.downstream.pci.secondary_bus, pcidev->attr->bridge.downstream.pci.subordinate_bus); + } else + hwloc_debug("%s Device [%04x:%04x (%04x:%04x) rev=%02x class=%04x]\n", busid, + pcidev->attr->pcidev.vendor_id, pcidev->attr->pcidev.device_id, + pcidev->attr->pcidev.subvendor_id, pcidev->attr->pcidev.subdevice_id, + pcidev->attr->pcidev.revision, pcidev->attr->pcidev.class_id); +} + +static void +hwloc_pci_traverse(void * cbdata, struct hwloc_obj *tree, + void (*cb)(void * cbdata, struct hwloc_obj *)) +{ + hwloc_obj_t child; + cb(cbdata, tree); + for_each_io_child(child, tree) { + if (child->type == HWLOC_OBJ_BRIDGE) + hwloc_pci_traverse(cbdata, child, cb); + } +} +#endif /* HWLOC_DEBUG */ + +enum hwloc_pci_busid_comparison_e { + HWLOC_PCI_BUSID_LOWER, + HWLOC_PCI_BUSID_HIGHER, + HWLOC_PCI_BUSID_INCLUDED, + HWLOC_PCI_BUSID_SUPERSET +}; + +static enum hwloc_pci_busid_comparison_e +hwloc_pci_compare_busids(struct hwloc_obj *a, struct hwloc_obj *b) +{ +#ifdef HWLOC_DEBUG + if (a->type == HWLOC_OBJ_BRIDGE) + assert(a->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI); + if (b->type == HWLOC_OBJ_BRIDGE) + assert(b->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI); +#endif + + if (a->attr->pcidev.domain < b->attr->pcidev.domain) + return HWLOC_PCI_BUSID_LOWER; + if (a->attr->pcidev.domain > b->attr->pcidev.domain) + return HWLOC_PCI_BUSID_HIGHER; + + if (a->type == HWLOC_OBJ_BRIDGE + && 
b->attr->pcidev.bus >= a->attr->bridge.downstream.pci.secondary_bus + && b->attr->pcidev.bus <= a->attr->bridge.downstream.pci.subordinate_bus) + return HWLOC_PCI_BUSID_SUPERSET; + if (b->type == HWLOC_OBJ_BRIDGE + && a->attr->pcidev.bus >= b->attr->bridge.downstream.pci.secondary_bus + && a->attr->pcidev.bus <= b->attr->bridge.downstream.pci.subordinate_bus) + return HWLOC_PCI_BUSID_INCLUDED; + + if (a->attr->pcidev.bus < b->attr->pcidev.bus) + return HWLOC_PCI_BUSID_LOWER; + if (a->attr->pcidev.bus > b->attr->pcidev.bus) + return HWLOC_PCI_BUSID_HIGHER; + + if (a->attr->pcidev.dev < b->attr->pcidev.dev) + return HWLOC_PCI_BUSID_LOWER; + if (a->attr->pcidev.dev > b->attr->pcidev.dev) + return HWLOC_PCI_BUSID_HIGHER; + + if (a->attr->pcidev.func < b->attr->pcidev.func) + return HWLOC_PCI_BUSID_LOWER; + if (a->attr->pcidev.func > b->attr->pcidev.func) + return HWLOC_PCI_BUSID_HIGHER; + + /* Should never reach here. Abort on both debug builds and + non-debug builds */ + assert(0); + fprintf(stderr, "Bad assertion in hwloc %s:%d (aborting)\n", __FILE__, __LINE__); + exit(1); +} + +static void +hwloc_pci_add_object(struct hwloc_obj *parent, struct hwloc_obj **parent_io_first_child_p, struct hwloc_obj *new) +{ + struct hwloc_obj **curp, **childp; + + curp = parent_io_first_child_p; + while (*curp) { + enum hwloc_pci_busid_comparison_e comp = hwloc_pci_compare_busids(new, *curp); + switch (comp) { + case HWLOC_PCI_BUSID_HIGHER: + /* go further */ + curp = &(*curp)->next_sibling; + continue; + case HWLOC_PCI_BUSID_INCLUDED: + /* insert new below current bridge */ + hwloc_pci_add_object(*curp, &(*curp)->io_first_child, new); + return; + case HWLOC_PCI_BUSID_LOWER: + case HWLOC_PCI_BUSID_SUPERSET: { + /* insert new before current */ + new->next_sibling = *curp; + *curp = new; + new->parent = parent; + if (new->type == HWLOC_OBJ_BRIDGE) { + /* look at remaining siblings and move some below new */ + childp = &new->io_first_child; + curp = &new->next_sibling; + while (*curp) { 
+ hwloc_obj_t cur = *curp; + if (hwloc_pci_compare_busids(new, cur) == HWLOC_PCI_BUSID_LOWER) { + /* this sibling remains under root, after new. */ + if (cur->attr->pcidev.domain > new->attr->pcidev.domain + || cur->attr->pcidev.bus > new->attr->bridge.downstream.pci.subordinate_bus) + /* this sibling is even above new's subordinate bus, no other sibling could go below new */ + return; + curp = &cur->next_sibling; + } else { + /* this sibling goes under new */ + *childp = cur; + *curp = cur->next_sibling; + (*childp)->parent = new; + (*childp)->next_sibling = NULL; + childp = &(*childp)->next_sibling; + } + } + } + return; + } + } + } + /* add to the end of the list if higher than everybody */ + new->parent = parent; + new->next_sibling = NULL; + *curp = new; +} + +void +hwloc_pcidisc_tree_insert_by_busid(struct hwloc_obj **treep, + struct hwloc_obj *obj) +{ + hwloc_pci_add_object(NULL /* no parent on top of tree */, treep, obj); +} + +int +hwloc_pcidisc_tree_attach(struct hwloc_topology *topology, struct hwloc_obj *old_tree) +{ + struct hwloc_obj **next_hb_p; + enum hwloc_type_filter_e bfilter; + + if (!old_tree) + /* found nothing, exit */ + return 0; + +#ifdef HWLOC_DEBUG + hwloc_debug("%s", "\nPCI hierarchy:\n"); + hwloc_pci_traverse(NULL, old_tree, hwloc_pci_traverse_print_cb); + hwloc_debug("%s", "\n"); +#endif + + next_hb_p = &hwloc_get_root_obj(topology)->io_first_child; + while (*next_hb_p) + next_hb_p = &((*next_hb_p)->next_sibling); + + bfilter = topology->type_filter[HWLOC_OBJ_BRIDGE]; + if (bfilter == HWLOC_TYPE_FILTER_KEEP_NONE) { + *next_hb_p = old_tree; + topology->modified = 1; + goto done; + } + + /* + * tree points to all objects connected to any upstream bus in the machine. + * We now create one real hostbridge object per upstream bus. + * It's not actually a PCI device so we have to create it. 
+ */ + while (old_tree) { + /* start a new host bridge */ + struct hwloc_obj *hostbridge = hwloc_alloc_setup_object(topology, HWLOC_OBJ_BRIDGE, HWLOC_UNKNOWN_INDEX); + struct hwloc_obj **dstnextp = &hostbridge->io_first_child; + struct hwloc_obj **srcnextp = &old_tree; + struct hwloc_obj *child = *srcnextp; + unsigned short current_domain = child->attr->pcidev.domain; + unsigned char current_bus = child->attr->pcidev.bus; + unsigned char current_subordinate = current_bus; + + hwloc_debug("Starting new PCI hostbridge %04x:%02x\n", current_domain, current_bus); + + next_child: + /* remove next child from tree */ + *srcnextp = child->next_sibling; + /* append it to hostbridge */ + *dstnextp = child; + child->parent = hostbridge; + child->next_sibling = NULL; + dstnextp = &child->next_sibling; + + /* compute hostbridge secondary/subordinate buses */ + if (child->type == HWLOC_OBJ_BRIDGE + && child->attr->bridge.downstream.pci.subordinate_bus > current_subordinate) + current_subordinate = child->attr->bridge.downstream.pci.subordinate_bus; + + /* use next child if it has the same domains/bus */ + child = *srcnextp; + if (child + && child->attr->pcidev.domain == current_domain + && child->attr->pcidev.bus == current_bus) + goto next_child; + + /* finish setting up this hostbridge */ + hostbridge->attr->bridge.upstream_type = HWLOC_OBJ_BRIDGE_HOST; + hostbridge->attr->bridge.downstream_type = HWLOC_OBJ_BRIDGE_PCI; + hostbridge->attr->bridge.downstream.pci.domain = current_domain; + hostbridge->attr->bridge.downstream.pci.secondary_bus = current_bus; + hostbridge->attr->bridge.downstream.pci.subordinate_bus = current_subordinate; + hwloc_debug("New PCI hostbridge %04x:[%02x-%02x]\n", + current_domain, current_bus, current_subordinate); + + *next_hb_p = hostbridge; + next_hb_p = &hostbridge->next_sibling; + topology->modified = 1; /* needed in case somebody reconnects levels before the core calls hwloc_pci_belowroot_apply_locality() + * or if 
hwloc_pci_belowroot_apply_locality() keeps hostbridges below root. + */ + } + + done: + topology->need_pci_belowroot_apply_locality = 1; + return 0; +} + +static struct hwloc_obj * +hwloc_pci_fixup_busid_parent(struct hwloc_topology *topology __hwloc_attribute_unused, + struct hwloc_pcidev_attr_s *busid, + struct hwloc_obj *parent) +{ + /* Xeon E5v3 in cluster-on-die mode only have PCI on the first NUMA node of each package. + * but many dual-processor host report the second PCI hierarchy on 2nd NUMA of first package. + */ + if (parent->depth >= 2 + && parent->type == HWLOC_OBJ_NUMANODE + && parent->sibling_rank == 1 && parent->parent->arity == 2 + && parent->parent->type == HWLOC_OBJ_PACKAGE + && parent->parent->sibling_rank == 0 && parent->parent->parent->arity == 2) { + const char *cpumodel = hwloc_obj_get_info_by_name(parent->parent, "CPUModel"); + if (cpumodel && strstr(cpumodel, "Xeon")) { + if (!hwloc_hide_errors()) { + fprintf(stderr, "****************************************************************************\n"); + fprintf(stderr, "* hwloc %s has encountered an incorrect PCI locality information.\n", HWLOC_VERSION); + fprintf(stderr, "* PCI bus %04x:%02x is supposedly close to 2nd NUMA node of 1st package,\n", + busid->domain, busid->bus); + fprintf(stderr, "* however hwloc believes this is impossible on this architecture.\n"); + fprintf(stderr, "* Therefore the PCI bus will be moved to 1st NUMA node of 2nd package.\n"); + fprintf(stderr, "*\n"); + fprintf(stderr, "* If you feel this fixup is wrong, disable it by setting in your environment\n"); + fprintf(stderr, "* HWLOC_PCI_%04x_%02x_LOCALCPUS= (empty value), and report the problem\n", + busid->domain, busid->bus); + fprintf(stderr, "* to the hwloc's user mailing list together with the XML output of lstopo.\n"); + fprintf(stderr, "*\n"); + fprintf(stderr, "* You may silence this message by setting HWLOC_HIDE_ERRORS=1 in your environment.\n"); + fprintf(stderr, 
"****************************************************************************\n"); + } + return parent->parent->next_sibling->first_child; + } + } + + return parent; +} + +static struct hwloc_obj * +hwloc__pci_find_busid_parent(struct hwloc_topology *topology, struct hwloc_pcidev_attr_s *busid) +{ + hwloc_bitmap_t cpuset = hwloc_bitmap_alloc(); + hwloc_obj_t parent; + int forced = 0; + int noquirks = 0; + unsigned i; + int err; + + /* try to match a forced locality */ + if (topology->pci_has_forced_locality) { + for(i=0; ipci_forced_locality_nr; i++) { + if (busid->domain == topology->pci_forced_locality[i].domain + && busid->bus >= topology->pci_forced_locality[i].bus_first + && busid->bus <= topology->pci_forced_locality[i].bus_last) { + hwloc_bitmap_copy(cpuset, topology->pci_forced_locality[i].cpuset); + forced = 1; + break; + } + } + /* if pci locality was forced, even empty, don't let quirks change what the OS reports */ + noquirks = 1; + } + + /* deprecated force locality variables */ + if (!forced) { + const char *env; + char envname[256]; + /* override the cpuset with the environment if given */ + snprintf(envname, sizeof(envname), "HWLOC_PCI_%04x_%02x_LOCALCPUS", + busid->domain, busid->bus); + env = getenv(envname); + if (env) { + static int reported = 0; + if (!topology->pci_has_forced_locality && !reported) { + fprintf(stderr, "Environment variable %s is deprecated, please use HWLOC_PCI_LOCALITY instead.\n", env); + reported = 1; + } + if (*env) { + /* force the cpuset */ + hwloc_debug("Overriding localcpus using %s in the environment\n", envname); + hwloc_bitmap_sscanf(cpuset, env); + forced = 1; + } + /* if env exists, even empty, don't let quirks change what the OS reports */ + noquirks = 1; + } + } + + if (!forced) { + /* get the cpuset by asking the OS backend. 
*/ + struct hwloc_backend *backend = topology->get_pci_busid_cpuset_backend; + if (backend) + err = backend->get_pci_busid_cpuset(backend, busid, cpuset); + else + err = -1; + if (err < 0) + /* if we got nothing, assume this PCI bus is attached to the top of hierarchy */ + hwloc_bitmap_copy(cpuset, hwloc_topology_get_topology_cpuset(topology)); + } + + hwloc_debug_bitmap("Attaching PCI tree to cpuset %s\n", cpuset); + + parent = hwloc_find_insert_io_parent_by_complete_cpuset(topology, cpuset); + if (parent) { + if (!noquirks) + /* We found a valid parent. Check that the OS didn't report invalid locality */ + parent = hwloc_pci_fixup_busid_parent(topology, busid, parent); + } else { + /* Fallback to root */ + parent = hwloc_get_root_obj(topology); + } + + hwloc_bitmap_free(cpuset); + return parent; +} + +struct hwloc_obj * +hwloc_pcidisc_find_busid_parent(struct hwloc_topology *topology, + unsigned domain, unsigned bus, unsigned dev, unsigned func) +{ + struct hwloc_pcidev_attr_s busid; + busid.domain = domain; + busid.bus = bus; + busid.dev = dev; + busid.func = func; + return hwloc__pci_find_busid_parent(topology, &busid); +} + +int +hwloc_pci_belowroot_apply_locality(struct hwloc_topology *topology) +{ + struct hwloc_obj *root = hwloc_get_root_obj(topology); + struct hwloc_obj **listp, *obj; + + if (!topology->need_pci_belowroot_apply_locality) + return 0; + topology->need_pci_belowroot_apply_locality = 0; + + /* root->io_first_child contains some PCI hierarchies, any maybe some non-PCI things. + * insert the PCI trees according to their PCI-locality. 
+ */ + listp = &root->io_first_child; + while ((obj = *listp) != NULL) { + struct hwloc_pcidev_attr_s *busid; + struct hwloc_obj *parent; + + /* skip non-PCI objects */ + if (obj->type != HWLOC_OBJ_PCI_DEVICE + && !(obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI) + && !(obj->type == HWLOC_OBJ_BRIDGE && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI)) { + listp = &obj->next_sibling; + continue; + } + + if (obj->type == HWLOC_OBJ_PCI_DEVICE + || (obj->type == HWLOC_OBJ_BRIDGE + && obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI)) + busid = &obj->attr->pcidev; + else { + /* hostbridges don't have a PCI busid for looking up locality, use their first child if PCI */ + hwloc_obj_t child = obj->io_first_child; + if (child && (child->type == HWLOC_OBJ_PCI_DEVICE + || (child->type == HWLOC_OBJ_BRIDGE + && child->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI))) + busid = &obj->io_first_child->attr->pcidev; + else + continue; + } + + /* attach the object (and children) where it belongs */ + parent = hwloc__pci_find_busid_parent(topology, busid); + if (parent == root) { + /* keep this object here */ + listp = &obj->next_sibling; + } else { + /* dequeue this object */ + *listp = obj->next_sibling; + obj->next_sibling = NULL; + hwloc_insert_object_by_parent(topology, parent, obj); + } + } + + return 0; +} + +static struct hwloc_obj * +hwloc__pci_belowroot_find_by_busid(hwloc_obj_t parent, + unsigned domain, unsigned bus, unsigned dev, unsigned func) +{ + hwloc_obj_t child; + + for_each_io_child(child, parent) { + if (child->type == HWLOC_OBJ_PCI_DEVICE + || (child->type == HWLOC_OBJ_BRIDGE + && child->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI)) { + if (child->attr->pcidev.domain == domain + && child->attr->pcidev.bus == bus + && child->attr->pcidev.dev == dev + && child->attr->pcidev.func == func) + /* that's the right bus id */ + return child; + if (child->attr->pcidev.domain > domain + || 
(child->attr->pcidev.domain == domain + && child->attr->pcidev.bus > bus)) + /* bus id too high, won't find anything later, return parent */ + return parent; + if (child->type == HWLOC_OBJ_BRIDGE + && child->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI + && child->attr->bridge.downstream.pci.domain == domain + && child->attr->bridge.downstream.pci.secondary_bus <= bus + && child->attr->bridge.downstream.pci.subordinate_bus >= bus) + /* not the right bus id, but it's included in the bus below that bridge */ + return hwloc__pci_belowroot_find_by_busid(child, domain, bus, dev, func); + + } else if (child->type == HWLOC_OBJ_BRIDGE + && child->attr->bridge.upstream_type != HWLOC_OBJ_BRIDGE_PCI + && child->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI + /* non-PCI to PCI bridge, just look at the subordinate bus */ + && child->attr->bridge.downstream.pci.domain == domain + && child->attr->bridge.downstream.pci.secondary_bus <= bus + && child->attr->bridge.downstream.pci.subordinate_bus >= bus) { + /* contains our bus, recurse */ + return hwloc__pci_belowroot_find_by_busid(child, domain, bus, dev, func); + } + } + /* didn't find anything, return parent */ + return parent; +} + +struct hwloc_obj * +hwloc_pcidisc_find_by_busid(struct hwloc_topology *topology, + unsigned domain, unsigned bus, unsigned dev, unsigned func) +{ + hwloc_obj_t root = hwloc_get_root_obj(topology); + hwloc_obj_t parent = hwloc__pci_belowroot_find_by_busid(root, domain, bus, dev, func); + if (parent == root) + return NULL; + else + return parent; +} + +#define HWLOC_PCI_STATUS 0x06 +#define HWLOC_PCI_STATUS_CAP_LIST 0x10 +#define HWLOC_PCI_CAPABILITY_LIST 0x34 +#define HWLOC_PCI_CAP_LIST_ID 0 +#define HWLOC_PCI_CAP_LIST_NEXT 1 + +unsigned +hwloc_pcidisc_find_cap(const unsigned char *config, unsigned cap) +{ + unsigned char seen[256] = { 0 }; + unsigned char ptr; /* unsigned char to make sure we stay within the 256-byte config space */ + + if (!(config[HWLOC_PCI_STATUS] & 
HWLOC_PCI_STATUS_CAP_LIST)) + return 0; + + for (ptr = config[HWLOC_PCI_CAPABILITY_LIST] & ~3; + ptr; /* exit if next is 0 */ + ptr = config[ptr + HWLOC_PCI_CAP_LIST_NEXT] & ~3) { + unsigned char id; + + /* Looped around! */ + if (seen[ptr]) + break; + seen[ptr] = 1; + + id = config[ptr + HWLOC_PCI_CAP_LIST_ID]; + if (id == cap) + return ptr; + if (id == 0xff) /* exit if id is 0 or 0xff */ + break; + } + return 0; +} + +#define HWLOC_PCI_EXP_LNKSTA 0x12 +#define HWLOC_PCI_EXP_LNKSTA_SPEED 0x000f +#define HWLOC_PCI_EXP_LNKSTA_WIDTH 0x03f0 + +int +hwloc_pcidisc_find_linkspeed(const unsigned char *config, + unsigned offset, float *linkspeed) +{ + unsigned linksta, speed, width; + float lanespeed; + + memcpy(&linksta, &config[offset + HWLOC_PCI_EXP_LNKSTA], 4); + speed = linksta & HWLOC_PCI_EXP_LNKSTA_SPEED; /* PCIe generation */ + width = (linksta & HWLOC_PCI_EXP_LNKSTA_WIDTH) >> 4; /* how many lanes */ + /* PCIe Gen1 = 2.5GT/s signal-rate per lane with 8/10 encoding = 0.25GB/s data-rate per lane + * PCIe Gen2 = 5 GT/s signal-rate per lane with 8/10 encoding = 0.5 GB/s data-rate per lane + * PCIe Gen3 = 8 GT/s signal-rate per lane with 128/130 encoding = 1 GB/s data-rate per lane + * PCIe Gen4 = 16 GT/s signal-rate per lane with 128/130 encoding = 2 GB/s data-rate per lane + */ + + /* lanespeed in Gbit/s */ + if (speed <= 2) + lanespeed = 2.5f * speed * 0.8f; + else + lanespeed = 8.0f * (1<<(speed-3)) * 128/130; /* assume Gen5 will be 32 GT/s and so on */ + + /* linkspeed in GB/s */ + *linkspeed = lanespeed * width / 8; + return 0; +} + +#define HWLOC_PCI_HEADER_TYPE 0x0e +#define HWLOC_PCI_HEADER_TYPE_BRIDGE 1 +#define HWLOC_PCI_CLASS_BRIDGE_PCI 0x0604 + +hwloc_obj_type_t +hwloc_pcidisc_check_bridge_type(unsigned device_class, const unsigned char *config) +{ + unsigned char headertype; + + if (device_class != HWLOC_PCI_CLASS_BRIDGE_PCI) + return HWLOC_OBJ_PCI_DEVICE; + + headertype = config[HWLOC_PCI_HEADER_TYPE] & 0x7f; + return (headertype == 
HWLOC_PCI_HEADER_TYPE_BRIDGE) + ? HWLOC_OBJ_BRIDGE : HWLOC_OBJ_PCI_DEVICE; +} + +#define HWLOC_PCI_PRIMARY_BUS 0x18 +#define HWLOC_PCI_SECONDARY_BUS 0x19 +#define HWLOC_PCI_SUBORDINATE_BUS 0x1a + +int +hwloc_pcidisc_setup_bridge_attr(hwloc_obj_t obj, + const unsigned char *config) +{ + struct hwloc_bridge_attr_s *battr = &obj->attr->bridge; + struct hwloc_pcidev_attr_s *pattr = &battr->upstream.pci; + + if (config[HWLOC_PCI_PRIMARY_BUS] != pattr->bus) { + /* Sometimes the config space contains 00 instead of the actual primary bus number. + * Always trust the bus ID because it was built by the system which has more information + * to workaround such problems (e.g. ACPI information about PCI parent/children). + */ + hwloc_debug(" %04x:%02x:%02x.%01x bridge with (ignored) invalid PCI_PRIMARY_BUS %02x\n", + pattr->domain, pattr->bus, pattr->dev, pattr->func, config[HWLOC_PCI_PRIMARY_BUS]); + } + + battr->upstream_type = HWLOC_OBJ_BRIDGE_PCI; + battr->downstream_type = HWLOC_OBJ_BRIDGE_PCI; + battr->downstream.pci.domain = pattr->domain; + battr->downstream.pci.secondary_bus = config[HWLOC_PCI_SECONDARY_BUS]; + battr->downstream.pci.subordinate_bus = config[HWLOC_PCI_SUBORDINATE_BUS]; + + if (battr->downstream.pci.secondary_bus <= pattr->bus + || battr->downstream.pci.subordinate_bus <= pattr->bus + || battr->downstream.pci.secondary_bus > battr->downstream.pci.subordinate_bus) { + /* This should catch most cases of invalid bridge information + * (e.g. 00 for secondary and subordinate). + * Ideally we would also check that [secondary-subordinate] is included + * in the parent bridge [secondary+1:subordinate]. But that's hard to do + * because objects may be discovered out of order (especially in the fsroot case). 
+ */ + hwloc_debug(" %04x:%02x:%02x.%01x bridge has invalid secondary-subordinate buses [%02x-%02x]\n", + pattr->domain, pattr->bus, pattr->dev, pattr->func, + battr->downstream.pci.secondary_bus, battr->downstream.pci.subordinate_bus); + hwloc_free_unlinked_object(obj); + return -1; + } + + return 0; +} + +const char * +hwloc_pci_class_string(unsigned short class_id) +{ + /* See https://pci-ids.ucw.cz/read/PD/ */ + switch ((class_id & 0xff00) >> 8) { + case 0x00: + switch (class_id) { + case 0x0001: return "VGA"; + } + break; + case 0x01: + switch (class_id) { + case 0x0100: return "SCSI"; + case 0x0101: return "IDE"; + case 0x0102: return "Floppy"; + case 0x0103: return "IPI"; + case 0x0104: return "RAID"; + case 0x0105: return "ATA"; + case 0x0106: return "SATA"; + case 0x0107: return "SAS"; + case 0x0108: return "NVMExp"; + } + return "Storage"; + case 0x02: + switch (class_id) { + case 0x0200: return "Ethernet"; + case 0x0201: return "TokenRing"; + case 0x0202: return "FDDI"; + case 0x0203: return "ATM"; + case 0x0204: return "ISDN"; + case 0x0205: return "WorldFip"; + case 0x0206: return "PICMG"; + case 0x0207: return "InfiniBand"; + case 0x0208: return "Fabric"; + } + return "Network"; + case 0x03: + switch (class_id) { + case 0x0300: return "VGA"; + case 0x0301: return "XGA"; + case 0x0302: return "3D"; + } + return "Display"; + case 0x04: + switch (class_id) { + case 0x0400: return "MultimediaVideo"; + case 0x0401: return "MultimediaAudio"; + case 0x0402: return "Telephony"; + case 0x0403: return "AudioDevice"; + } + return "Multimedia"; + case 0x05: + switch (class_id) { + case 0x0500: return "RAM"; + case 0x0501: return "Flash"; + } + return "Memory"; + case 0x06: + switch (class_id) { + case 0x0600: return "HostBridge"; + case 0x0601: return "ISABridge"; + case 0x0602: return "EISABridge"; + case 0x0603: return "MicroChannelBridge"; + case 0x0604: return "PCIBridge"; + case 0x0605: return "PCMCIABridge"; + case 0x0606: return "NubusBridge"; + case 
0x0607: return "CardBusBridge"; + case 0x0608: return "RACEwayBridge"; + case 0x0609: return "SemiTransparentPCIBridge"; + case 0x060a: return "InfiniBandPCIHostBridge"; + } + return "Bridge"; + case 0x07: + switch (class_id) { + case 0x0700: return "Serial"; + case 0x0701: return "Parallel"; + case 0x0702: return "MultiportSerial"; + case 0x0703: return "Model"; + case 0x0704: return "GPIB"; + case 0x0705: return "SmartCard"; + } + return "Communication"; + case 0x08: + switch (class_id) { + case 0x0800: return "PIC"; + case 0x0801: return "DMA"; + case 0x0802: return "Timer"; + case 0x0803: return "RTC"; + case 0x0804: return "PCIHotPlug"; + case 0x0805: return "SDHost"; + case 0x0806: return "IOMMU"; + } + return "SystemPeripheral"; + case 0x09: + switch (class_id) { + case 0x0900: return "Keyboard"; + case 0x0901: return "DigitizerPen"; + case 0x0902: return "Mouse"; + case 0x0903: return "Scanern"; + case 0x0904: return "Gameport"; + } + return "Input"; + case 0x0a: + return "DockingStation"; + case 0x0b: + switch (class_id) { + case 0x0b00: return "386"; + case 0x0b01: return "486"; + case 0x0b02: return "Pentium"; +/* 0x0b03 and 0x0b04 might be Pentium and P6 ? 
*/ + case 0x0b10: return "Alpha"; + case 0x0b20: return "PowerPC"; + case 0x0b30: return "MIPS"; + case 0x0b40: return "Co-Processor"; + } + return "Processor"; + case 0x0c: + switch (class_id) { + case 0x0c00: return "FireWire"; + case 0x0c01: return "ACCESS"; + case 0x0c02: return "SSA"; + case 0x0c03: return "USB"; + case 0x0c04: return "FibreChannel"; + case 0x0c05: return "SMBus"; + case 0x0c06: return "InfiniBand"; + case 0x0c07: return "IPMI-SMIC"; + case 0x0c08: return "SERCOS"; + case 0x0c09: return "CANBUS"; + } + return "SerialBus"; + case 0x0d: + switch (class_id) { + case 0x0d00: return "IRDA"; + case 0x0d01: return "ConsumerIR"; + case 0x0d10: return "RF"; + case 0x0d11: return "Bluetooth"; + case 0x0d12: return "Broadband"; + case 0x0d20: return "802.1a"; + case 0x0d21: return "802.1b"; + } + return "Wireless"; + case 0x0e: + switch (class_id) { + case 0x0e00: return "I2O"; + } + return "Intelligent"; + case 0x0f: + return "Satellite"; + case 0x10: + return "Encryption"; + case 0x11: + return "SignalProcessing"; + case 0x12: + return "ProcessingAccelerator"; + case 0x13: + return "Instrumentation"; + case 0x40: + return "Co-Processor"; + } + return "Other"; +} diff --git a/src/3rdparty/hwloc/src/shmem.c b/src/3rdparty/hwloc/src/shmem.c new file mode 100644 index 00000000..6c507f52 --- /dev/null +++ b/src/3rdparty/hwloc/src/shmem.c @@ -0,0 +1,287 @@ +/* + * Copyright © 2017-2018 Inria. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +#include +#include +#include +#include + +#ifndef HWLOC_WIN_SYS + +#include +#ifdef HAVE_UNISTD_H +#include +#endif +#include + +#define HWLOC_SHMEM_HEADER_VERSION 1 + +struct hwloc_shmem_header { + uint32_t header_version; /* sanity check */ + uint32_t header_length; /* where the actual topology starts in the file/mapping */ + uint64_t mmap_address; /* virtual address to pass to mmap */ + uint64_t mmap_length; /* length to pass to mmap (includes the header) */ +}; + +#define HWLOC_SHMEM_MALLOC_ALIGN 8UL + +static void * +tma_shmem_malloc(struct hwloc_tma * tma, + size_t length) +{ + void *current = tma->data; + tma->data = (char*)tma->data + ((length + HWLOC_SHMEM_MALLOC_ALIGN - 1) & ~(HWLOC_SHMEM_MALLOC_ALIGN - 1)); + return current; + +} + +static void * +tma_get_length_malloc(struct hwloc_tma * tma, + size_t length) +{ + size_t *tma_length = tma->data; + *tma_length += (length + HWLOC_SHMEM_MALLOC_ALIGN - 1) & ~(HWLOC_SHMEM_MALLOC_ALIGN - 1); + return malloc(length); + +} + +int +hwloc_shmem_topology_get_length(hwloc_topology_t topology, + size_t *lengthp, + unsigned long flags) +{ + hwloc_topology_t new; + struct hwloc_tma tma; + size_t length = 0; + unsigned long pagesize = hwloc_getpagesize(); /* round-up to full page for mmap() */ + int err; + + if (flags) { + errno = EINVAL; + return -1; + } + + tma.malloc = tma_get_length_malloc; + tma.dontfree = 0; + tma.data = &length; + + err = hwloc__topology_dup(&new, topology, &tma); + if (err < 0) + return err; + hwloc_topology_destroy(new); + + *lengthp = (sizeof(struct hwloc_shmem_header) + length + pagesize - 1) & ~(pagesize - 1); + return 0; +} + +int +hwloc_shmem_topology_write(hwloc_topology_t topology, + int fd, hwloc_uint64_t fileoffset, + void *mmap_address, size_t length, + unsigned long flags) +{ + hwloc_topology_t new; + struct hwloc_tma tma; + struct hwloc_shmem_header header; + void *mmap_res; + int err; + + if (flags) { + errno = EINVAL; + return -1; + } + + /* refresh old topology distances 
so that we don't uselessly duplicate invalid distances + * without being able to free() them. + */ + hwloc_internal_distances_refresh(topology); + + header.header_version = HWLOC_SHMEM_HEADER_VERSION; + header.header_length = sizeof(header); + header.mmap_address = (uintptr_t) mmap_address; + header.mmap_length = length; + + err = lseek(fd, fileoffset, SEEK_SET); + if (err < 0) + return -1; + + err = write(fd, &header, sizeof(header)); + if (err != sizeof(header)) + return -1; + + err = ftruncate(fd, fileoffset + length); + if (err < 0) + return -1; + + mmap_res = mmap(mmap_address, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, fileoffset); + if (mmap_res == MAP_FAILED) + return -1; + if (mmap_res != mmap_address) { + munmap(mmap_res, length); + errno = EBUSY; + return -1; + } + + tma.malloc = tma_shmem_malloc; + tma.dontfree = 1; + tma.data = (char *)mmap_res + sizeof(header); + err = hwloc__topology_dup(&new, topology, &tma); + if (err < 0) + return err; + assert((char*)new == (char*)mmap_address + sizeof(header)); + + assert((char *)mmap_res <= (char *)mmap_address + length); + + /* now refresh the new distances so that adopters can use them without refreshing the R/O shmem mapping */ + hwloc_internal_distances_refresh(new); + + /* topology is saved, release resources now */ + munmap(mmap_address, length); + hwloc_components_fini(); + + return 0; +} + +int +hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp, + int fd, hwloc_uint64_t fileoffset, + void *mmap_address, size_t length, + unsigned long flags) +{ + hwloc_topology_t new, old; + struct hwloc_shmem_header header; + void *mmap_res; + int err; + + if (flags) { + errno = EINVAL; + return -1; + } + + err = lseek(fd, fileoffset, SEEK_SET); + if (err < 0) + return -1; + + err = read(fd, &header, sizeof(header)); + if (err != sizeof(header)) + return -1; + + if (header.header_version != HWLOC_SHMEM_HEADER_VERSION + || header.header_length != sizeof(header) + || header.mmap_address != (uintptr_t) mmap_address 
+ || header.mmap_length != length) { + errno = EINVAL; + return -1; + } + + mmap_res = mmap(mmap_address, length, PROT_READ, MAP_SHARED, fd, fileoffset); + if (mmap_res == MAP_FAILED) + return -1; + if (mmap_res != mmap_address) { + errno = EBUSY; + goto out_with_mmap; + } + + old = (hwloc_topology_t)((char*)mmap_address + sizeof(header)); + if (hwloc_topology_abi_check(old) < 0) { + errno = EINVAL; + goto out_with_mmap; + } + + /* enforced by dup() inside shmem_topology_write() */ + assert(old->is_loaded); + assert(old->backends == NULL); + assert(old->get_pci_busid_cpuset_backend == NULL); + + hwloc_components_init(); + + /* duplicate the topology object so that we ca change use local binding_hooks + * (those are likely not mapped at the same location in both processes). + */ + new = malloc(sizeof(struct hwloc_topology)); + if (!new) + goto out_with_components; + memcpy(new, old, sizeof(*old)); + new->tma = NULL; + new->adopted_shmem_addr = mmap_address; + new->adopted_shmem_length = length; + new->topology_abi = HWLOC_TOPOLOGY_ABI; + /* setting binding hooks will touch support arrays, so duplicate them too. 
+ * could avoid that by requesting a R/W mmap + */ + new->support.discovery = malloc(sizeof(*new->support.discovery)); + new->support.cpubind = malloc(sizeof(*new->support.cpubind)); + new->support.membind = malloc(sizeof(*new->support.membind)); + memcpy(new->support.discovery, old->support.discovery, sizeof(*new->support.discovery)); + memcpy(new->support.cpubind, old->support.cpubind, sizeof(*new->support.cpubind)); + memcpy(new->support.membind, old->support.membind, sizeof(*new->support.membind)); + hwloc_set_binding_hooks(new); + /* clear userdata callbacks pointing to the writer process' functions */ + new->userdata_export_cb = NULL; + new->userdata_import_cb = NULL; + +#ifndef HWLOC_DEBUG + if (getenv("HWLOC_DEBUG_CHECK")) +#endif + hwloc_topology_check(new); + + *topologyp = new; + return 0; + + out_with_components: + hwloc_components_fini(); + out_with_mmap: + munmap(mmap_res, length); + return -1; +} + +void +hwloc__topology_disadopt(hwloc_topology_t topology) +{ + hwloc_components_fini(); + munmap(topology->adopted_shmem_addr, topology->adopted_shmem_length); + free(topology->support.discovery); + free(topology->support.cpubind); + free(topology->support.membind); + free(topology); +} + +#else /* HWLOC_WIN_SYS */ + +int +hwloc_shmem_topology_get_length(hwloc_topology_t topology __hwloc_attribute_unused, + size_t *lengthp __hwloc_attribute_unused, + unsigned long flags __hwloc_attribute_unused) +{ + errno = ENOSYS; + return -1; +} + +int +hwloc_shmem_topology_write(hwloc_topology_t topology __hwloc_attribute_unused, + int fd __hwloc_attribute_unused, hwloc_uint64_t fileoffset __hwloc_attribute_unused, + void *mmap_address __hwloc_attribute_unused, size_t length __hwloc_attribute_unused, + unsigned long flags __hwloc_attribute_unused) +{ + errno = ENOSYS; + return -1; +} + +int +hwloc_shmem_topology_adopt(hwloc_topology_t *topologyp __hwloc_attribute_unused, + int fd __hwloc_attribute_unused, hwloc_uint64_t fileoffset __hwloc_attribute_unused, + void 
*mmap_address __hwloc_attribute_unused, size_t length __hwloc_attribute_unused, + unsigned long flags __hwloc_attribute_unused) +{ + errno = ENOSYS; + return -1; +} + +void +hwloc__topology_disadopt(hwloc_topology_t topology __hwloc_attribute_unused) +{ +} + +#endif /* HWLOC_WIN_SYS */ diff --git a/src/3rdparty/hwloc/src/static-components.h b/src/3rdparty/hwloc/src/static-components.h new file mode 100644 index 00000000..dac227a6 --- /dev/null +++ b/src/3rdparty/hwloc/src/static-components.h @@ -0,0 +1,15 @@ +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_noos_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_xml_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_synthetic_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_xml_nolibxml_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_windows_component; +HWLOC_DECLSPEC extern const struct hwloc_component hwloc_x86_component; +static const struct hwloc_component * hwloc_static_components[] = { + &hwloc_noos_component, + &hwloc_xml_component, + &hwloc_synthetic_component, + &hwloc_xml_nolibxml_component, + &hwloc_windows_component, + &hwloc_x86_component, + NULL +}; diff --git a/src/3rdparty/hwloc/src/topology-noos.c b/src/3rdparty/hwloc/src/topology-noos.c new file mode 100644 index 00000000..77871eb1 --- /dev/null +++ b/src/3rdparty/hwloc/src/topology-noos.c @@ -0,0 +1,65 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2017 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +#include +#include +#include + +static int +hwloc_look_noos(struct hwloc_backend *backend) +{ + struct hwloc_topology *topology = backend->topology; + int nbprocs; + + if (topology->levels[0][0]->cpuset) + /* somebody discovered things */ + return -1; + + nbprocs = hwloc_fallback_nbprocessors(topology); + if (nbprocs >= 1) + topology->support.discovery->pu = 1; + else + nbprocs = 1; + + hwloc_alloc_root_sets(topology->levels[0][0]); + hwloc_setup_pu_level(topology, nbprocs); + hwloc_add_uname_info(topology, NULL); + return 0; +} + +static struct hwloc_backend * +hwloc_noos_component_instantiate(struct hwloc_disc_component *component, + const void *_data1 __hwloc_attribute_unused, + const void *_data2 __hwloc_attribute_unused, + const void *_data3 __hwloc_attribute_unused) +{ + struct hwloc_backend *backend; + backend = hwloc_backend_alloc(component); + if (!backend) + return NULL; + backend->discover = hwloc_look_noos; + return backend; +} + +static struct hwloc_disc_component hwloc_noos_disc_component = { + HWLOC_DISC_COMPONENT_TYPE_CPU, + "no_os", + HWLOC_DISC_COMPONENT_TYPE_GLOBAL, + hwloc_noos_component_instantiate, + 40, /* lower than native OS component, higher than globals */ + 1, + NULL +}; + +const struct hwloc_component hwloc_noos_component = { + HWLOC_COMPONENT_ABI, + NULL, NULL, + HWLOC_COMPONENT_TYPE_DISC, + 0, + &hwloc_noos_disc_component +}; diff --git a/src/3rdparty/hwloc/src/topology-synthetic.c b/src/3rdparty/hwloc/src/topology-synthetic.c new file mode 100644 index 00000000..1fe334d1 --- /dev/null +++ b/src/3rdparty/hwloc/src/topology-synthetic.c @@ -0,0 +1,1521 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2019 Inria. All rights reserved. + * Copyright © 2009-2010 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#ifdef HAVE_STRINGS_H +#include +#endif + +struct hwloc_synthetic_attr_s { + hwloc_obj_type_t type; + unsigned depth; /* For caches/groups */ + hwloc_obj_cache_type_t cachetype; /* For caches */ + hwloc_uint64_t memorysize; /* For caches/memory */ +}; + +struct hwloc_synthetic_indexes_s { + /* the indexes= attribute before parsing */ + const char *string; + unsigned long string_length; + /* the array of explicit indexes after parsing */ + unsigned *array; + + /* used while filling the topology */ + unsigned next; /* id of the next object for that level */ +}; + +struct hwloc_synthetic_level_data_s { + unsigned arity; + unsigned long totalwidth; + + struct hwloc_synthetic_attr_s attr; + struct hwloc_synthetic_indexes_s indexes; + + struct hwloc_synthetic_attached_s { + struct hwloc_synthetic_attr_s attr; + + struct hwloc_synthetic_attached_s *next; + } *attached; +}; + +struct hwloc_synthetic_backend_data_s { + /* synthetic backend parameters */ + char *string; + + unsigned long numa_attached_nr; + struct hwloc_synthetic_indexes_s numa_attached_indexes; + +#define HWLOC_SYNTHETIC_MAX_DEPTH 128 + struct hwloc_synthetic_level_data_s level[HWLOC_SYNTHETIC_MAX_DEPTH]; +}; + +struct hwloc_synthetic_intlv_loop_s { + unsigned step; + unsigned nb; + unsigned level_depth; +}; + +static void +hwloc_synthetic_process_indexes(struct hwloc_synthetic_backend_data_s *data, + struct hwloc_synthetic_indexes_s *indexes, + unsigned long total, + int verbose) +{ + const char *attr = indexes->string; + unsigned long length = indexes->string_length; + unsigned *array = NULL; + size_t i; + + if (!attr) + return; + + array = calloc(total, sizeof(*array)); + if (!array) { + if (verbose) + fprintf(stderr, "Failed to allocate synthetic index array of size %lu\n", total); + goto out; + } + + i = strspn(attr, "0123456789,"); + if (i == length) { + /* explicit array of indexes */ + + for(i=0; iarray = array; + + } 
else { + /* interleaving */ + unsigned nr_loops = 1, cur_loop; + unsigned minstep = total; + unsigned long nbs = 1; + unsigned j, mul; + const char *tmp; + + tmp = attr; + while (tmp) { + tmp = strchr(tmp, ':'); + if (!tmp || tmp >= attr+length) + break; + nr_loops++; + tmp++; + } + + { + /* nr_loops colon-separated fields, but we may need one more at the end */ + HWLOC_VLA(struct hwloc_synthetic_intlv_loop_s, loops, nr_loops+1); + + if (*attr >= '0' && *attr <= '9') { + /* interleaving as x*y:z*t:... */ + unsigned step, nb; + + tmp = attr; + cur_loop = 0; + while (tmp) { + char *tmp2, *tmp3; + step = (unsigned) strtol(tmp, &tmp2, 0); + if (tmp2 == tmp || *tmp2 != '*') { + if (verbose) + fprintf(stderr, "Failed to read synthetic index interleaving loop '%s' without number before '*'\n", tmp); + goto out_with_array; + } + if (!step) { + if (verbose) + fprintf(stderr, "Invalid interleaving loop with step 0 at '%s'\n", tmp); + goto out_with_array; + } + tmp2++; + nb = (unsigned) strtol(tmp2, &tmp3, 0); + if (tmp3 == tmp2 || (*tmp3 && *tmp3 != ':' && *tmp3 != ')' && *tmp3 != ' ')) { + if (verbose) + fprintf(stderr, "Failed to read synthetic index interleaving loop '%s' without number between '*' and ':'\n", tmp); + goto out_with_array; + } + if (!nb) { + if (verbose) + fprintf(stderr, "Invalid interleaving loop with number 0 at '%s'\n", tmp2); + goto out_with_array; + } + loops[cur_loop].step = step; + loops[cur_loop].nb = nb; + if (step < minstep) + minstep = step; + nbs *= nb; + cur_loop++; + if (*tmp3 == ')' || *tmp3 == ' ') + break; + tmp = (const char*) (tmp3+1); + } + + } else { + /* interleaving as type1:type2:... 
*/ + hwloc_obj_type_t type; + union hwloc_obj_attr_u attrs; + int err; + + /* find level depths for each interleaving loop */ + tmp = attr; + cur_loop = 0; + while (tmp) { + err = hwloc_type_sscanf(tmp, &type, &attrs, sizeof(attrs)); + if (err < 0) { + if (verbose) + fprintf(stderr, "Failed to read synthetic index interleaving loop type '%s'\n", tmp); + goto out_with_array; + } + if (type == HWLOC_OBJ_MISC || type == HWLOC_OBJ_BRIDGE || type == HWLOC_OBJ_PCI_DEVICE || type == HWLOC_OBJ_OS_DEVICE) { + if (verbose) + fprintf(stderr, "Misc object type disallowed in synthetic index interleaving loop type '%s'\n", tmp); + goto out_with_array; + } + for(i=0; ; i++) { + if (!data->level[i].arity) { + loops[cur_loop].level_depth = (unsigned)-1; + break; + } + if (type != data->level[i].attr.type) + continue; + if (type == HWLOC_OBJ_GROUP + && attrs.group.depth != (unsigned) -1 + && attrs.group.depth != data->level[i].attr.depth) + continue; + loops[cur_loop].level_depth = (unsigned)i; + break; + } + if (loops[cur_loop].level_depth == (unsigned)-1) { + if (verbose) + fprintf(stderr, "Failed to find level for synthetic index interleaving loop type '%s'\n", + tmp); + goto out_with_array; + } + tmp = strchr(tmp, ':'); + if (!tmp || tmp > attr+length) + break; + tmp++; + cur_loop++; + } + + /* compute actual loop step/nb */ + for(cur_loop=0; cur_loop prevdepth) + prevdepth = loops[i].level_depth; + } + step = total / data->level[mydepth].totalwidth; /* number of objects below us */ + nb = data->level[mydepth].totalwidth / data->level[prevdepth].totalwidth; /* number of us within parent */ + + loops[cur_loop].step = step; + loops[cur_loop].nb = nb; + assert(nb); + assert(step); + if (step < minstep) + minstep = step; + nbs *= nb; + } + } + assert(nbs); + + if (nbs != total) { + /* one loop of total/nbs steps is missing, add it if it's just the smallest one */ + if (minstep == total/nbs) { + loops[nr_loops].step = 1; + loops[nr_loops].nb = total/nbs; + nr_loops++; + } else { + if 
(verbose) + fprintf(stderr, "Invalid index interleaving total width %lu instead of %lu\n", nbs, total); + goto out_with_array; + } + } + + /* generate the array of indexes */ + mul = 1; + for(i=0; i= total) { + if (verbose) + fprintf(stderr, "Invalid index interleaving generates out-of-range index %u\n", array[j]); + goto out_with_array; + } + if (!array[j] && j) { + if (verbose) + fprintf(stderr, "Invalid index interleaving generates duplicate index values\n"); + goto out_with_array; + } + } + + indexes->array = array; + } + } + + return; + + out_with_array: + free(array); + out: + return; +} + +static hwloc_uint64_t +hwloc_synthetic_parse_memory_attr(const char *attr, const char **endp) +{ + const char *endptr; + hwloc_uint64_t size; + size = strtoull(attr, (char **) &endptr, 0); + if (!hwloc_strncasecmp(endptr, "TB", 2)) { + size <<= 40; + endptr += 2; + } else if (!hwloc_strncasecmp(endptr, "GB", 2)) { + size <<= 30; + endptr += 2; + } else if (!hwloc_strncasecmp(endptr, "MB", 2)) { + size <<= 20; + endptr += 2; + } else if (!hwloc_strncasecmp(endptr, "kB", 2)) { + size <<= 10; + endptr += 2; + } + *endp = endptr; + return size; +} + +static int +hwloc_synthetic_parse_attrs(const char *attrs, const char **next_posp, + struct hwloc_synthetic_attr_s *sattr, + struct hwloc_synthetic_indexes_s *sind, + int verbose) +{ + hwloc_obj_type_t type = sattr->type; + const char *next_pos; + hwloc_uint64_t memorysize = 0; + const char *index_string = NULL; + size_t index_string_length = 0; + + next_pos = (const char *) strchr(attrs, ')'); + if (!next_pos) { + if (verbose) + fprintf(stderr, "Missing attribute closing bracket in synthetic string doesn't have a number of objects at '%s'\n", attrs); + errno = EINVAL; + return -1; + } + + while (')' != *attrs) { + int iscache = hwloc__obj_type_is_cache(type); + + if (iscache && !strncmp("size=", attrs, 5)) { + memorysize = hwloc_synthetic_parse_memory_attr(attrs+5, &attrs); + + } else if (!iscache && !strncmp("memory=", attrs, 
7)) { + memorysize = hwloc_synthetic_parse_memory_attr(attrs+7, &attrs); + + } else if (!strncmp("indexes=", attrs, 8)) { + index_string = attrs+8; + attrs += 8; + index_string_length = strcspn(attrs, " )"); + attrs += index_string_length; + + } else { + if (verbose) + fprintf(stderr, "Unknown attribute at '%s'\n", attrs); + errno = EINVAL; + return -1; + } + + if (' ' == *attrs) + attrs++; + else if (')' != *attrs) { + if (verbose) + fprintf(stderr, "Missing parameter separator at '%s'\n", attrs); + errno = EINVAL; + return -1; + } + } + + sattr->memorysize = memorysize; + + if (index_string) { + if (sind->string && verbose) + fprintf(stderr, "Overwriting duplicate indexes attribute with last occurence\n"); + sind->string = index_string; + sind->string_length = (unsigned long)index_string_length; + } + + *next_posp = next_pos+1; + return 0; +} + +/* frees level until arity = 0 */ +static void +hwloc_synthetic_free_levels(struct hwloc_synthetic_backend_data_s *data) +{ + unsigned i; + for(i=0; ilevel[i]; + struct hwloc_synthetic_attached_s **pprev = &curlevel->attached; + while (*pprev) { + struct hwloc_synthetic_attached_s *cur = *pprev; + *pprev = cur->next; + free(cur); + } + free(curlevel->indexes.array); + if (!curlevel->arity) + break; + } + free(data->numa_attached_indexes.array); +} + +/* Read from description a series of integers describing a symmetrical + topology and update the hwloc_synthetic_backend_data_s accordingly. On + success, return zero. 
*/ +static int +hwloc_backend_synthetic_init(struct hwloc_synthetic_backend_data_s *data, + const char *description) +{ + const char *pos, *next_pos; + unsigned long item, count; + unsigned i; + int type_count[HWLOC_OBJ_TYPE_MAX]; + unsigned unset; + int verbose = 0; + const char *env = getenv("HWLOC_SYNTHETIC_VERBOSE"); + int err; + unsigned long totalarity = 1; + + if (env) + verbose = atoi(env); + + data->numa_attached_nr = 0; + data->numa_attached_indexes.array = NULL; + + /* default values before we add root attributes */ + data->level[0].totalwidth = 1; + data->level[0].attr.type = HWLOC_OBJ_MACHINE; + data->level[0].indexes.string = NULL; + data->level[0].indexes.array = NULL; + data->level[0].attr.memorysize = 0; + data->level[0].attached = NULL; + type_count[HWLOC_OBJ_MACHINE] = 1; + if (*description == '(') { + err = hwloc_synthetic_parse_attrs(description+1, &description, &data->level[0].attr, &data->level[0].indexes, verbose); + if (err < 0) + return err; + } + + data->numa_attached_indexes.string = NULL; + data->numa_attached_indexes.array = NULL; + + for (pos = description, count = 1; *pos; pos = next_pos) { + hwloc_obj_type_t type = HWLOC_OBJ_TYPE_NONE; + union hwloc_obj_attr_u attrs; + + /* initialize parent arity to 0 so that the levels are not infinite */ + data->level[count-1].arity = 0; + + while (*pos == ' ') + pos++; + + if (!*pos) + break; + + if (*pos == '[') { + /* attached */ + struct hwloc_synthetic_attached_s *attached, **pprev; + char *attr; + + pos++; + + if (hwloc_type_sscanf(pos, &type, &attrs, sizeof(attrs)) < 0) { + if (verbose) + fprintf(stderr, "Synthetic string with unknown attached object type at '%s'\n", pos); + errno = EINVAL; + goto error; + } + if (type != HWLOC_OBJ_NUMANODE) { + if (verbose) + fprintf(stderr, "Synthetic string with disallowed attached object type at '%s'\n", pos); + errno = EINVAL; + goto error; + } + data->numa_attached_nr += data->level[count-1].totalwidth; + + attached = malloc(sizeof(*attached)); + if 
(attached) { + attached->attr.type = type; + attached->attr.memorysize = 0; + /* attached->attr.depth and .cachetype unused */ + attached->next = NULL; + pprev = &data->level[count-1].attached; + while (*pprev) + pprev = &((*pprev)->next); + *pprev = attached; + } + + next_pos = strchr(pos, ']'); + if (!next_pos) { + if (verbose) + fprintf(stderr,"Synthetic string doesn't have a closing `]' after attached object type at '%s'\n", pos); + errno = EINVAL; + goto error; + } + + attr = strchr(pos, '('); + if (attr && attr < next_pos && attached) { + const char *dummy; + err = hwloc_synthetic_parse_attrs(attr+1, &dummy, &attached->attr, &data->numa_attached_indexes, verbose); + if (err < 0) + goto error; + } + + next_pos++; + continue; + } + + /* normal level */ + + /* reset defaults */ + data->level[count].indexes.string = NULL; + data->level[count].indexes.array = NULL; + data->level[count].attached = NULL; + + if (*pos < '0' || *pos > '9') { + if (hwloc_type_sscanf(pos, &type, &attrs, sizeof(attrs)) < 0) { + if (!strncmp(pos, "Die", 3) || !strncmp(pos, "Tile", 4) || !strncmp(pos, "Module", 6)) { + type = HWLOC_OBJ_GROUP; + } else { + /* FIXME: allow generic "Cache" string? 
would require to deal with possibly duplicate cache levels */ + if (verbose) + fprintf(stderr, "Synthetic string with unknown object type at '%s'\n", pos); + errno = EINVAL; + goto error; + } + } + if (type == HWLOC_OBJ_MACHINE || type == HWLOC_OBJ_MISC || type == HWLOC_OBJ_BRIDGE || type == HWLOC_OBJ_PCI_DEVICE || type == HWLOC_OBJ_OS_DEVICE) { + if (verbose) + fprintf(stderr, "Synthetic string with disallowed object type at '%s'\n", pos); + errno = EINVAL; + goto error; + } + + next_pos = strchr(pos, ':'); + if (!next_pos) { + if (verbose) + fprintf(stderr,"Synthetic string doesn't have a `:' after object type at '%s'\n", pos); + errno = EINVAL; + goto error; + } + pos = next_pos + 1; + } + + data->level[count].attr.type = type; + data->level[count].attr.depth = (unsigned) -1; + data->level[count].attr.cachetype = (hwloc_obj_cache_type_t) -1; + if (hwloc__obj_type_is_cache(type)) { + /* these are always initialized */ + data->level[count].attr.depth = attrs.cache.depth; + data->level[count].attr.cachetype = attrs.cache.type; + } else if (type == HWLOC_OBJ_GROUP) { + /* could be -1 but will be set below */ + data->level[count].attr.depth = attrs.group.depth; + } + + /* number of normal children */ + item = strtoul(pos, (char **)&next_pos, 0); + if (next_pos == pos) { + if (verbose) + fprintf(stderr,"Synthetic string doesn't have a number of objects at '%s'\n", pos); + errno = EINVAL; + goto error; + } + if (!item) { + if (verbose) + fprintf(stderr,"Synthetic string with disallow 0 number of objects at '%s'\n", pos); + errno = EINVAL; + goto error; + } + + totalarity *= item; + data->level[count].totalwidth = totalarity; + data->level[count].indexes.string = NULL; + data->level[count].indexes.array = NULL; + data->level[count].attr.memorysize = 0; + if (*next_pos == '(') { + err = hwloc_synthetic_parse_attrs(next_pos+1, &next_pos, &data->level[count].attr, &data->level[count].indexes, verbose); + if (err < 0) + goto error; + } + + if (count + 1 >= 
HWLOC_SYNTHETIC_MAX_DEPTH) { + if (verbose) + fprintf(stderr,"Too many synthetic levels, max %d\n", HWLOC_SYNTHETIC_MAX_DEPTH); + errno = EINVAL; + goto error; + } + if (item > UINT_MAX) { + if (verbose) + fprintf(stderr,"Too big arity, max %u\n", UINT_MAX); + errno = EINVAL; + goto error; + } + + data->level[count-1].arity = (unsigned)item; + count++; + } + + if (data->level[count-1].attr.type != HWLOC_OBJ_TYPE_NONE && data->level[count-1].attr.type != HWLOC_OBJ_PU) { + if (verbose) + fprintf(stderr, "Synthetic string cannot use non-PU type for last level\n"); + errno = EINVAL; + return -1; + } + data->level[count-1].attr.type = HWLOC_OBJ_PU; + + for(i=HWLOC_OBJ_TYPE_MIN; i0; i--) { + hwloc_obj_type_t type = data->level[i].attr.type; + if (type != HWLOC_OBJ_TYPE_NONE) { + type_count[type]++; + } + } + + /* sanity checks */ + if (!type_count[HWLOC_OBJ_PU]) { + if (verbose) + fprintf(stderr, "Synthetic string missing ending number of PUs\n"); + errno = EINVAL; + return -1; + } else if (type_count[HWLOC_OBJ_PU] > 1) { + if (verbose) + fprintf(stderr, "Synthetic string cannot have several PU levels\n"); + errno = EINVAL; + return -1; + } + if (type_count[HWLOC_OBJ_PACKAGE] > 1) { + if (verbose) + fprintf(stderr, "Synthetic string cannot have several package levels\n"); + errno = EINVAL; + return -1; + } + if (type_count[HWLOC_OBJ_NUMANODE] > 1) { + if (verbose) + fprintf(stderr, "Synthetic string cannot have several NUMA node levels\n"); + errno = EINVAL; + return -1; + } + if (type_count[HWLOC_OBJ_NUMANODE] && data->numa_attached_nr) { + if (verbose) + fprintf(stderr,"Synthetic string cannot have NUMA nodes both as a level and attached\n"); + errno = EINVAL; + return -1; + } + if (type_count[HWLOC_OBJ_CORE] > 1) { + if (verbose) + fprintf(stderr, "Synthetic string cannot have several core levels\n"); + errno = EINVAL; + return -1; + } + + /* deal with missing intermediate levels */ + unset = 0; + for(i=1; ilevel[i].attr.type == HWLOC_OBJ_TYPE_NONE) + unset++; + } + 
if (unset && unset != count-2) { + if (verbose) + fprintf(stderr, "Synthetic string cannot mix unspecified and specified types for levels\n"); + errno = EINVAL; + return -1; + } + if (unset) { + /* we want in priority: numa, package, core, up to 3 caches, groups */ + unsigned _count = count; + unsigned neednuma = 0; + unsigned needpack = 0; + unsigned needcore = 0; + unsigned needcaches = 0; + unsigned needgroups = 0; + /* 2 levels for machine and PU */ + _count -= 2; + + neednuma = (_count >= 1 && !data->numa_attached_nr); + _count -= neednuma; + + needpack = (_count >= 1); + _count -= needpack; + + needcore = (_count >= 1); + _count -= needcore; + + needcaches = (_count > 4 ? 4 : _count); + _count -= needcaches; + + needgroups = _count; + + /* we place them in order: groups, package, numa, caches, core */ + for(i = 0; i < needgroups; i++) { + unsigned depth = 1 + i; + data->level[depth].attr.type = HWLOC_OBJ_GROUP; + type_count[HWLOC_OBJ_GROUP]++; + } + if (needpack) { + unsigned depth = 1 + needgroups; + data->level[depth].attr.type = HWLOC_OBJ_PACKAGE; + type_count[HWLOC_OBJ_PACKAGE] = 1; + } + if (neednuma) { + unsigned depth = 1 + needgroups + needpack; + data->level[depth].attr.type = HWLOC_OBJ_NUMANODE; + type_count[HWLOC_OBJ_NUMANODE] = 1; + } + if (needcaches) { + /* priority: l2, l1, l3, l1i */ + /* order: l3, l2, l1, l1i */ + unsigned l3depth = 1 + needgroups + needpack + neednuma; + unsigned l2depth = l3depth + (needcaches >= 3); + unsigned l1depth = l2depth + 1; + unsigned l1idepth = l1depth + 1; + if (needcaches >= 3) { + data->level[l3depth].attr.type = HWLOC_OBJ_L3CACHE; + data->level[l3depth].attr.depth = 3; + data->level[l3depth].attr.cachetype = HWLOC_OBJ_CACHE_UNIFIED; + type_count[HWLOC_OBJ_L3CACHE] = 1; + } + data->level[l2depth].attr.type = HWLOC_OBJ_L2CACHE; + data->level[l2depth].attr.depth = 2; + data->level[l2depth].attr.cachetype = HWLOC_OBJ_CACHE_UNIFIED; + type_count[HWLOC_OBJ_L2CACHE] = 1; + if (needcaches >= 2) { + 
data->level[l1depth].attr.type = HWLOC_OBJ_L1CACHE; + data->level[l1depth].attr.depth = 1; + data->level[l1depth].attr.cachetype = HWLOC_OBJ_CACHE_DATA; + type_count[HWLOC_OBJ_L1CACHE] = 1; + } + if (needcaches >= 4) { + data->level[l1idepth].attr.type = HWLOC_OBJ_L1ICACHE; + data->level[l1idepth].attr.depth = 1; + data->level[l1idepth].attr.cachetype = HWLOC_OBJ_CACHE_INSTRUCTION; + type_count[HWLOC_OBJ_L1ICACHE] = 1; + } + } + if (needcore) { + unsigned depth = 1 + needgroups + needpack + neednuma + needcaches; + data->level[depth].attr.type = HWLOC_OBJ_CORE; + type_count[HWLOC_OBJ_CORE] = 1; + } + } + + /* enforce a NUMA level */ + if (!type_count[HWLOC_OBJ_NUMANODE] && !data->numa_attached_nr) { + /* insert a NUMA level below the automatic machine root */ + if (verbose) + fprintf(stderr, "Inserting a NUMA level with a single object at depth 1\n"); + /* move existing levels by one */ + memmove(&data->level[2], &data->level[1], count*sizeof(struct hwloc_synthetic_level_data_s)); + data->level[1].attr.type = HWLOC_OBJ_NUMANODE; + data->level[1].indexes.string = NULL; + data->level[1].indexes.array = NULL; + data->level[1].attr.memorysize = 0; + data->level[1].totalwidth = data->level[0].totalwidth; + /* update arity to insert a single NUMA node per parent */ + data->level[1].arity = data->level[0].arity; + data->level[0].arity = 1; + count++; + } + + for (i=0; ilevel[i]; + hwloc_obj_type_t type = curlevel->attr.type; + + if (type == HWLOC_OBJ_GROUP) { + if (curlevel->attr.depth == (unsigned)-1) + curlevel->attr.depth = type_count[HWLOC_OBJ_GROUP]--; + + } else if (hwloc__obj_type_is_cache(type)) { + if (!curlevel->attr.memorysize) { + if (1 == curlevel->attr.depth) + /* 32Kb in L1 */ + curlevel->attr.memorysize = 32*1024; + else + /* *4 at each level, starting from 1MB for L2, unified */ + curlevel->attr.memorysize = 256ULL*1024 << (2*curlevel->attr.depth); + } + + } else if (type == HWLOC_OBJ_NUMANODE && !curlevel->attr.memorysize) { + /* 1GB in memory nodes. 
*/ + curlevel->attr.memorysize = 1024*1024*1024; + } + + hwloc_synthetic_process_indexes(data, &data->level[i].indexes, data->level[i].totalwidth, verbose); + } + + hwloc_synthetic_process_indexes(data, &data->numa_attached_indexes, data->numa_attached_nr, verbose); + + data->string = strdup(description); + data->level[count-1].arity = 0; + return 0; + + error: + hwloc_synthetic_free_levels(data); + return -1; +} + +static void +hwloc_synthetic_set_attr(struct hwloc_synthetic_attr_s *sattr, + hwloc_obj_t obj) +{ + switch (obj->type) { + case HWLOC_OBJ_GROUP: + obj->attr->group.kind = HWLOC_GROUP_KIND_SYNTHETIC; + obj->attr->group.subkind = sattr->depth-1; + break; + case HWLOC_OBJ_MACHINE: + break; + case HWLOC_OBJ_NUMANODE: + obj->attr->numanode.local_memory = sattr->memorysize; + obj->attr->numanode.page_types_len = 1; + obj->attr->numanode.page_types = malloc(sizeof(*obj->attr->numanode.page_types)); + memset(obj->attr->numanode.page_types, 0, sizeof(*obj->attr->numanode.page_types)); + obj->attr->numanode.page_types[0].size = 4096; + obj->attr->numanode.page_types[0].count = sattr->memorysize / 4096; + break; + case HWLOC_OBJ_PACKAGE: + break; + case HWLOC_OBJ_L1CACHE: + case HWLOC_OBJ_L2CACHE: + case HWLOC_OBJ_L3CACHE: + case HWLOC_OBJ_L4CACHE: + case HWLOC_OBJ_L5CACHE: + case HWLOC_OBJ_L1ICACHE: + case HWLOC_OBJ_L2ICACHE: + case HWLOC_OBJ_L3ICACHE: + obj->attr->cache.depth = sattr->depth; + obj->attr->cache.linesize = 64; + obj->attr->cache.type = sattr->cachetype; + obj->attr->cache.size = sattr->memorysize; + break; + case HWLOC_OBJ_CORE: + break; + case HWLOC_OBJ_PU: + break; + default: + /* Should never happen */ + assert(0); + break; + } +} + +static unsigned +hwloc_synthetic_next_index(struct hwloc_synthetic_indexes_s *indexes, hwloc_obj_type_t type) +{ + unsigned os_index = indexes->next++; + + if (indexes->array) + os_index = indexes->array[os_index]; + else if (hwloc__obj_type_is_cache(type) || type == HWLOC_OBJ_GROUP) + /* don't enforce useless 
os_indexes for Caches and Groups */ + os_index = HWLOC_UNKNOWN_INDEX; + + return os_index; +} + +static void +hwloc_synthetic_insert_attached(struct hwloc_topology *topology, + struct hwloc_synthetic_backend_data_s *data, + struct hwloc_synthetic_attached_s *attached, + hwloc_bitmap_t set) +{ + hwloc_obj_t child; + unsigned attached_os_index; + + if (!attached) + return; + + assert(attached->attr.type == HWLOC_OBJ_NUMANODE); + + attached_os_index = hwloc_synthetic_next_index(&data->numa_attached_indexes, HWLOC_OBJ_NUMANODE); + + child = hwloc_alloc_setup_object(topology, attached->attr.type, attached_os_index); + child->cpuset = hwloc_bitmap_dup(set); + + child->nodeset = hwloc_bitmap_alloc(); + hwloc_bitmap_set(child->nodeset, attached_os_index); + + hwloc_synthetic_set_attr(&attached->attr, child); + + hwloc_insert_object_by_cpuset(topology, child); + + hwloc_synthetic_insert_attached(topology, data, attached->next, set); +} + +/* + * Recursively build objects whose cpu start at first_cpu + * - level gives where to look in the type, arity and id arrays + * - the id array is used as a variable to get unique IDs for a given level. + * - generated memory should be added to *memory_kB. + * - generated cpus should be added to parent_cpuset. + * - next cpu number to be used should be returned. 
+ */ +static void +hwloc__look_synthetic(struct hwloc_topology *topology, + struct hwloc_synthetic_backend_data_s *data, + int level, + hwloc_bitmap_t parent_cpuset) +{ + hwloc_obj_t obj; + unsigned i; + struct hwloc_synthetic_level_data_s *curlevel = &data->level[level]; + hwloc_obj_type_t type = curlevel->attr.type; + hwloc_bitmap_t set; + unsigned os_index; + + assert(hwloc__obj_type_is_normal(type) || type == HWLOC_OBJ_NUMANODE); + assert(type != HWLOC_OBJ_MACHINE); + + os_index = hwloc_synthetic_next_index(&curlevel->indexes, type); + + set = hwloc_bitmap_alloc(); + if (!curlevel->arity) { + hwloc_bitmap_set(set, os_index); + } else { + for (i = 0; i < curlevel->arity; i++) + hwloc__look_synthetic(topology, data, level + 1, set); + } + + hwloc_bitmap_or(parent_cpuset, parent_cpuset, set); + + if (hwloc_filter_check_keep_object_type(topology, type)) { + obj = hwloc_alloc_setup_object(topology, type, os_index); + obj->cpuset = hwloc_bitmap_dup(set); + + if (type == HWLOC_OBJ_NUMANODE) { + obj->nodeset = hwloc_bitmap_alloc(); + hwloc_bitmap_set(obj->nodeset, os_index); + } + + hwloc_synthetic_set_attr(&curlevel->attr, obj); + + hwloc_insert_object_by_cpuset(topology, obj); + } + + hwloc_synthetic_insert_attached(topology, data, curlevel->attached, set); + + hwloc_bitmap_free(set); +} + +static int +hwloc_look_synthetic(struct hwloc_backend *backend) +{ + struct hwloc_topology *topology = backend->topology; + struct hwloc_synthetic_backend_data_s *data = backend->private_data; + hwloc_bitmap_t cpuset = hwloc_bitmap_alloc(); + unsigned i; + + assert(!topology->levels[0][0]->cpuset); + + hwloc_alloc_root_sets(topology->levels[0][0]); + + topology->support.discovery->pu = 1; + topology->support.discovery->numa = 1; /* we add a single NUMA node if none is given */ + topology->support.discovery->numa_memory = 1; /* specified or default size */ + + /* start with os_index 0 for each level */ + for (i = 0; data->level[i].arity > 0; i++) + data->level[i].indexes.next = 0; 
+ data->numa_attached_indexes.next = 0; + /* ... including the last one */ + data->level[i].indexes.next = 0; + + /* update first level type according to the synthetic type array */ + topology->levels[0][0]->type = data->level[0].attr.type; + hwloc_synthetic_set_attr(&data->level[0].attr, topology->levels[0][0]); + + for (i = 0; i < data->level[0].arity; i++) + hwloc__look_synthetic(topology, data, 1, cpuset); + + hwloc_synthetic_insert_attached(topology, data, data->level[0].attached, cpuset); + + hwloc_bitmap_free(cpuset); + + hwloc_obj_add_info(topology->levels[0][0], "Backend", "Synthetic"); + hwloc_obj_add_info(topology->levels[0][0], "SyntheticDescription", data->string); + return 0; +} + +static void +hwloc_synthetic_backend_disable(struct hwloc_backend *backend) +{ + struct hwloc_synthetic_backend_data_s *data = backend->private_data; + hwloc_synthetic_free_levels(data); + free(data->string); + free(data); +} + +static struct hwloc_backend * +hwloc_synthetic_component_instantiate(struct hwloc_disc_component *component, + const void *_data1, + const void *_data2 __hwloc_attribute_unused, + const void *_data3 __hwloc_attribute_unused) +{ + struct hwloc_backend *backend; + struct hwloc_synthetic_backend_data_s *data; + int err; + + if (!_data1) { + const char *env = getenv("HWLOC_SYNTHETIC"); + if (env) { + /* 'synthetic' was given in HWLOC_COMPONENTS without a description */ + _data1 = env; + } else { + errno = EINVAL; + goto out; + } + } + + backend = hwloc_backend_alloc(component); + if (!backend) + goto out; + + data = malloc(sizeof(*data)); + if (!data) { + errno = ENOMEM; + goto out_with_backend; + } + + err = hwloc_backend_synthetic_init(data, (const char *) _data1); + if (err < 0) + goto out_with_data; + + backend->private_data = data; + backend->discover = hwloc_look_synthetic; + backend->disable = hwloc_synthetic_backend_disable; + backend->is_thissystem = 0; + + return backend; + + out_with_data: + free(data); + out_with_backend: + free(backend); + 
out: + return NULL; +} + +static struct hwloc_disc_component hwloc_synthetic_disc_component = { + HWLOC_DISC_COMPONENT_TYPE_GLOBAL, + "synthetic", + ~0, + hwloc_synthetic_component_instantiate, + 30, + 1, + NULL +}; + +const struct hwloc_component hwloc_synthetic_component = { + HWLOC_COMPONENT_ABI, + NULL, NULL, + HWLOC_COMPONENT_TYPE_DISC, + 0, + &hwloc_synthetic_disc_component +}; + +static __hwloc_inline int +hwloc__export_synthetic_update_status(int *ret, char **tmp, ssize_t *tmplen, int res) +{ + if (res < 0) + return -1; + *ret += res; + if (res >= *tmplen) + res = *tmplen>0 ? (int)(*tmplen) - 1 : 0; + *tmp += res; + *tmplen -= res; + return 0; +} + +static __hwloc_inline void +hwloc__export_synthetic_add_char(int *ret, char **tmp, ssize_t *tmplen, char c) +{ + if (*tmplen > 1) { + (*tmp)[0] = c; + (*tmp)[1] = '\0'; + (*tmp)++; + (*tmplen)--; + } + (*ret)++; +} + +static int +hwloc__export_synthetic_indexes(hwloc_obj_t *level, unsigned total, + char *buffer, size_t buflen) +{ + unsigned step = 1; + unsigned nr_loops = 0; + struct hwloc_synthetic_intlv_loop_s *loops = NULL, *tmploops; + hwloc_obj_t cur; + unsigned i, j; + ssize_t tmplen = buflen; + char *tmp = buffer; + int res, ret = 0; + + /* must start with 0 */ + if (level[0]->os_index) + goto exportall; + + while (step != total) { + /* must be a divider of the total */ + if (total % step) + goto exportall; + + /* look for os_index == step */ + for(i=1; ios_index == step) + break; + if (i == total) + goto exportall; + for(j=2; jos_index != step*j) + break; + + nr_loops++; + tmploops = realloc(loops, nr_loops*sizeof(*loops)); + if (!tmploops) + goto exportall; + loops = tmploops; + loops[nr_loops-1].step = i; + loops[nr_loops-1].nb = j; + step *= j; + } + + /* check this interleaving */ + for(i=0; ios_index != ind) + goto exportall; + } + + /* success, print it */ + for(j=0; jos_index, + cur->next_cousin ? 
"," : ")"); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + cur = cur->next_cousin; + } + return ret; +} + +static int +hwloc__export_synthetic_obj_attr(struct hwloc_topology * topology, + hwloc_obj_t obj, + char *buffer, size_t buflen) +{ + const char * separator = " "; + const char * prefix = "("; + char cachesize[64] = ""; + char memsize[64] = ""; + int needindexes = 0; + + if (hwloc__obj_type_is_cache(obj->type) && obj->attr->cache.size) { + snprintf(cachesize, sizeof(cachesize), "%ssize=%llu", + prefix, (unsigned long long) obj->attr->cache.size); + prefix = separator; + } + if (obj->type == HWLOC_OBJ_NUMANODE && obj->attr->numanode.local_memory) { + snprintf(memsize, sizeof(memsize), "%smemory=%llu", + prefix, (unsigned long long) obj->attr->numanode.local_memory); + prefix = separator; + } + if (!obj->logical_index /* only display indexes once per level (not for non-first NUMA children, etc.) */ + && (obj->type == HWLOC_OBJ_PU || obj->type == HWLOC_OBJ_NUMANODE)) { + hwloc_obj_t cur = obj; + while (cur) { + if (cur->os_index != cur->logical_index) { + needindexes = 1; + break; + } + cur = cur->next_cousin; + } + } + if (*cachesize || *memsize || needindexes) { + ssize_t tmplen = buflen; + char *tmp = buffer; + int res, ret = 0; + + res = hwloc_snprintf(tmp, tmplen, "%s%s%s", cachesize, memsize, needindexes ? 
"" : ")"); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + + if (needindexes) { + unsigned total; + hwloc_obj_t *level; + + if (obj->depth < 0) { + assert(obj->depth == HWLOC_TYPE_DEPTH_NUMANODE); + total = topology->slevels[HWLOC_SLEVEL_NUMANODE].nbobjs; + level = topology->slevels[HWLOC_SLEVEL_NUMANODE].objs; + } else { + total = topology->level_nbobjects[obj->depth]; + level = topology->levels[obj->depth]; + } + + res = hwloc_snprintf(tmp, tmplen, "%sindexes=", prefix); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + + res = hwloc__export_synthetic_indexes(level, total, tmp, tmplen); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + } + return ret; + } else { + return 0; + } +} + +static int +hwloc__export_synthetic_obj(struct hwloc_topology * topology, unsigned long flags, + hwloc_obj_t obj, unsigned arity, + char *buffer, size_t buflen) +{ + char aritys[12] = ""; + ssize_t tmplen = buflen; + char *tmp = buffer; + int res, ret = 0; + + /* :, except for root */ + if (arity != (unsigned)-1) + snprintf(aritys, sizeof(aritys), ":%u", arity); + if (hwloc__obj_type_is_cache(obj->type) + && (flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES)) { + /* v1 uses generic "Cache" for non-extended type name */ + res = hwloc_snprintf(tmp, tmplen, "Cache%s", aritys); + + } else if (obj->type == HWLOC_OBJ_PACKAGE + && (flags & (HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES + |HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1))) { + /* if exporting to v1 or without extended-types, use all-v1-compatible Socket name */ + res = hwloc_snprintf(tmp, tmplen, "Socket%s", aritys); + + } else if (obj->type == HWLOC_OBJ_GROUP /* don't export group depth */ + || flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES) { + res = hwloc_snprintf(tmp, tmplen, "%s%s", hwloc_obj_type_string(obj->type), aritys); + } else { + char types[64]; + 
hwloc_obj_type_snprintf(types, sizeof(types), obj, 1); + res = hwloc_snprintf(tmp, tmplen, "%s%s", types, aritys); + } + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + + if (!(flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS)) { + /* obj attributes */ + res = hwloc__export_synthetic_obj_attr(topology, obj, tmp, tmplen); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + } + + return ret; +} + +static int +hwloc__export_synthetic_memory_children(struct hwloc_topology * topology, unsigned long flags, + hwloc_obj_t parent, + char *buffer, size_t buflen, + int needprefix, int verbose) +{ + hwloc_obj_t mchild; + ssize_t tmplen = buflen; + char *tmp = buffer; + int res, ret = 0; + + mchild = parent->memory_first_child; + if (!mchild) + return 0; + + if (flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1) { + /* v1: export a single NUMA child */ + if (parent->memory_arity > 1 || mchild->type != HWLOC_OBJ_NUMANODE) { + /* not supported */ + if (verbose) + fprintf(stderr, "Cannot export to synthetic v1 if multiple memory children are attached to the same location.\n"); + errno = EINVAL; + return -1; + } + + if (needprefix) + hwloc__export_synthetic_add_char(&ret, &tmp, &tmplen, ' '); + + res = hwloc__export_synthetic_obj(topology, flags, mchild, 1, tmp, tmplen); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + return ret; + } + + while (mchild) { + /* v2: export all NUMA children */ + + assert(mchild->type == HWLOC_OBJ_NUMANODE); /* only NUMA node memory children for now */ + + if (needprefix) + hwloc__export_synthetic_add_char(&ret, &tmp, &tmplen, ' '); + + hwloc__export_synthetic_add_char(&ret, &tmp, &tmplen, '['); + + res = hwloc__export_synthetic_obj(topology, flags, mchild, (unsigned)-1, tmp, tmplen); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + + hwloc__export_synthetic_add_char(&ret, &tmp, &tmplen, 
']'); + + needprefix = 1; + mchild = mchild->next_sibling; + } + + return ret; +} + +static int +hwloc_check_memory_symmetric(struct hwloc_topology * topology) +{ + hwloc_bitmap_t remaining_nodes; + + remaining_nodes = hwloc_bitmap_dup(hwloc_get_root_obj(topology)->nodeset); + if (!remaining_nodes) + /* assume asymmetric */ + return -1; + + while (!hwloc_bitmap_iszero(remaining_nodes)) { + unsigned idx; + hwloc_obj_t node; + hwloc_obj_t first_parent; + unsigned i; + + idx = hwloc_bitmap_first(remaining_nodes); + node = hwloc_get_numanode_obj_by_os_index(topology, idx); + assert(node); + + first_parent = node->parent; + assert(hwloc__obj_type_is_normal(first_parent->type)); /* only depth-1 memory children for now */ + + /* check whether all object on parent's level have same number of NUMA children */ + for(i=0; idepth); i++) { + hwloc_obj_t parent, mchild; + + parent = hwloc_get_obj_by_depth(topology, first_parent->depth, i); + assert(parent); + + /* must have same memory arity */ + if (parent->memory_arity != first_parent->memory_arity) + goto out_with_bitmap; + + /* clear these NUMA children from remaining_nodes */ + mchild = parent->memory_first_child; + while (mchild) { + assert(mchild->type == HWLOC_OBJ_NUMANODE); /* only NUMA node memory children for now */ + hwloc_bitmap_clr(remaining_nodes, mchild->os_index); /* cannot use parent->nodeset, some normal children may have other NUMA nodes */ + mchild = mchild->next_sibling; + } + } + } + + hwloc_bitmap_free(remaining_nodes); + return 0; + + out_with_bitmap: + hwloc_bitmap_free(remaining_nodes); + return -1; +} + +int +hwloc_topology_export_synthetic(struct hwloc_topology * topology, + char *buffer, size_t buflen, + unsigned long flags) +{ + hwloc_obj_t obj = hwloc_get_root_obj(topology); + ssize_t tmplen = buflen; + char *tmp = buffer; + int res, ret = 0; + unsigned arity; + int needprefix = 0; + int verbose = 0; + const char *env = getenv("HWLOC_SYNTHETIC_VERBOSE"); + + if (env) + verbose = atoi(env); + + if 
(!topology->is_loaded) { + errno = EINVAL; + return -1; + } + + if (flags & ~(HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_EXTENDED_TYPES + |HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS + |HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1 + |HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_IGNORE_MEMORY)) { + errno = EINVAL; + return -1; + } + + /* TODO: add a flag to ignore symmetric_subtree and I/Os. + * just assume things are symmetric with the left branches of the tree. + * but the number of objects per level may be wrong, what to do with OS index array in this case? + * only allow ignoring symmetric_subtree if the level width remains OK? + */ + + /* TODO: add a root object by default, with a prefix such as tree= + * so that we can backward-compatibly recognize whether there's a root or not. + * and add a flag to disable it. + */ + + /* TODO: flag to force all indexes, not only for PU and NUMA? */ + + if (!obj->symmetric_subtree) { + if (verbose) + fprintf(stderr, "Cannot export to synthetic unless topology is symmetric (root->symmetric_subtree must be set).\n"); + errno = EINVAL; + return -1; + } + + if (!(flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_IGNORE_MEMORY) + && hwloc_check_memory_symmetric(topology) < 0) { + if (verbose) + fprintf(stderr, "Cannot export to synthetic unless memory is attached symmetrically.\n"); + errno = EINVAL; + return -1; + } + + if (flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_V1) { + /* v1 requires all NUMA at the same level */ + hwloc_obj_t node; + signed pdepth; + + node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 0); + assert(hwloc__obj_type_is_normal(node->parent->type)); /* only depth-1 memory children for now */ + pdepth = node->parent->depth; + + while ((node = node->next_cousin) != NULL) { + assert(hwloc__obj_type_is_normal(node->parent->type)); /* only depth-1 memory children for now */ + if (node->parent->depth != pdepth) { + if (verbose) + fprintf(stderr, "Cannot export to synthetic v1 if memory is attached to parents at different 
depths.\n"); + errno = EINVAL; + return -1; + } + } + } + + /* we're good, start exporting */ + + if (!(flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_NO_ATTRS)) { + /* obj attributes */ + res = hwloc__export_synthetic_obj_attr(topology, obj, tmp, tmplen); + if (res > 0) + needprefix = 1; + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + } + + if (!(flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_IGNORE_MEMORY)) { + res = hwloc__export_synthetic_memory_children(topology, flags, obj, tmp, tmplen, needprefix, verbose); + if (res > 0) + needprefix = 1; + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + } + + arity = obj->arity; + while (arity) { + /* for each level */ + obj = obj->first_child; + + if (needprefix) + hwloc__export_synthetic_add_char(&ret, &tmp, &tmplen, ' '); + + res = hwloc__export_synthetic_obj(topology, flags, obj, arity, tmp, tmplen); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + + if (!(flags & HWLOC_TOPOLOGY_EXPORT_SYNTHETIC_FLAG_IGNORE_MEMORY)) { + res = hwloc__export_synthetic_memory_children(topology, flags, obj, tmp, tmplen, 1, verbose); + if (hwloc__export_synthetic_update_status(&ret, &tmp, &tmplen, res) < 0) + return -1; + } + + /* next level */ + needprefix = 1; + arity = obj->arity; + } + + return ret; +} diff --git a/src/3rdparty/hwloc/src/topology-windows.c b/src/3rdparty/hwloc/src/topology-windows.c new file mode 100644 index 00000000..d03645c0 --- /dev/null +++ b/src/3rdparty/hwloc/src/topology-windows.c @@ -0,0 +1,1189 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +/* To try to get all declarations duplicated below. 
*/ +#define _WIN32_WINNT 0x0601 + +#include +#include +#include +#include + +#include + +#ifndef HAVE_KAFFINITY +typedef ULONG_PTR KAFFINITY, *PKAFFINITY; +#endif + +#ifndef HAVE_PROCESSOR_CACHE_TYPE +typedef enum _PROCESSOR_CACHE_TYPE { + CacheUnified, + CacheInstruction, + CacheData, + CacheTrace +} PROCESSOR_CACHE_TYPE; +#endif + +#ifndef CACHE_FULLY_ASSOCIATIVE +#define CACHE_FULLY_ASSOCIATIVE 0xFF +#endif + +#ifndef MAXIMUM_PROC_PER_GROUP /* missing in MinGW */ +#define MAXIMUM_PROC_PER_GROUP 64 +#endif + +#ifndef HAVE_CACHE_DESCRIPTOR +typedef struct _CACHE_DESCRIPTOR { + BYTE Level; + BYTE Associativity; + WORD LineSize; + DWORD Size; /* in bytes */ + PROCESSOR_CACHE_TYPE Type; +} CACHE_DESCRIPTOR, *PCACHE_DESCRIPTOR; +#endif + +#ifndef HAVE_LOGICAL_PROCESSOR_RELATIONSHIP +typedef enum _LOGICAL_PROCESSOR_RELATIONSHIP { + RelationProcessorCore, + RelationNumaNode, + RelationCache, + RelationProcessorPackage, + RelationGroup, + RelationAll = 0xffff +} LOGICAL_PROCESSOR_RELATIONSHIP; +#else /* HAVE_LOGICAL_PROCESSOR_RELATIONSHIP */ +# ifndef HAVE_RELATIONPROCESSORPACKAGE +# define RelationProcessorPackage 3 +# define RelationGroup 4 +# define RelationAll 0xffff +# endif /* HAVE_RELATIONPROCESSORPACKAGE */ +#endif /* HAVE_LOGICAL_PROCESSOR_RELATIONSHIP */ + +#ifndef HAVE_SYSTEM_LOGICAL_PROCESSOR_INFORMATION +typedef struct _SYSTEM_LOGICAL_PROCESSOR_INFORMATION { + ULONG_PTR ProcessorMask; + LOGICAL_PROCESSOR_RELATIONSHIP Relationship; + _ANONYMOUS_UNION + union { + struct { + BYTE flags; + } ProcessorCore; + struct { + DWORD NodeNumber; + } NumaNode; + CACHE_DESCRIPTOR Cache; + ULONGLONG Reserved[2]; + } DUMMYUNIONNAME; +} SYSTEM_LOGICAL_PROCESSOR_INFORMATION, *PSYSTEM_LOGICAL_PROCESSOR_INFORMATION; +#endif + +/* Extended interface, for group support */ + +#ifndef HAVE_GROUP_AFFINITY +typedef struct _GROUP_AFFINITY { + KAFFINITY Mask; + WORD Group; + WORD Reserved[3]; +} GROUP_AFFINITY, *PGROUP_AFFINITY; +#endif + +#ifndef HAVE_PROCESSOR_RELATIONSHIP +typedef 
struct _PROCESSOR_RELATIONSHIP { + BYTE Flags; + BYTE Reserved[21]; + WORD GroupCount; + GROUP_AFFINITY GroupMask[ANYSIZE_ARRAY]; +} PROCESSOR_RELATIONSHIP, *PPROCESSOR_RELATIONSHIP; +#endif + +#ifndef HAVE_NUMA_NODE_RELATIONSHIP +typedef struct _NUMA_NODE_RELATIONSHIP { + DWORD NodeNumber; + BYTE Reserved[20]; + GROUP_AFFINITY GroupMask; +} NUMA_NODE_RELATIONSHIP, *PNUMA_NODE_RELATIONSHIP; +#endif + +#ifndef HAVE_CACHE_RELATIONSHIP +typedef struct _CACHE_RELATIONSHIP { + BYTE Level; + BYTE Associativity; + WORD LineSize; + DWORD CacheSize; + PROCESSOR_CACHE_TYPE Type; + BYTE Reserved[20]; + GROUP_AFFINITY GroupMask; +} CACHE_RELATIONSHIP, *PCACHE_RELATIONSHIP; +#endif + +#ifndef HAVE_PROCESSOR_GROUP_INFO +typedef struct _PROCESSOR_GROUP_INFO { + BYTE MaximumProcessorCount; + BYTE ActiveProcessorCount; + BYTE Reserved[38]; + KAFFINITY ActiveProcessorMask; +} PROCESSOR_GROUP_INFO, *PPROCESSOR_GROUP_INFO; +#endif + +#ifndef HAVE_GROUP_RELATIONSHIP +typedef struct _GROUP_RELATIONSHIP { + WORD MaximumGroupCount; + WORD ActiveGroupCount; + ULONGLONG Reserved[2]; + PROCESSOR_GROUP_INFO GroupInfo[ANYSIZE_ARRAY]; +} GROUP_RELATIONSHIP, *PGROUP_RELATIONSHIP; +#endif + +#ifndef HAVE_SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX +typedef struct _SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX { + LOGICAL_PROCESSOR_RELATIONSHIP Relationship; + DWORD Size; + _ANONYMOUS_UNION + union { + PROCESSOR_RELATIONSHIP Processor; + NUMA_NODE_RELATIONSHIP NumaNode; + CACHE_RELATIONSHIP Cache; + GROUP_RELATIONSHIP Group; + /* Odd: no member to tell the cpu mask of the package... 
*/ + } DUMMYUNIONNAME; +} SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX, *PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX; +#endif + +#ifndef HAVE_PSAPI_WORKING_SET_EX_BLOCK +typedef union _PSAPI_WORKING_SET_EX_BLOCK { + ULONG_PTR Flags; + struct { + unsigned Valid :1; + unsigned ShareCount :3; + unsigned Win32Protection :11; + unsigned Shared :1; + unsigned Node :6; + unsigned Locked :1; + unsigned LargePage :1; + }; +} PSAPI_WORKING_SET_EX_BLOCK; +#endif + +#ifndef HAVE_PSAPI_WORKING_SET_EX_INFORMATION +typedef struct _PSAPI_WORKING_SET_EX_INFORMATION { + PVOID VirtualAddress; + PSAPI_WORKING_SET_EX_BLOCK VirtualAttributes; +} PSAPI_WORKING_SET_EX_INFORMATION; +#endif + +#ifndef HAVE_PROCESSOR_NUMBER +typedef struct _PROCESSOR_NUMBER { + WORD Group; + BYTE Number; + BYTE Reserved; +} PROCESSOR_NUMBER, *PPROCESSOR_NUMBER; +#endif + +/* Function pointers */ + +typedef WORD (WINAPI *PFN_GETACTIVEPROCESSORGROUPCOUNT)(void); +static PFN_GETACTIVEPROCESSORGROUPCOUNT GetActiveProcessorGroupCountProc; + +static unsigned long nr_processor_groups = 1; +static unsigned long max_numanode_index = 0; + +typedef WORD (WINAPI *PFN_GETACTIVEPROCESSORCOUNT)(WORD); +static PFN_GETACTIVEPROCESSORCOUNT GetActiveProcessorCountProc; + +typedef DWORD (WINAPI *PFN_GETCURRENTPROCESSORNUMBER)(void); +static PFN_GETCURRENTPROCESSORNUMBER GetCurrentProcessorNumberProc; + +typedef VOID (WINAPI *PFN_GETCURRENTPROCESSORNUMBEREX)(PPROCESSOR_NUMBER); +static PFN_GETCURRENTPROCESSORNUMBEREX GetCurrentProcessorNumberExProc; + +typedef BOOL (WINAPI *PFN_GETLOGICALPROCESSORINFORMATION)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION Buffer, PDWORD ReturnLength); +static PFN_GETLOGICALPROCESSORINFORMATION GetLogicalProcessorInformationProc; + +typedef BOOL (WINAPI *PFN_GETLOGICALPROCESSORINFORMATIONEX)(LOGICAL_PROCESSOR_RELATIONSHIP relationship, PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX Buffer, PDWORD ReturnLength); +static PFN_GETLOGICALPROCESSORINFORMATIONEX GetLogicalProcessorInformationExProc; + +typedef BOOL (WINAPI 
*PFN_SETTHREADGROUPAFFINITY)(HANDLE hThread, const GROUP_AFFINITY *GroupAffinity, PGROUP_AFFINITY PreviousGroupAffinity); +static PFN_SETTHREADGROUPAFFINITY SetThreadGroupAffinityProc; + +typedef BOOL (WINAPI *PFN_GETTHREADGROUPAFFINITY)(HANDLE hThread, PGROUP_AFFINITY GroupAffinity); +static PFN_GETTHREADGROUPAFFINITY GetThreadGroupAffinityProc; + +typedef BOOL (WINAPI *PFN_GETNUMAAVAILABLEMEMORYNODE)(UCHAR Node, PULONGLONG AvailableBytes); +static PFN_GETNUMAAVAILABLEMEMORYNODE GetNumaAvailableMemoryNodeProc; + +typedef BOOL (WINAPI *PFN_GETNUMAAVAILABLEMEMORYNODEEX)(USHORT Node, PULONGLONG AvailableBytes); +static PFN_GETNUMAAVAILABLEMEMORYNODEEX GetNumaAvailableMemoryNodeExProc; + +typedef LPVOID (WINAPI *PFN_VIRTUALALLOCEXNUMA)(HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect, DWORD nndPreferred); +static PFN_VIRTUALALLOCEXNUMA VirtualAllocExNumaProc; + +typedef BOOL (WINAPI *PFN_VIRTUALFREEEX)(HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType); +static PFN_VIRTUALFREEEX VirtualFreeExProc; + +typedef BOOL (WINAPI *PFN_QUERYWORKINGSETEX)(HANDLE hProcess, PVOID pv, DWORD cb); +static PFN_QUERYWORKINGSETEX QueryWorkingSetExProc; + +static void hwloc_win_get_function_ptrs(void) +{ + HMODULE kernel32; + + kernel32 = LoadLibrary("kernel32.dll"); + if (kernel32) { + GetActiveProcessorGroupCountProc = + (PFN_GETACTIVEPROCESSORGROUPCOUNT) GetProcAddress(kernel32, "GetActiveProcessorGroupCount"); + GetActiveProcessorCountProc = + (PFN_GETACTIVEPROCESSORCOUNT) GetProcAddress(kernel32, "GetActiveProcessorCount"); + GetLogicalProcessorInformationProc = + (PFN_GETLOGICALPROCESSORINFORMATION) GetProcAddress(kernel32, "GetLogicalProcessorInformation"); + GetCurrentProcessorNumberProc = + (PFN_GETCURRENTPROCESSORNUMBER) GetProcAddress(kernel32, "GetCurrentProcessorNumber"); + GetCurrentProcessorNumberExProc = + (PFN_GETCURRENTPROCESSORNUMBEREX) GetProcAddress(kernel32, "GetCurrentProcessorNumberEx"); + 
SetThreadGroupAffinityProc = + (PFN_SETTHREADGROUPAFFINITY) GetProcAddress(kernel32, "SetThreadGroupAffinity"); + GetThreadGroupAffinityProc = + (PFN_GETTHREADGROUPAFFINITY) GetProcAddress(kernel32, "GetThreadGroupAffinity"); + GetNumaAvailableMemoryNodeProc = + (PFN_GETNUMAAVAILABLEMEMORYNODE) GetProcAddress(kernel32, "GetNumaAvailableMemoryNode"); + GetNumaAvailableMemoryNodeExProc = + (PFN_GETNUMAAVAILABLEMEMORYNODEEX) GetProcAddress(kernel32, "GetNumaAvailableMemoryNodeEx"); + GetLogicalProcessorInformationExProc = + (PFN_GETLOGICALPROCESSORINFORMATIONEX)GetProcAddress(kernel32, "GetLogicalProcessorInformationEx"); + QueryWorkingSetExProc = + (PFN_QUERYWORKINGSETEX) GetProcAddress(kernel32, "K32QueryWorkingSetEx"); + VirtualAllocExNumaProc = + (PFN_VIRTUALALLOCEXNUMA) GetProcAddress(kernel32, "VirtualAllocExNuma"); + VirtualFreeExProc = + (PFN_VIRTUALFREEEX) GetProcAddress(kernel32, "VirtualFreeEx"); + } + + if (GetActiveProcessorGroupCountProc) + nr_processor_groups = GetActiveProcessorGroupCountProc(); + + if (!QueryWorkingSetExProc) { + HMODULE psapi = LoadLibrary("psapi.dll"); + if (psapi) + QueryWorkingSetExProc = (PFN_QUERYWORKINGSETEX) GetProcAddress(psapi, "QueryWorkingSetEx"); + } +} + +/* + * ULONG_PTR and DWORD_PTR are 64/32bits depending on the arch + * while bitmaps use unsigned long (always 32bits) + */ + +static void hwloc_bitmap_from_ULONG_PTR(hwloc_bitmap_t set, ULONG_PTR mask) +{ +#if SIZEOF_VOID_P == 8 + hwloc_bitmap_from_ulong(set, mask & 0xffffffff); + hwloc_bitmap_set_ith_ulong(set, 1, mask >> 32); +#else + hwloc_bitmap_from_ulong(set, mask); +#endif +} + +static void hwloc_bitmap_from_ith_ULONG_PTR(hwloc_bitmap_t set, unsigned i, ULONG_PTR mask) +{ +#if SIZEOF_VOID_P == 8 + hwloc_bitmap_from_ith_ulong(set, 2*i, mask & 0xffffffff); + hwloc_bitmap_set_ith_ulong(set, 2*i+1, mask >> 32); +#else + hwloc_bitmap_from_ith_ulong(set, i, mask); +#endif +} + +static void hwloc_bitmap_set_ith_ULONG_PTR(hwloc_bitmap_t set, unsigned i, ULONG_PTR mask) 
+{ +#if SIZEOF_VOID_P == 8 + hwloc_bitmap_set_ith_ulong(set, 2*i, mask & 0xffffffff); + hwloc_bitmap_set_ith_ulong(set, 2*i+1, mask >> 32); +#else + hwloc_bitmap_set_ith_ulong(set, i, mask); +#endif +} + +static ULONG_PTR hwloc_bitmap_to_ULONG_PTR(hwloc_const_bitmap_t set) +{ +#if SIZEOF_VOID_P == 8 + ULONG_PTR up = hwloc_bitmap_to_ith_ulong(set, 1); + up <<= 32; + up |= hwloc_bitmap_to_ulong(set); + return up; +#else + return hwloc_bitmap_to_ulong(set); +#endif +} + +static ULONG_PTR hwloc_bitmap_to_ith_ULONG_PTR(hwloc_const_bitmap_t set, unsigned i) +{ +#if SIZEOF_VOID_P == 8 + ULONG_PTR up = hwloc_bitmap_to_ith_ulong(set, 2*i+1); + up <<= 32; + up |= hwloc_bitmap_to_ith_ulong(set, 2*i); + return up; +#else + return hwloc_bitmap_to_ith_ulong(set, i); +#endif +} + +/* convert set into index+mask if all set bits are in the same ULONG. + * otherwise return -1. + */ +static int hwloc_bitmap_to_single_ULONG_PTR(hwloc_const_bitmap_t set, unsigned *index, ULONG_PTR *mask) +{ + unsigned first_ulp, last_ulp; + if (hwloc_bitmap_weight(set) == -1) + return -1; + first_ulp = hwloc_bitmap_first(set) / (sizeof(ULONG_PTR)*8); + last_ulp = hwloc_bitmap_last(set) / (sizeof(ULONG_PTR)*8); + if (first_ulp != last_ulp) + return -1; + *mask = hwloc_bitmap_to_ith_ULONG_PTR(set, first_ulp); + *index = first_ulp; + return 0; +} + +/************************************************************** + * hwloc PU numbering with respect to Windows processor groups + * + * Everywhere below we reserve 64 physical indexes per processor groups because that's + * the maximum (MAXIMUM_PROC_PER_GROUP). Windows may actually use less bits than that + * in some groups (either to avoid splitting NUMA nodes across groups, or because of OS + * tweaks such as "bcdedit /set groupsize 8") but we keep some unused indexes for simplicity. + * That means PU physical indexes and cpusets may be non-contigous. 
+ * That also means hwloc_fallback_nbprocessors() below must return the last PU index + 1 + * instead the actual number of processors. + */ + +/******************** + * last_cpu_location + */ + +static int +hwloc_win_get_thisthread_last_cpu_location(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_cpuset_t set, int flags __hwloc_attribute_unused) +{ + assert(GetCurrentProcessorNumberExProc || (GetCurrentProcessorNumberProc && nr_processor_groups == 1)); + + if (nr_processor_groups > 1 || !GetCurrentProcessorNumberProc) { + PROCESSOR_NUMBER num; + GetCurrentProcessorNumberExProc(&num); + hwloc_bitmap_from_ith_ULONG_PTR(set, num.Group, ((ULONG_PTR)1) << num.Number); + return 0; + } + + hwloc_bitmap_from_ith_ULONG_PTR(set, 0, ((ULONG_PTR)1) << GetCurrentProcessorNumberProc()); + return 0; +} + +/* TODO: hwloc_win_get_thisproc_last_cpu_location() using + * CreateToolhelp32Snapshot(), Thread32First/Next() + * th.th32OwnerProcessID == GetCurrentProcessId() for filtering within process + * OpenThread(THREAD_SET_INFORMATION|THREAD_QUERY_INFORMATION, FALSE, te32.th32ThreadID) to get a handle. 
+ */ + + +/****************************** + * set cpu/membind for threads + */ + +/* TODO: SetThreadIdealProcessor{,Ex} */ + +static int +hwloc_win_set_thread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_thread_t thread, hwloc_const_bitmap_t hwloc_set, int flags) +{ + DWORD_PTR mask; + unsigned group; + + if (flags & HWLOC_CPUBIND_NOMEMBIND) { + errno = ENOSYS; + return -1; + } + + if (hwloc_bitmap_to_single_ULONG_PTR(hwloc_set, &group, &mask) < 0) { + errno = ENOSYS; + return -1; + } + + assert(nr_processor_groups == 1 || SetThreadGroupAffinityProc); + + if (nr_processor_groups > 1) { + GROUP_AFFINITY aff; + memset(&aff, 0, sizeof(aff)); /* we get Invalid Parameter error if Reserved field isn't cleared */ + aff.Group = group; + aff.Mask = mask; + if (!SetThreadGroupAffinityProc(thread, &aff, NULL)) + return -1; + + } else { + /* SetThreadAffinityMask() only changes the mask inside the current processor group */ + /* The resulting binding is always strict */ + if (!SetThreadAffinityMask(thread, mask)) + return -1; + } + return 0; +} + +static int +hwloc_win_set_thisthread_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, int flags) +{ + return hwloc_win_set_thread_cpubind(topology, GetCurrentThread(), hwloc_set, flags); +} + +static int +hwloc_win_set_thisthread_membind(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + int ret; + hwloc_const_cpuset_t cpuset; + hwloc_cpuset_t _cpuset = NULL; + + if ((policy != HWLOC_MEMBIND_DEFAULT && policy != HWLOC_MEMBIND_BIND) + || flags & HWLOC_MEMBIND_NOCPUBIND) { + errno = ENOSYS; + return -1; + } + + if (policy == HWLOC_MEMBIND_DEFAULT) { + cpuset = hwloc_topology_get_complete_cpuset(topology); + } else { + cpuset = _cpuset = hwloc_bitmap_alloc(); + hwloc_cpuset_from_nodeset(topology, _cpuset, nodeset); + } + + ret = hwloc_win_set_thisthread_cpubind(topology, cpuset, + (flags & HWLOC_MEMBIND_STRICT) ? 
HWLOC_CPUBIND_STRICT : 0); + hwloc_bitmap_free(_cpuset); + return ret; +} + + +/****************************** + * get cpu/membind for threads + */ + +static int +hwloc_win_get_thread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_thread_t thread, hwloc_cpuset_t set, int flags __hwloc_attribute_unused) +{ + GROUP_AFFINITY aff; + + assert(GetThreadGroupAffinityProc); + + if (!GetThreadGroupAffinityProc(thread, &aff)) + return -1; + hwloc_bitmap_from_ith_ULONG_PTR(set, aff.Group, aff.Mask); + return 0; +} + +static int +hwloc_win_get_thisthread_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_cpuset_t set, int flags __hwloc_attribute_unused) +{ + return hwloc_win_get_thread_cpubind(topology, GetCurrentThread(), set, flags); +} + +static int +hwloc_win_get_thisthread_membind(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + int ret; + hwloc_cpuset_t cpuset = hwloc_bitmap_alloc(); + ret = hwloc_win_get_thread_cpubind(topology, GetCurrentThread(), cpuset, flags); + if (!ret) { + *policy = HWLOC_MEMBIND_BIND; + hwloc_cpuset_to_nodeset(topology, cpuset, nodeset); + } + hwloc_bitmap_free(cpuset); + return ret; +} + + +/******************************** + * set cpu/membind for processes + */ + +static int +hwloc_win_set_proc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t proc, hwloc_const_bitmap_t hwloc_set, int flags) +{ + DWORD_PTR mask; + + assert(nr_processor_groups == 1); + + if (flags & HWLOC_CPUBIND_NOMEMBIND) { + errno = ENOSYS; + return -1; + } + + /* TODO: SetThreadGroupAffinity() for all threads doesn't enforce the whole process affinity, + * maybe because of process-specific resource locality */ + /* TODO: if we are in a single group (check with GetProcessGroupAffinity()), + * SetProcessAffinityMask() changes the binding within that same group. 
+ */ + /* TODO: NtSetInformationProcess() works very well for binding to any mask in a single group, + * but it's an internal routine. + */ + /* TODO: checks whether hwloc-bind.c needs to pass INHERIT_PARENT_AFFINITY to CreateProcess() instead of execvp(). */ + + /* The resulting binding is always strict */ + mask = hwloc_bitmap_to_ULONG_PTR(hwloc_set); + if (!SetProcessAffinityMask(proc, mask)) + return -1; + return 0; +} + +static int +hwloc_win_set_thisproc_cpubind(hwloc_topology_t topology, hwloc_const_bitmap_t hwloc_set, int flags) +{ + return hwloc_win_set_proc_cpubind(topology, GetCurrentProcess(), hwloc_set, flags); +} + +static int +hwloc_win_set_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + int ret; + hwloc_const_cpuset_t cpuset; + hwloc_cpuset_t _cpuset = NULL; + + if ((policy != HWLOC_MEMBIND_DEFAULT && policy != HWLOC_MEMBIND_BIND) + || flags & HWLOC_MEMBIND_NOCPUBIND) { + errno = ENOSYS; + return -1; + } + + if (policy == HWLOC_MEMBIND_DEFAULT) { + cpuset = hwloc_topology_get_complete_cpuset(topology); + } else { + cpuset = _cpuset = hwloc_bitmap_alloc(); + hwloc_cpuset_from_nodeset(topology, _cpuset, nodeset); + } + + ret = hwloc_win_set_proc_cpubind(topology, pid, cpuset, + (flags & HWLOC_MEMBIND_STRICT) ? 
HWLOC_CPUBIND_STRICT : 0); + hwloc_bitmap_free(_cpuset); + return ret; +} + +static int +hwloc_win_set_thisproc_membind(hwloc_topology_t topology, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) +{ + return hwloc_win_set_proc_membind(topology, GetCurrentProcess(), nodeset, policy, flags); +} + + +/******************************** + * get cpu/membind for processes + */ + +static int +hwloc_win_get_proc_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_pid_t proc, hwloc_bitmap_t hwloc_set, int flags) +{ + DWORD_PTR proc_mask, sys_mask; + + assert(nr_processor_groups == 1); + + if (flags & HWLOC_CPUBIND_NOMEMBIND) { + errno = ENOSYS; + return -1; + } + + /* TODO: if we are in a single group (check with GetProcessGroupAffinity()), + * GetProcessAffinityMask() gives the mask within that group. + */ + /* TODO: if we are in multiple groups, GetProcessGroupAffinity() gives their IDs, + * but we don't know their masks. + */ + /* TODO: GetThreadGroupAffinity() for all threads can be smaller than the whole process affinity, + * maybe because of process-specific resource locality. + */ + + if (!GetProcessAffinityMask(proc, &proc_mask, &sys_mask)) + return -1; + hwloc_bitmap_from_ULONG_PTR(hwloc_set, proc_mask); + return 0; +} + +static int +hwloc_win_get_proc_membind(hwloc_topology_t topology, hwloc_pid_t pid, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + int ret; + hwloc_cpuset_t cpuset = hwloc_bitmap_alloc(); + ret = hwloc_win_get_proc_cpubind(topology, pid, cpuset, + (flags & HWLOC_MEMBIND_STRICT) ? 
HWLOC_CPUBIND_STRICT : 0); + if (!ret) { + *policy = HWLOC_MEMBIND_BIND; + hwloc_cpuset_to_nodeset(topology, cpuset, nodeset); + } + hwloc_bitmap_free(cpuset); + return ret; +} + +static int +hwloc_win_get_thisproc_cpubind(hwloc_topology_t topology, hwloc_bitmap_t hwloc_cpuset, int flags) +{ + return hwloc_win_get_proc_cpubind(topology, GetCurrentProcess(), hwloc_cpuset, flags); +} + +static int +hwloc_win_get_thisproc_membind(hwloc_topology_t topology, hwloc_nodeset_t nodeset, hwloc_membind_policy_t * policy, int flags) +{ + return hwloc_win_get_proc_membind(topology, GetCurrentProcess(), nodeset, policy, flags); +} + + +/************************ + * membind alloc/free + */ + +static void * +hwloc_win_alloc(hwloc_topology_t topology __hwloc_attribute_unused, size_t len) { + return VirtualAlloc(NULL, len, MEM_COMMIT|MEM_RESERVE, PAGE_EXECUTE_READWRITE); +} + +static void * +hwloc_win_alloc_membind(hwloc_topology_t topology __hwloc_attribute_unused, size_t len, hwloc_const_nodeset_t nodeset, hwloc_membind_policy_t policy, int flags) { + int node; + + switch (policy) { + case HWLOC_MEMBIND_DEFAULT: + case HWLOC_MEMBIND_BIND: + break; + default: + errno = ENOSYS; + return hwloc_alloc_or_fail(topology, len, flags); + } + + if (flags & HWLOC_MEMBIND_STRICT) { + errno = ENOSYS; + return NULL; + } + + if (policy == HWLOC_MEMBIND_DEFAULT + || hwloc_bitmap_isequal(nodeset, hwloc_topology_get_complete_nodeset(topology))) + return hwloc_win_alloc(topology, len); + + if (hwloc_bitmap_weight(nodeset) != 1) { + /* Not a single node, can't do this */ + errno = EXDEV; + return hwloc_alloc_or_fail(topology, len, flags); + } + + node = hwloc_bitmap_first(nodeset); + return VirtualAllocExNumaProc(GetCurrentProcess(), NULL, len, MEM_COMMIT|MEM_RESERVE, PAGE_EXECUTE_READWRITE, node); +} + +static int +hwloc_win_free_membind(hwloc_topology_t topology __hwloc_attribute_unused, void *addr, size_t len __hwloc_attribute_unused) { + if (!addr) + return 0; + if 
(!VirtualFreeExProc(GetCurrentProcess(), addr, 0, MEM_RELEASE)) + return -1; + return 0; +} + + +/********************** + * membind for areas + */ + +static int +hwloc_win_get_area_memlocation(hwloc_topology_t topology __hwloc_attribute_unused, const void *addr, size_t len, hwloc_nodeset_t nodeset, int flags __hwloc_attribute_unused) +{ + SYSTEM_INFO SystemInfo; + DWORD page_size; + uintptr_t start; + unsigned nb; + PSAPI_WORKING_SET_EX_INFORMATION *pv; + unsigned i; + + GetSystemInfo(&SystemInfo); + page_size = SystemInfo.dwPageSize; + + start = (((uintptr_t) addr) / page_size) * page_size; + nb = (unsigned)((((uintptr_t) addr + len - start) + page_size - 1) / page_size); + + if (!nb) + nb = 1; + + pv = calloc(nb, sizeof(*pv)); + if (!pv) + return -1; + + for (i = 0; i < nb; i++) + pv[i].VirtualAddress = (void*) (start + i * page_size); + if (!QueryWorkingSetExProc(GetCurrentProcess(), pv, nb * sizeof(*pv))) { + free(pv); + return -1; + } + + for (i = 0; i < nb; i++) { + if (pv[i].VirtualAttributes.Valid) + hwloc_bitmap_set(nodeset, pv[i].VirtualAttributes.Node); + } + + free(pv); + return 0; +} + + +/************************* + * discovery + */ + +static int +hwloc_look_windows(struct hwloc_backend *backend) +{ + struct hwloc_topology *topology = backend->topology; + hwloc_bitmap_t groups_pu_set = NULL; + SYSTEM_INFO SystemInfo; + DWORD length; + int gotnuma = 0; + int gotnumamemory = 0; + + if (topology->levels[0][0]->cpuset) + /* somebody discovered things */ + return -1; + + hwloc_alloc_root_sets(topology->levels[0][0]); + + GetSystemInfo(&SystemInfo); + + if (!GetLogicalProcessorInformationExProc && GetLogicalProcessorInformationProc) { + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION procInfo, tmpprocInfo; + unsigned id; + unsigned i; + struct hwloc_obj *obj; + hwloc_obj_type_t type; + + length = 0; + procInfo = NULL; + + while (1) { + if (GetLogicalProcessorInformationProc(procInfo, &length)) + break; + if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) + return -1; 
+ tmpprocInfo = realloc(procInfo, length); + if (!tmpprocInfo) { + free(procInfo); + goto out; + } + procInfo = tmpprocInfo; + } + + assert(!length || procInfo); + + for (i = 0; i < length / sizeof(*procInfo); i++) { + + /* Ignore unknown caches */ + if (procInfo->Relationship == RelationCache + && procInfo->Cache.Type != CacheUnified + && procInfo->Cache.Type != CacheData + && procInfo->Cache.Type != CacheInstruction) + continue; + + id = HWLOC_UNKNOWN_INDEX; + switch (procInfo[i].Relationship) { + case RelationNumaNode: + type = HWLOC_OBJ_NUMANODE; + id = procInfo[i].NumaNode.NodeNumber; + gotnuma++; + if (id > max_numanode_index) + max_numanode_index = id; + break; + case RelationProcessorPackage: + type = HWLOC_OBJ_PACKAGE; + break; + case RelationCache: + type = (procInfo[i].Cache.Type == CacheInstruction ? HWLOC_OBJ_L1ICACHE : HWLOC_OBJ_L1CACHE) + procInfo[i].Cache.Level - 1; + break; + case RelationProcessorCore: + type = HWLOC_OBJ_CORE; + break; + case RelationGroup: + default: + type = HWLOC_OBJ_GROUP; + break; + } + + if (!hwloc_filter_check_keep_object_type(topology, type)) + continue; + + obj = hwloc_alloc_setup_object(topology, type, id); + obj->cpuset = hwloc_bitmap_alloc(); + hwloc_debug("%s#%u mask %llx\n", hwloc_obj_type_string(type), id, (unsigned long long) procInfo[i].ProcessorMask); + /* ProcessorMask is a ULONG_PTR */ + hwloc_bitmap_set_ith_ULONG_PTR(obj->cpuset, 0, procInfo[i].ProcessorMask); + hwloc_debug_2args_bitmap("%s#%u bitmap %s\n", hwloc_obj_type_string(type), id, obj->cpuset); + + switch (type) { + case HWLOC_OBJ_NUMANODE: + { + ULONGLONG avail; + obj->nodeset = hwloc_bitmap_alloc(); + hwloc_bitmap_set(obj->nodeset, id); + if ((GetNumaAvailableMemoryNodeExProc && GetNumaAvailableMemoryNodeExProc(id, &avail)) + || (GetNumaAvailableMemoryNodeProc && GetNumaAvailableMemoryNodeProc(id, &avail))) { + obj->attr->numanode.local_memory = avail; + gotnumamemory++; + } + obj->attr->numanode.page_types_len = 2; + obj->attr->numanode.page_types 
= malloc(2 * sizeof(*obj->attr->numanode.page_types)); + memset(obj->attr->numanode.page_types, 0, 2 * sizeof(*obj->attr->numanode.page_types)); + obj->attr->numanode.page_types_len = 1; + obj->attr->numanode.page_types[0].size = SystemInfo.dwPageSize; +#if HAVE_DECL__SC_LARGE_PAGESIZE + obj->attr->numanode.page_types_len++; + obj->attr->numanode.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE); +#endif + break; + } + case HWLOC_OBJ_L1CACHE: + case HWLOC_OBJ_L2CACHE: + case HWLOC_OBJ_L3CACHE: + case HWLOC_OBJ_L4CACHE: + case HWLOC_OBJ_L5CACHE: + case HWLOC_OBJ_L1ICACHE: + case HWLOC_OBJ_L2ICACHE: + case HWLOC_OBJ_L3ICACHE: + obj->attr->cache.size = procInfo[i].Cache.Size; + obj->attr->cache.associativity = procInfo[i].Cache.Associativity == CACHE_FULLY_ASSOCIATIVE ? -1 : procInfo[i].Cache.Associativity ; + obj->attr->cache.linesize = procInfo[i].Cache.LineSize; + obj->attr->cache.depth = procInfo[i].Cache.Level; + switch (procInfo->Cache.Type) { + case CacheUnified: + obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED; + break; + case CacheData: + obj->attr->cache.type = HWLOC_OBJ_CACHE_DATA; + break; + case CacheInstruction: + obj->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION; + break; + default: + hwloc_free_unlinked_object(obj); + continue; + } + break; + case HWLOC_OBJ_GROUP: + obj->attr->group.kind = procInfo[i].Relationship == RelationGroup ? 
HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP : HWLOC_GROUP_KIND_WINDOWS_RELATIONSHIP_UNKNOWN; + break; + default: + break; + } + hwloc_insert_object_by_cpuset(topology, obj); + } + + free(procInfo); + } + + if (GetLogicalProcessorInformationExProc) { + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX procInfoTotal, tmpprocInfoTotal, procInfo; + unsigned id; + struct hwloc_obj *obj; + hwloc_obj_type_t type; + + length = 0; + procInfoTotal = NULL; + + while (1) { + if (GetLogicalProcessorInformationExProc(RelationAll, procInfoTotal, &length)) + break; + if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) + return -1; + tmpprocInfoTotal = realloc(procInfoTotal, length); + if (!tmpprocInfoTotal) { + free(procInfoTotal); + goto out; + } + procInfoTotal = tmpprocInfoTotal; + } + + for (procInfo = procInfoTotal; + (void*) procInfo < (void*) ((uintptr_t) procInfoTotal + length); + procInfo = (void*) ((uintptr_t) procInfo + procInfo->Size)) { + unsigned num, i; + GROUP_AFFINITY *GroupMask; + + /* Ignore unknown caches */ + if (procInfo->Relationship == RelationCache + && procInfo->Cache.Type != CacheUnified + && procInfo->Cache.Type != CacheData + && procInfo->Cache.Type != CacheInstruction) + continue; + + id = HWLOC_UNKNOWN_INDEX; + switch (procInfo->Relationship) { + case RelationNumaNode: + type = HWLOC_OBJ_NUMANODE; + num = 1; + GroupMask = &procInfo->NumaNode.GroupMask; + id = procInfo->NumaNode.NodeNumber; + gotnuma++; + if (id > max_numanode_index) + max_numanode_index = id; + break; + case RelationProcessorPackage: + type = HWLOC_OBJ_PACKAGE; + num = procInfo->Processor.GroupCount; + GroupMask = procInfo->Processor.GroupMask; + break; + case RelationCache: + type = (procInfo->Cache.Type == CacheInstruction ? 
HWLOC_OBJ_L1ICACHE : HWLOC_OBJ_L1CACHE) + procInfo->Cache.Level - 1; + num = 1; + GroupMask = &procInfo->Cache.GroupMask; + break; + case RelationProcessorCore: + type = HWLOC_OBJ_CORE; + num = procInfo->Processor.GroupCount; + GroupMask = procInfo->Processor.GroupMask; + break; + case RelationGroup: + /* So strange an interface... */ + for (id = 0; id < procInfo->Group.ActiveGroupCount; id++) { + KAFFINITY mask; + hwloc_bitmap_t set; + + set = hwloc_bitmap_alloc(); + mask = procInfo->Group.GroupInfo[id].ActiveProcessorMask; + hwloc_debug("group %u %d cpus mask %lx\n", id, + procInfo->Group.GroupInfo[id].ActiveProcessorCount, mask); + /* KAFFINITY is ULONG_PTR */ + hwloc_bitmap_set_ith_ULONG_PTR(set, id, mask); + /* FIXME: what if running 32bits on a 64bits windows with 64-processor groups? + * ULONG_PTR is 32bits, so half the group is invisible? + * maybe scale id to id*8/sizeof(ULONG_PTR) so that groups are 64-PU aligned? + */ + hwloc_debug_2args_bitmap("group %u %d bitmap %s\n", id, procInfo->Group.GroupInfo[id].ActiveProcessorCount, set); + + /* save the set of PUs so that we can create them at the end */ + if (!groups_pu_set) + groups_pu_set = hwloc_bitmap_alloc(); + hwloc_bitmap_or(groups_pu_set, groups_pu_set, set); + + if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) { + obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, id); + obj->cpuset = set; + obj->attr->group.kind = HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP; + hwloc_insert_object_by_cpuset(topology, obj); + } else + hwloc_bitmap_free(set); + } + continue; + default: + /* Don't know how to get the mask. 
*/ + hwloc_debug("unknown relation %d\n", procInfo->Relationship); + continue; + } + + if (!hwloc_filter_check_keep_object_type(topology, type)) + continue; + + obj = hwloc_alloc_setup_object(topology, type, id); + obj->cpuset = hwloc_bitmap_alloc(); + for (i = 0; i < num; i++) { + hwloc_debug("%s#%u %d: mask %d:%lx\n", hwloc_obj_type_string(type), id, i, GroupMask[i].Group, GroupMask[i].Mask); + /* GROUP_AFFINITY.Mask is KAFFINITY, which is ULONG_PTR */ + hwloc_bitmap_set_ith_ULONG_PTR(obj->cpuset, GroupMask[i].Group, GroupMask[i].Mask); + /* FIXME: scale id to id*8/sizeof(ULONG_PTR) as above? */ + } + hwloc_debug_2args_bitmap("%s#%u bitmap %s\n", hwloc_obj_type_string(type), id, obj->cpuset); + switch (type) { + case HWLOC_OBJ_NUMANODE: + { + ULONGLONG avail; + obj->nodeset = hwloc_bitmap_alloc(); + hwloc_bitmap_set(obj->nodeset, id); + if ((GetNumaAvailableMemoryNodeExProc && GetNumaAvailableMemoryNodeExProc(id, &avail)) + || (GetNumaAvailableMemoryNodeProc && GetNumaAvailableMemoryNodeProc(id, &avail))) { + obj->attr->numanode.local_memory = avail; + gotnumamemory++; + } + obj->attr->numanode.page_types = malloc(2 * sizeof(*obj->attr->numanode.page_types)); + memset(obj->attr->numanode.page_types, 0, 2 * sizeof(*obj->attr->numanode.page_types)); + obj->attr->numanode.page_types_len = 1; + obj->attr->numanode.page_types[0].size = SystemInfo.dwPageSize; +#if HAVE_DECL__SC_LARGE_PAGESIZE + obj->attr->numanode.page_types_len++; + obj->attr->numanode.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE); +#endif + break; + } + case HWLOC_OBJ_L1CACHE: + case HWLOC_OBJ_L2CACHE: + case HWLOC_OBJ_L3CACHE: + case HWLOC_OBJ_L4CACHE: + case HWLOC_OBJ_L5CACHE: + case HWLOC_OBJ_L1ICACHE: + case HWLOC_OBJ_L2ICACHE: + case HWLOC_OBJ_L3ICACHE: + obj->attr->cache.size = procInfo->Cache.CacheSize; + obj->attr->cache.associativity = procInfo->Cache.Associativity == CACHE_FULLY_ASSOCIATIVE ? 
-1 : procInfo->Cache.Associativity ; + obj->attr->cache.linesize = procInfo->Cache.LineSize; + obj->attr->cache.depth = procInfo->Cache.Level; + switch (procInfo->Cache.Type) { + case CacheUnified: + obj->attr->cache.type = HWLOC_OBJ_CACHE_UNIFIED; + break; + case CacheData: + obj->attr->cache.type = HWLOC_OBJ_CACHE_DATA; + break; + case CacheInstruction: + obj->attr->cache.type = HWLOC_OBJ_CACHE_INSTRUCTION; + break; + default: + hwloc_free_unlinked_object(obj); + continue; + } + break; + default: + break; + } + hwloc_insert_object_by_cpuset(topology, obj); + } + free(procInfoTotal); + } + + topology->support.discovery->pu = 1; + topology->support.discovery->numa = gotnuma; + topology->support.discovery->numa_memory = gotnumamemory; + + if (groups_pu_set) { + /* the system supports multiple Groups. + * PU indexes may be discontiguous, especially if Groups contain less than 64 procs. + */ + hwloc_obj_t obj; + unsigned idx; + hwloc_bitmap_foreach_begin(idx, groups_pu_set) { + obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PU, idx); + obj->cpuset = hwloc_bitmap_alloc(); + hwloc_bitmap_only(obj->cpuset, idx); + hwloc_debug_1arg_bitmap("cpu %u has cpuset %s\n", + idx, obj->cpuset); + hwloc_insert_object_by_cpuset(topology, obj); + } hwloc_bitmap_foreach_end(); + hwloc_bitmap_free(groups_pu_set); + } else { + /* no processor groups */ + SYSTEM_INFO sysinfo; + hwloc_obj_t obj; + unsigned idx; + GetSystemInfo(&sysinfo); + for(idx=0; idx<32; idx++) + if (sysinfo.dwActiveProcessorMask & (((DWORD_PTR)1)<cpuset = hwloc_bitmap_alloc(); + hwloc_bitmap_only(obj->cpuset, idx); + hwloc_debug_1arg_bitmap("cpu %u has cpuset %s\n", + idx, obj->cpuset); + hwloc_insert_object_by_cpuset(topology, obj); + } + } + + out: + hwloc_obj_add_info(topology->levels[0][0], "Backend", "Windows"); + hwloc_add_uname_info(topology, NULL); + return 0; +} + +void +hwloc_set_windows_hooks(struct hwloc_binding_hooks *hooks, + struct hwloc_topology_support *support) +{ + if 
(GetCurrentProcessorNumberExProc || (GetCurrentProcessorNumberProc && nr_processor_groups == 1)) + hooks->get_thisthread_last_cpu_location = hwloc_win_get_thisthread_last_cpu_location; + + if (nr_processor_groups == 1) { + hooks->set_proc_cpubind = hwloc_win_set_proc_cpubind; + hooks->get_proc_cpubind = hwloc_win_get_proc_cpubind; + hooks->set_thisproc_cpubind = hwloc_win_set_thisproc_cpubind; + hooks->get_thisproc_cpubind = hwloc_win_get_thisproc_cpubind; + hooks->set_proc_membind = hwloc_win_set_proc_membind; + hooks->get_proc_membind = hwloc_win_get_proc_membind; + hooks->set_thisproc_membind = hwloc_win_set_thisproc_membind; + hooks->get_thisproc_membind = hwloc_win_get_thisproc_membind; + } + if (nr_processor_groups == 1 || SetThreadGroupAffinityProc) { + hooks->set_thread_cpubind = hwloc_win_set_thread_cpubind; + hooks->set_thisthread_cpubind = hwloc_win_set_thisthread_cpubind; + hooks->set_thisthread_membind = hwloc_win_set_thisthread_membind; + } + if (GetThreadGroupAffinityProc) { + hooks->get_thread_cpubind = hwloc_win_get_thread_cpubind; + hooks->get_thisthread_cpubind = hwloc_win_get_thisthread_cpubind; + hooks->get_thisthread_membind = hwloc_win_get_thisthread_membind; + } + + if (VirtualAllocExNumaProc) { + hooks->alloc_membind = hwloc_win_alloc_membind; + hooks->alloc = hwloc_win_alloc; + hooks->free_membind = hwloc_win_free_membind; + support->membind->bind_membind = 1; + } + + if (QueryWorkingSetExProc && max_numanode_index <= 63 /* PSAPI_WORKING_SET_EX_BLOCK.Node is 6 bits only */) + hooks->get_area_memlocation = hwloc_win_get_area_memlocation; +} + +static int hwloc_windows_component_init(unsigned long flags __hwloc_attribute_unused) +{ + hwloc_win_get_function_ptrs(); + return 0; +} + +static void hwloc_windows_component_finalize(unsigned long flags __hwloc_attribute_unused) +{ +} + +static struct hwloc_backend * +hwloc_windows_component_instantiate(struct hwloc_disc_component *component, + const void *_data1 __hwloc_attribute_unused, + const 
void *_data2 __hwloc_attribute_unused, + const void *_data3 __hwloc_attribute_unused) +{ + struct hwloc_backend *backend; + backend = hwloc_backend_alloc(component); + if (!backend) + return NULL; + backend->discover = hwloc_look_windows; + return backend; +} + +static struct hwloc_disc_component hwloc_windows_disc_component = { + HWLOC_DISC_COMPONENT_TYPE_CPU, + "windows", + HWLOC_DISC_COMPONENT_TYPE_GLOBAL, + hwloc_windows_component_instantiate, + 50, + 1, + NULL +}; + +const struct hwloc_component hwloc_windows_component = { + HWLOC_COMPONENT_ABI, + hwloc_windows_component_init, hwloc_windows_component_finalize, + HWLOC_COMPONENT_TYPE_DISC, + 0, + &hwloc_windows_disc_component +}; + +int +hwloc_fallback_nbprocessors(struct hwloc_topology *topology __hwloc_attribute_unused) { + int n; + SYSTEM_INFO sysinfo; + + /* by default, ignore groups (return only the number in the current group) */ + GetSystemInfo(&sysinfo); + n = sysinfo.dwNumberOfProcessors; /* FIXME could be non-contigous, rather return a mask from dwActiveProcessorMask? */ + + if (nr_processor_groups > 1) { + /* assume n-1 groups are complete, since that's how we store things in cpusets */ + if (GetActiveProcessorCountProc) + n = MAXIMUM_PROC_PER_GROUP*(nr_processor_groups-1) + + GetActiveProcessorCountProc((WORD)nr_processor_groups-1); + else + n = MAXIMUM_PROC_PER_GROUP*nr_processor_groups; + } + + return n; +} diff --git a/src/3rdparty/hwloc/src/topology-x86.c b/src/3rdparty/hwloc/src/topology-x86.c new file mode 100644 index 00000000..4aefdcf1 --- /dev/null +++ b/src/3rdparty/hwloc/src/topology-x86.c @@ -0,0 +1,1583 @@ +/* + * Copyright © 2010-2019 Inria. All rights reserved. + * Copyright © 2010-2013 Université Bordeaux + * Copyright © 2010-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + * + * + * This backend is only used when the operating system does not export + * the necessary hardware topology information to user-space applications. 
+ * Currently, only the FreeBSD backend relies on this x86 backend. + * + * Other backends such as Linux have their own way to retrieve various + * pieces of hardware topology information from the operating system + * on various architectures, without having to use this x86-specific code. + */ + +#include +#include +#include +#include +#include + +#include + +#include +#ifdef HAVE_DIRENT_H +#include +#endif +#ifdef HAVE_VALGRIND_VALGRIND_H +#include +#endif + +struct hwloc_x86_backend_data_s { + unsigned nbprocs; + hwloc_bitmap_t apicid_set; + int apicid_unique; + char *src_cpuiddump_path; + int is_knl; +}; + +/************************************ + * Management of cpuid dump as input + */ + +struct cpuiddump { + unsigned nr; + struct cpuiddump_entry { + unsigned inmask; /* which of ine[abcd]x are set on input */ + unsigned ineax; + unsigned inebx; + unsigned inecx; + unsigned inedx; + unsigned outeax; + unsigned outebx; + unsigned outecx; + unsigned outedx; + } *entries; +}; + +static void +cpuiddump_free(struct cpuiddump *cpuiddump) +{ + if (cpuiddump->nr) + free(cpuiddump->entries); + free(cpuiddump); +} + +static struct cpuiddump * +cpuiddump_read(const char *dirpath, unsigned idx) +{ + struct cpuiddump *cpuiddump; + struct cpuiddump_entry *cur; + FILE *file; + char line[128]; + unsigned nr; + + cpuiddump = malloc(sizeof(*cpuiddump)); + if (!cpuiddump) { + fprintf(stderr, "Failed to allocate cpuiddump for PU #%u, ignoring cpuiddump.\n", idx); + goto out; + } + + { + size_t filenamelen = strlen(dirpath) + 15; + HWLOC_VLA(char, filename, filenamelen); + snprintf(filename, filenamelen, "%s/pu%u", dirpath, idx); + file = fopen(filename, "r"); + if (!file) { + fprintf(stderr, "Could not read dumped cpuid file %s, ignoring cpuiddump.\n", filename); + goto out_with_dump; + } + } + + nr = 0; + while (fgets(line, sizeof(line), file)) + nr++; + cpuiddump->entries = malloc(nr * sizeof(struct cpuiddump_entry)); + if (!cpuiddump->entries) { + fprintf(stderr, "Failed to 
allocate %u cpuiddump entries for PU #%u, ignoring cpuiddump.\n", nr, idx); + goto out_with_file; + } + + fseek(file, 0, SEEK_SET); + cur = &cpuiddump->entries[0]; + nr = 0; + while (fgets(line, sizeof(line), file)) { + if (*line == '#') + continue; + if (sscanf(line, "%x %x %x %x %x => %x %x %x %x", + &cur->inmask, + &cur->ineax, &cur->inebx, &cur->inecx, &cur->inedx, + &cur->outeax, &cur->outebx, &cur->outecx, &cur->outedx) == 9) { + cur++; + nr++; + } + } + + cpuiddump->nr = nr; + fclose(file); + return cpuiddump; + + out_with_file: + fclose(file); + out_with_dump: + free(cpuiddump); + out: + return NULL; +} + +static void +cpuiddump_find_by_input(unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx, struct cpuiddump *cpuiddump) +{ + unsigned i; + + for(i=0; inr; i++) { + struct cpuiddump_entry *entry = &cpuiddump->entries[i]; + if ((entry->inmask & 0x1) && *eax != entry->ineax) + continue; + if ((entry->inmask & 0x2) && *ebx != entry->inebx) + continue; + if ((entry->inmask & 0x4) && *ecx != entry->inecx) + continue; + if ((entry->inmask & 0x8) && *edx != entry->inedx) + continue; + *eax = entry->outeax; + *ebx = entry->outebx; + *ecx = entry->outecx; + *edx = entry->outedx; + return; + } + + fprintf(stderr, "Couldn't find %x,%x,%x,%x in dumped cpuid, returning 0s.\n", + *eax, *ebx, *ecx, *edx); + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; +} + +static void cpuid_or_from_dump(unsigned *eax, unsigned *ebx, unsigned *ecx, unsigned *edx, struct cpuiddump *src_cpuiddump) +{ + if (src_cpuiddump) { + cpuiddump_find_by_input(eax, ebx, ecx, edx, src_cpuiddump); + } else { + hwloc_x86_cpuid(eax, ebx, ecx, edx); + } +} + +/******************************* + * Core detection routines and structures + */ + +#define has_topoext(features) ((features)[6] & (1 << 22)) +#define has_x2apic(features) ((features)[4] & (1 << 21)) + +struct cacheinfo { + hwloc_obj_cache_type_t type; + unsigned level; + unsigned nbthreads_sharing; + unsigned cacheid; + + unsigned linesize; + 
unsigned linepart; + int inclusive; + int ways; + unsigned sets; + unsigned long size; +}; + +struct procinfo { + unsigned present; + unsigned apicid; + unsigned packageid; + unsigned dieid; + unsigned nodeid; + unsigned unitid; + unsigned threadid; + unsigned coreid; + unsigned *otherids; + unsigned levels; + unsigned numcaches; + struct cacheinfo *cache; + char cpuvendor[13]; + char cpumodel[3*4*4+1]; + unsigned cpustepping; + unsigned cpumodelnumber; + unsigned cpufamilynumber; +}; + +enum cpuid_type { + intel, + amd, + zhaoxin, + hygon, + unknown +}; + +static void fill_amd_cache(struct procinfo *infos, unsigned level, hwloc_obj_cache_type_t type, unsigned nbthreads_sharing, unsigned cpuid) +{ + struct cacheinfo *cache, *tmpcaches; + unsigned cachenum; + unsigned long size = 0; + + if (level == 1) + size = ((cpuid >> 24)) << 10; + else if (level == 2) + size = ((cpuid >> 16)) << 10; + else if (level == 3) + size = ((cpuid >> 18)) << 19; + if (!size) + return; + + tmpcaches = realloc(infos->cache, (infos->numcaches+1)*sizeof(*infos->cache)); + if (!tmpcaches) + /* failed to allocated, ignore that cache */ + return; + infos->cache = tmpcaches; + cachenum = infos->numcaches++; + + cache = &infos->cache[cachenum]; + + cache->type = type; + cache->level = level; + cache->nbthreads_sharing = nbthreads_sharing; + cache->linesize = cpuid & 0xff; + cache->linepart = 0; + cache->inclusive = 0; /* old AMD (K8-K10) supposed to have exclusive caches */ + + if (level == 1) { + cache->ways = (cpuid >> 16) & 0xff; + if (cache->ways == 0xff) + /* Fully associative */ + cache->ways = -1; + } else { + static const unsigned ways_tab[] = { 0, 1, 2, 0, 4, 0, 8, 0, 16, 0, 32, 48, 64, 96, 128, -1 }; + unsigned ways = (cpuid >> 12) & 0xf; + cache->ways = ways_tab[ways]; + } + cache->size = size; + cache->sets = 0; + + hwloc_debug("cache L%u t%u linesize %u ways %d size %luKB\n", cache->level, cache->nbthreads_sharing, cache->linesize, cache->ways, cache->size >> 10); +} + +static void 
look_exttopoenum(struct procinfo *infos, unsigned leaf, struct cpuiddump *src_cpuiddump) +{ + unsigned level, apic_nextshift, apic_number, apic_type, apic_id = 0, apic_shift = 0, id; + unsigned threadid __hwloc_attribute_unused = 0; /* shut-up compiler */ + unsigned eax, ebx, ecx = 0, edx; + int apic_packageshift = 0; + + for (level = 0; ; level++) { + ecx = level; + eax = leaf; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + if (!eax && !ebx) + break; + apic_packageshift = eax & 0x1f; + } + + if (level) { + infos->otherids = malloc(level * sizeof(*infos->otherids)); + if (infos->otherids) { + infos->levels = level; + for (level = 0; ; level++) { + ecx = level; + eax = leaf; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + if (!eax && !ebx) + break; + apic_nextshift = eax & 0x1f; + apic_number = ebx & 0xffff; + apic_type = (ecx & 0xff00) >> 8; + apic_id = edx; + id = (apic_id >> apic_shift) & ((1 << (apic_packageshift - apic_shift)) - 1); + hwloc_debug("x2APIC %08x %u: nextshift %u num %2u type %u id %2u\n", apic_id, level, apic_nextshift, apic_number, apic_type, id); + infos->apicid = apic_id; + infos->otherids[level] = UINT_MAX; + switch (apic_type) { + case 1: + threadid = id; + /* apic_number is the actual number of threads per core */ + break; + case 2: + infos->coreid = id; + /* apic_number is the actual number of threads per module */ + break; + case 5: + infos->dieid = id; + /* apic_number is the actual number of threads per package */ + break; + default: + hwloc_debug("x2APIC %u: unknown type %u\n", level, apic_type); + infos->otherids[level] = apic_id >> apic_shift; + break; + } + apic_shift = apic_nextshift; + } + infos->apicid = apic_id; + infos->packageid = apic_id >> apic_shift; + hwloc_debug("x2APIC remainder: %u\n", infos->packageid); + hwloc_debug("this is thread %u of core %u\n", threadid, infos->coreid); + } + } +} + +/* Fetch information from the processor itself thanks to cpuid and store it in + * infos for 
summarize to analyze them globally */ +static void look_proc(struct hwloc_backend *backend, struct procinfo *infos, unsigned highest_cpuid, unsigned highest_ext_cpuid, unsigned *features, enum cpuid_type cpuid_type, struct cpuiddump *src_cpuiddump) +{ + struct hwloc_x86_backend_data_s *data = backend->private_data; + unsigned eax, ebx, ecx = 0, edx; + unsigned cachenum; + struct cacheinfo *cache; + unsigned regs[4]; + unsigned legacy_max_log_proc; /* not valid on Intel processors with > 256 threads, or when cpuid 0x80000008 is supported */ + unsigned legacy_log_proc_id; + unsigned _model, _extendedmodel, _family, _extendedfamily; + + infos->present = 1; + + /* Get apicid, legacy_max_log_proc, packageid, legacy_log_proc_id from cpuid 0x01 */ + eax = 0x01; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + infos->apicid = ebx >> 24; + if (edx & (1 << 28)) + legacy_max_log_proc = 1 << hwloc_flsl(((ebx >> 16) & 0xff) - 1); + else + legacy_max_log_proc = 1; + hwloc_debug("APIC ID 0x%02x legacy_max_log_proc %u\n", infos->apicid, legacy_max_log_proc); + infos->packageid = infos->apicid / legacy_max_log_proc; + legacy_log_proc_id = infos->apicid % legacy_max_log_proc; + hwloc_debug("phys %u legacy thread %u\n", infos->packageid, legacy_log_proc_id); + + /* Get cpu model/family/stepping numbers from same cpuid */ + _model = (eax>>4) & 0xf; + _extendedmodel = (eax>>16) & 0xf; + _family = (eax>>8) & 0xf; + _extendedfamily = (eax>>20) & 0xff; + if ((cpuid_type == intel || cpuid_type == amd || cpuid_type == hygon) && _family == 0xf) { + infos->cpufamilynumber = _family + _extendedfamily; + } else { + infos->cpufamilynumber = _family; + } + if ((cpuid_type == intel && (_family == 0x6 || _family == 0xf)) + || ((cpuid_type == amd || cpuid_type == hygon) && _family == 0xf) + || (cpuid_type == zhaoxin && (_family == 0x6 || _family == 0x7))) { + infos->cpumodelnumber = _model + (_extendedmodel << 4); + } else { + infos->cpumodelnumber = _model; + } + infos->cpustepping = 
eax & 0xf;
+
+  if (cpuid_type == intel && infos->cpufamilynumber == 0x6 &&
+      (infos->cpumodelnumber == 0x57 || infos->cpumodelnumber == 0x85))
+    data->is_knl = 1; /* KNM is the same as KNL */
+
+  /* Get cpu vendor string from cpuid 0x00 */
+  memset(regs, 0, sizeof(regs));
+  regs[0] = 0;
+  cpuid_or_from_dump(&regs[0], &regs[1], &regs[3], &regs[2], src_cpuiddump);
+  memcpy(infos->cpuvendor, regs+1, 4*3);
+  /* infos was calloc'ed, already ends with \0 */
+
+  /* Get cpu model string from cpuid 0x80000002-4 */
+  if (highest_ext_cpuid >= 0x80000004) {
+    memset(regs, 0, sizeof(regs));
+    regs[0] = 0x80000002;
+    cpuid_or_from_dump(&regs[0], &regs[1], &regs[2], &regs[3], src_cpuiddump);
+    memcpy(infos->cpumodel, regs, 4*4);
+    regs[0] = 0x80000003;
+    cpuid_or_from_dump(&regs[0], &regs[1], &regs[2], &regs[3], src_cpuiddump);
+    memcpy(infos->cpumodel + 4*4, regs, 4*4);
+    regs[0] = 0x80000004;
+    cpuid_or_from_dump(&regs[0], &regs[1], &regs[2], &regs[3], src_cpuiddump);
+    memcpy(infos->cpumodel + 4*4*2, regs, 4*4);
+    /* infos was calloc'ed, already ends with \0 */
+  }
+
+  /* Get core/thread information from cpuid 0x80000008
+   * (not supported on Intel)
+   */
+  if (cpuid_type != intel && cpuid_type != zhaoxin && highest_ext_cpuid >= 0x80000008) {
+    unsigned max_nbcores;
+    unsigned max_nbthreads;
+    unsigned coreidsize;
+    unsigned logprocid;
+    eax = 0x80000008;
+    cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump);
+    coreidsize = (ecx >> 12) & 0xf;
+    hwloc_debug("core ID size: %u\n", coreidsize);
+    if (!coreidsize) {
+      max_nbcores = (ecx & 0xff) + 1;
+    } else
+      max_nbcores = 1 << coreidsize;
+    hwloc_debug("Thus max # of cores: %u\n", max_nbcores);
+    /* Still no multithreaded AMD */
+    max_nbthreads = 1 ;
+    hwloc_debug("and max # of threads: %u\n", max_nbthreads);
+    /* legacy_max_log_proc is deprecated, it can be smaller than max_nbcores,
+     * which is the maximum number of cores that the processor could theoretically support
+     * (see "Multiple Core Calculation" in the AMD CPUID specification).
+ * Recompute packageid/threadid/coreid accordingly. + */ + infos->packageid = infos->apicid / max_nbcores; + logprocid = infos->apicid % max_nbcores; + infos->threadid = logprocid % max_nbthreads; + infos->coreid = logprocid / max_nbthreads; + hwloc_debug("this is thread %u of core %u\n", infos->threadid, infos->coreid); + } + + infos->numcaches = 0; + infos->cache = NULL; + + /* Get apicid, nodeid, unitid from cpuid 0x8000001e + * and cache information from cpuid 0x8000001d + * (AMD topology extension) + */ + if (cpuid_type != intel && cpuid_type != zhaoxin && has_topoext(features)) { + unsigned apic_id, node_id, nodes_per_proc; + + /* the code below doesn't want any other cache yet */ + assert(!infos->numcaches); + + eax = 0x8000001e; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + infos->apicid = apic_id = eax; + + if (infos->cpufamilynumber == 0x16) { + /* ecx is reserved */ + node_id = 0; + nodes_per_proc = 1; + } else { + /* AMD other families or Hygon family 18h */ + node_id = ecx & 0xff; + nodes_per_proc = ((ecx >> 8) & 7) + 1; + } + infos->nodeid = node_id; + if ((infos->cpufamilynumber == 0x15 && nodes_per_proc > 2) + || ((infos->cpufamilynumber == 0x17 || infos->cpufamilynumber == 0x18) && nodes_per_proc > 4)) { + hwloc_debug("warning: undefined nodes_per_proc value %u, assuming it means %u\n", nodes_per_proc, nodes_per_proc); + } + + if (infos->cpufamilynumber <= 0x16) { /* topoext appeared in 0x15 and compute-units were only used in 0x15 and 0x16 */ + unsigned unit_id, cores_per_unit; + infos->unitid = unit_id = ebx & 0xff; + cores_per_unit = ((ebx >> 8) & 0xff) + 1; + hwloc_debug("topoext %08x, %u nodes, node %u, %u cores in unit %u\n", apic_id, nodes_per_proc, node_id, cores_per_unit, unit_id); + /* coreid and unitid are package-wide (core 0-15 and unit 0-7 on 16-core 2-NUMAnode processor). + * The Linux kernel reduces theses to NUMA-node-wide (by applying %core_per_node and %unit_per node respectively). 
+ * It's not clear if we should do this as well. + */ + } else { + unsigned core_id, threads_per_core; + infos->coreid = core_id = ebx & 0xff; + threads_per_core = ((ebx >> 8) & 0xff) + 1; + hwloc_debug("topoext %08x, %u nodes, node %u, %u threads in core %u\n", apic_id, nodes_per_proc, node_id, threads_per_core, core_id); + } + + for (cachenum = 0; ; cachenum++) { + eax = 0x8000001d; + ecx = cachenum; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + if ((eax & 0x1f) == 0) + break; + infos->numcaches++; + } + + cache = infos->cache = malloc(infos->numcaches * sizeof(*infos->cache)); + if (cache) { + for (cachenum = 0; ; cachenum++) { + unsigned long linesize, linepart, ways, sets; + eax = 0x8000001d; + ecx = cachenum; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + + if ((eax & 0x1f) == 0) + break; + switch (eax & 0x1f) { + case 1: cache->type = HWLOC_OBJ_CACHE_DATA; break; + case 2: cache->type = HWLOC_OBJ_CACHE_INSTRUCTION; break; + default: cache->type = HWLOC_OBJ_CACHE_UNIFIED; break; + } + + cache->level = (eax >> 5) & 0x7; + /* Note: actually number of cores */ + cache->nbthreads_sharing = ((eax >> 14) & 0xfff) + 1; + + cache->linesize = linesize = (ebx & 0xfff) + 1; + cache->linepart = linepart = ((ebx >> 12) & 0x3ff) + 1; + ways = ((ebx >> 22) & 0x3ff) + 1; + + if (eax & (1 << 9)) + /* Fully associative */ + cache->ways = -1; + else + cache->ways = ways; + cache->sets = sets = ecx + 1; + cache->size = linesize * linepart * ways * sets; + cache->inclusive = edx & 0x2; + + hwloc_debug("cache %u L%u%c t%u linesize %lu linepart %lu ways %lu sets %lu, size %luKB\n", + cachenum, cache->level, + cache->type == HWLOC_OBJ_CACHE_DATA ? 'd' : cache->type == HWLOC_OBJ_CACHE_INSTRUCTION ? 
'i' : 'u', + cache->nbthreads_sharing, linesize, linepart, ways, sets, cache->size >> 10); + + cache++; + } + } else { + infos->numcaches = 0; + } + } else { + /* If there's no topoext, + * get cache information from cpuid 0x80000005 and 0x80000006 + * (not supported on Intel) + */ + if (cpuid_type != intel && cpuid_type != zhaoxin && highest_ext_cpuid >= 0x80000005) { + eax = 0x80000005; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + fill_amd_cache(infos, 1, HWLOC_OBJ_CACHE_DATA, 1, ecx); /* private L1d */ + fill_amd_cache(infos, 1, HWLOC_OBJ_CACHE_INSTRUCTION, 1, edx); /* private L1i */ + } + if (cpuid_type != intel && cpuid_type != zhaoxin && highest_ext_cpuid >= 0x80000006) { + eax = 0x80000006; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + if (ecx & 0xf000) + /* This is actually supported on Intel but LinePerTag isn't returned in bits 8-11. + * Could be useful if some Intels (at least before Core micro-architecture) + * support this leaf without leaf 0x4. 
+ */ + fill_amd_cache(infos, 2, HWLOC_OBJ_CACHE_UNIFIED, 1, ecx); /* private L2u */ + if (edx & 0xf000) + fill_amd_cache(infos, 3, HWLOC_OBJ_CACHE_UNIFIED, legacy_max_log_proc, edx); /* package-wide L3u */ + } + } + + /* Get thread/core + cache information from cpuid 0x04 + * (not supported on AMD) + */ + if ((cpuid_type != amd && cpuid_type != hygon) && highest_cpuid >= 0x04) { + unsigned max_nbcores; + unsigned max_nbthreads; + unsigned level; + struct cacheinfo *tmpcaches; + unsigned oldnumcaches = infos->numcaches; /* in case we got caches above */ + + for (cachenum = 0; ; cachenum++) { + eax = 0x04; + ecx = cachenum; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + + hwloc_debug("cache %u type %u\n", cachenum, eax & 0x1f); + if ((eax & 0x1f) == 0) + break; + level = (eax >> 5) & 0x7; + if (data->is_knl && level == 3) + /* KNL reports wrong L3 information (size always 0, cpuset always the entire machine, ignore it */ + break; + infos->numcaches++; + + if (!cachenum) { + /* by the way, get thread/core information from the first cache */ + max_nbcores = ((eax >> 26) & 0x3f) + 1; + max_nbthreads = legacy_max_log_proc / max_nbcores; + hwloc_debug("thus %u threads\n", max_nbthreads); + infos->threadid = legacy_log_proc_id % max_nbthreads; + infos->coreid = legacy_log_proc_id / max_nbthreads; + hwloc_debug("this is thread %u of core %u\n", infos->threadid, infos->coreid); + } + } + + tmpcaches = realloc(infos->cache, infos->numcaches * sizeof(*infos->cache)); + if (!tmpcaches) { + infos->numcaches = oldnumcaches; + } else { + infos->cache = tmpcaches; + cache = &infos->cache[oldnumcaches]; + + for (cachenum = 0; ; cachenum++) { + unsigned long linesize, linepart, ways, sets; + eax = 0x04; + ecx = cachenum; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + + if ((eax & 0x1f) == 0) + break; + level = (eax >> 5) & 0x7; + if (data->is_knl && level == 3) + /* KNL reports wrong L3 information (size always 0, cpuset always the entire machine, 
ignore it */ + break; + switch (eax & 0x1f) { + case 1: cache->type = HWLOC_OBJ_CACHE_DATA; break; + case 2: cache->type = HWLOC_OBJ_CACHE_INSTRUCTION; break; + default: cache->type = HWLOC_OBJ_CACHE_UNIFIED; break; + } + + cache->level = level; + cache->nbthreads_sharing = ((eax >> 14) & 0xfff) + 1; + + cache->linesize = linesize = (ebx & 0xfff) + 1; + cache->linepart = linepart = ((ebx >> 12) & 0x3ff) + 1; + ways = ((ebx >> 22) & 0x3ff) + 1; + if (eax & (1 << 9)) + /* Fully associative */ + cache->ways = -1; + else + cache->ways = ways; + cache->sets = sets = ecx + 1; + cache->size = linesize * linepart * ways * sets; + cache->inclusive = edx & 0x2; + + hwloc_debug("cache %u L%u%c t%u linesize %lu linepart %lu ways %lu sets %lu, size %luKB\n", + cachenum, cache->level, + cache->type == HWLOC_OBJ_CACHE_DATA ? 'd' : cache->type == HWLOC_OBJ_CACHE_INSTRUCTION ? 'i' : 'u', + cache->nbthreads_sharing, linesize, linepart, ways, sets, cache->size >> 10); + cache++; + } + } + } + + if ((cpuid_type == intel) && highest_cpuid >= 0x1f) { + /* Get package/die/module/tile/core/thread information from cpuid 0x1f + * (Intel v2 Extended Topology Enumeration) + */ + look_exttopoenum(infos, 0x1f, src_cpuiddump); + + } else if ((cpuid_type == intel || cpuid_type == zhaoxin) && highest_cpuid >= 0x0b && has_x2apic(features)) { + /* Get package/core/thread information from cpuid 0x0b + * (Intel v1 Extended Topology Enumeration) + */ + look_exttopoenum(infos, 0x0b, src_cpuiddump); + } + + /* Now that we have all info, compute cacheids and apply quirks */ + for (cachenum = 0; cachenum < infos->numcaches; cachenum++) { + cache = &infos->cache[cachenum]; + + /* default cacheid value */ + cache->cacheid = infos->apicid / cache->nbthreads_sharing; + + if (cpuid_type == amd) { + /* AMD quirks */ + if (infos->cpufamilynumber == 0x17 + && cache->level == 3 && cache->nbthreads_sharing == 6) { + /* AMD family 0x17 always shares L3 between 8 APIC ids, + * even when only 6 APIC ids are enabled and 
reported in nbthreads_sharing + * (on 24-core CPUs). + */ + cache->cacheid = infos->apicid / 8; + + } else if (infos->cpufamilynumber== 0x10 && infos->cpumodelnumber == 0x9 + && cache->level == 3 + && (cache->ways == -1 || (cache->ways % 2 == 0)) && cache->nbthreads_sharing >= 8) { + /* Fix AMD family 0x10 model 0x9 (Magny-Cours) with 8 or 12 cores. + * The L3 (and its associativity) is actually split into two halves). + */ + if (cache->nbthreads_sharing == 16) + cache->nbthreads_sharing = 12; /* nbthreads_sharing is a power of 2 but the processor actually has 8 or 12 cores */ + cache->nbthreads_sharing /= 2; + cache->size /= 2; + if (cache->ways != -1) + cache->ways /= 2; + /* AMD Magny-Cours 12-cores processor reserve APIC ids as AAAAAABBBBBB.... + * among first L3 (A), second L3 (B), and unexisting cores (.). + * On multi-socket servers, L3 in non-first sockets may have APIC id ranges + * such as [16-21] that are not aligned on multiple of nbthreads_sharing (6). + * That means, we can't just compare apicid/nbthreads_sharing to identify siblings. + */ + cache->cacheid = (infos->apicid % legacy_max_log_proc) / cache->nbthreads_sharing /* cacheid within the package */ + + 2 * (infos->apicid / legacy_max_log_proc); /* add 2 caches per previous package */ + + } else if (infos->cpufamilynumber == 0x15 + && (infos->cpumodelnumber == 0x1 /* Bulldozer */ || infos->cpumodelnumber == 0x2 /* Piledriver */) + && cache->level == 3 && cache->nbthreads_sharing == 6) { + /* AMD Bulldozer and Piledriver 12-core processors have same APIC ids as Magny-Cours below, + * but we can't merge the checks because the original nbthreads_sharing must be exactly 6 here. 
+ */ + cache->cacheid = (infos->apicid % legacy_max_log_proc) / cache->nbthreads_sharing /* cacheid within the package */ + + 2 * (infos->apicid / legacy_max_log_proc); /* add 2 cache per previous package */ + } + } else if (cpuid_type == hygon) { + if (infos->cpufamilynumber == 0x18 + && cache->level == 3 && cache->nbthreads_sharing == 6) { + /* Hygon family 0x18 always shares L3 between 8 APIC ids, + * even when only 6 APIC ids are enabled and reported in nbthreads_sharing + * (on 24-core CPUs). + */ + cache->cacheid = infos->apicid / 8; + } + } + } + + if (hwloc_bitmap_isset(data->apicid_set, infos->apicid)) + data->apicid_unique = 0; + else + hwloc_bitmap_set(data->apicid_set, infos->apicid); +} + +static void +hwloc_x86_add_cpuinfos(hwloc_obj_t obj, struct procinfo *info, int replace) +{ + char number[12]; + if (info->cpuvendor[0]) + hwloc__add_info_nodup(&obj->infos, &obj->infos_count, "CPUVendor", info->cpuvendor, replace); + snprintf(number, sizeof(number), "%u", info->cpufamilynumber); + hwloc__add_info_nodup(&obj->infos, &obj->infos_count, "CPUFamilyNumber", number, replace); + snprintf(number, sizeof(number), "%u", info->cpumodelnumber); + hwloc__add_info_nodup(&obj->infos, &obj->infos_count, "CPUModelNumber", number, replace); + if (info->cpumodel[0]) { + const char *c = info->cpumodel; + while (*c == ' ') + c++; + hwloc__add_info_nodup(&obj->infos, &obj->infos_count, "CPUModel", c, replace); + } + snprintf(number, sizeof(number), "%u", info->cpustepping); + hwloc__add_info_nodup(&obj->infos, &obj->infos_count, "CPUStepping", number, replace); +} + +/* Analyse information stored in infos, and build/annotate topology levels accordingly */ +static void summarize(struct hwloc_backend *backend, struct procinfo *infos, int fulldiscovery) +{ + struct hwloc_topology *topology = backend->topology; + struct hwloc_x86_backend_data_s *data = backend->private_data; + unsigned nbprocs = data->nbprocs; + hwloc_bitmap_t complete_cpuset = hwloc_bitmap_alloc(); + 
unsigned i, j, l, level; + int one = -1; + hwloc_bitmap_t remaining_cpuset; + int gotnuma = 0; + + for (i = 0; i < nbprocs; i++) + if (infos[i].present) { + hwloc_bitmap_set(complete_cpuset, i); + one = i; + } + + if (one == -1) { + hwloc_bitmap_free(complete_cpuset); + return; + } + + remaining_cpuset = hwloc_bitmap_alloc(); + + /* Ideally, when fulldiscovery=0, we could add any object that doesn't exist yet. + * But what if the x86 and the native backends disagree because one is buggy? Which one to trust? + * We only add missing caches, and annotate other existing objects for now. + */ + + if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_PACKAGE)) { + /* Look for packages */ + hwloc_obj_t package; + + hwloc_bitmap_copy(remaining_cpuset, complete_cpuset); + while ((i = hwloc_bitmap_first(remaining_cpuset)) != (unsigned) -1) { + if (fulldiscovery) { + unsigned packageid = infos[i].packageid; + hwloc_bitmap_t package_cpuset = hwloc_bitmap_alloc(); + + for (j = i; j < nbprocs; j++) { + if (infos[j].packageid == packageid) { + hwloc_bitmap_set(package_cpuset, j); + hwloc_bitmap_clr(remaining_cpuset, j); + } + } + package = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PACKAGE, packageid); + package->cpuset = package_cpuset; + + hwloc_x86_add_cpuinfos(package, &infos[i], 0); + + hwloc_debug_1arg_bitmap("os package %u has cpuset %s\n", + packageid, package_cpuset); + hwloc_insert_object_by_cpuset(topology, package); + + } else { + /* Annotate packages previously-existing packages */ + hwloc_bitmap_t set = hwloc_bitmap_alloc(); + hwloc_bitmap_set(set, i); + package = hwloc_get_next_obj_covering_cpuset_by_type(topology, set, HWLOC_OBJ_PACKAGE, NULL); + hwloc_bitmap_free(set); + if (package) { + /* Found package above that PU, annotate if no such attribute yet */ + hwloc_x86_add_cpuinfos(package, &infos[i], 1); + hwloc_bitmap_andnot(remaining_cpuset, remaining_cpuset, package->cpuset); + } else { + /* No package, annotate the root object */ + 
hwloc_x86_add_cpuinfos(hwloc_get_root_obj(topology), &infos[i], 1); + break; + } + } + } + } + + /* Look for Numa nodes inside packages (cannot be filtered-out) */ + if (fulldiscovery && getenv("HWLOC_X86_TOPOEXT_NUMANODES")) { + hwloc_bitmap_t node_cpuset; + hwloc_obj_t node; + + /* FIXME: if there's memory inside the root object, divide it into NUMA nodes? */ + + hwloc_bitmap_copy(remaining_cpuset, complete_cpuset); + while ((i = hwloc_bitmap_first(remaining_cpuset)) != (unsigned) -1) { + unsigned packageid = infos[i].packageid; + unsigned nodeid = infos[i].nodeid; + + if (nodeid == (unsigned)-1) { + hwloc_bitmap_clr(remaining_cpuset, i); + continue; + } + + node_cpuset = hwloc_bitmap_alloc(); + for (j = i; j < nbprocs; j++) { + if (infos[j].nodeid == (unsigned) -1) { + hwloc_bitmap_clr(remaining_cpuset, j); + continue; + } + + if (infos[j].packageid == packageid && infos[j].nodeid == nodeid) { + hwloc_bitmap_set(node_cpuset, j); + hwloc_bitmap_clr(remaining_cpuset, j); + } + } + node = hwloc_alloc_setup_object(topology, HWLOC_OBJ_NUMANODE, nodeid); + node->cpuset = node_cpuset; + node->nodeset = hwloc_bitmap_alloc(); + hwloc_bitmap_set(node->nodeset, nodeid); + hwloc_debug_1arg_bitmap("os node %u has cpuset %s\n", + nodeid, node_cpuset); + hwloc_insert_object_by_cpuset(topology, node); + gotnuma++; + } + } + + if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) { + if (fulldiscovery) { + char *env; + int dont_merge; + hwloc_bitmap_t unit_cpuset, die_cpuset; + hwloc_obj_t unit, die; + + /* Look for Compute units inside packages */ + hwloc_bitmap_copy(remaining_cpuset, complete_cpuset); + while ((i = hwloc_bitmap_first(remaining_cpuset)) != (unsigned) -1) { + unsigned packageid = infos[i].packageid; + unsigned unitid = infos[i].unitid; + + if (unitid == (unsigned)-1) { + hwloc_bitmap_clr(remaining_cpuset, i); + continue; + } + + unit_cpuset = hwloc_bitmap_alloc(); + for (j = i; j < nbprocs; j++) { + if (infos[j].unitid == (unsigned) -1) { + 
hwloc_bitmap_clr(remaining_cpuset, j); + continue; + } + + if (infos[j].packageid == packageid && infos[j].unitid == unitid) { + hwloc_bitmap_set(unit_cpuset, j); + hwloc_bitmap_clr(remaining_cpuset, j); + } + } + unit = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, unitid); + unit->cpuset = unit_cpuset; + unit->subtype = strdup("ComputeUnit"); + unit->attr->group.kind = HWLOC_GROUP_KIND_AMD_COMPUTE_UNIT; + hwloc_debug_1arg_bitmap("os unit %u has cpuset %s\n", + unitid, unit_cpuset); + hwloc_insert_object_by_cpuset(topology, unit); + } + + /* Look for Dies inside packages */ + env = getenv("HWLOC_DONT_MERGE_DIE_GROUPS"); + dont_merge = env && atoi(env); + hwloc_bitmap_copy(remaining_cpuset, complete_cpuset); + while ((i = hwloc_bitmap_first(remaining_cpuset)) != (unsigned) -1) { + unsigned packageid = infos[i].packageid; + unsigned dieid = infos[i].dieid; + + if (dieid == (unsigned)-1) { + hwloc_bitmap_clr(remaining_cpuset, i); + continue; + } + + die_cpuset = hwloc_bitmap_alloc(); + for (j = i; j < nbprocs; j++) { + if (infos[j].dieid == (unsigned) -1) { + hwloc_bitmap_clr(remaining_cpuset, j); + continue; + } + + if (infos[j].packageid == packageid && infos[j].dieid == dieid) { + hwloc_bitmap_set(die_cpuset, j); + hwloc_bitmap_clr(remaining_cpuset, j); + } + } + die = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, dieid); + die->cpuset = die_cpuset; + die->subtype = strdup("Die"); + die->attr->group.kind = HWLOC_GROUP_KIND_INTEL_DIE; + die->attr->group.dont_merge = dont_merge; + hwloc_debug_1arg_bitmap("os die %u has cpuset %s\n", + dieid, die_cpuset); + hwloc_insert_object_by_cpuset(topology, die); + } + + /* Look for unknown objects */ + if (infos[one].otherids) { + for (level = infos[one].levels-1; level <= infos[one].levels-1; level--) { + if (infos[one].otherids[level] != UINT_MAX) { + hwloc_bitmap_t unknown_cpuset; + hwloc_obj_t unknown_obj; + + hwloc_bitmap_copy(remaining_cpuset, complete_cpuset); + while ((i = 
hwloc_bitmap_first(remaining_cpuset)) != (unsigned) -1) {
+	      unsigned unknownid = infos[i].otherids[level];
+
+	      unknown_cpuset = hwloc_bitmap_alloc();
+	      for (j = i; j < nbprocs; j++) {
+	        if (infos[j].otherids[level] == unknownid) {
+	          hwloc_bitmap_set(unknown_cpuset, j);
+	          hwloc_bitmap_clr(remaining_cpuset, j);
+	        }
+	      }
+	      unknown_obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, unknownid);
+	      unknown_obj->cpuset = unknown_cpuset;
+	      unknown_obj->attr->group.kind = HWLOC_GROUP_KIND_INTEL_EXTTOPOENUM_UNKNOWN;
+	      unknown_obj->attr->group.subkind = level;
+	      hwloc_debug_2args_bitmap("os unknown%u %u has cpuset %s\n",
+	          level, unknownid, unknown_cpuset);
+	      hwloc_insert_object_by_cpuset(topology, unknown_obj);
+	    }
+	  }
+	}
+      }
+    }
+  }
+
+  if (hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_CORE)) {
+    /* Look for cores */
+    if (fulldiscovery) {
+      hwloc_bitmap_t core_cpuset;
+      hwloc_obj_t core;
+
+      hwloc_bitmap_copy(remaining_cpuset, complete_cpuset);
+      while ((i = hwloc_bitmap_first(remaining_cpuset)) != (unsigned) -1) {
+	unsigned packageid = infos[i].packageid;
+	unsigned nodeid = infos[i].nodeid;
+	unsigned coreid = infos[i].coreid;
+
+	if (coreid == (unsigned) -1) {
+	  hwloc_bitmap_clr(remaining_cpuset, i);
+	  continue;
+	}
+
+	core_cpuset = hwloc_bitmap_alloc();
+	for (j = i; j < nbprocs; j++) {
+	  if (infos[j].coreid == (unsigned) -1) {
+	    hwloc_bitmap_clr(remaining_cpuset, j);
+	    continue;
+	  }
+
+	  if (infos[j].packageid == packageid && infos[j].nodeid == nodeid && infos[j].coreid == coreid) {
+	    hwloc_bitmap_set(core_cpuset, j);
+	    hwloc_bitmap_clr(remaining_cpuset, j);
+	  }
+	}
+	core = hwloc_alloc_setup_object(topology, HWLOC_OBJ_CORE, coreid);
+	core->cpuset = core_cpuset;
+	hwloc_debug_1arg_bitmap("os core %u has cpuset %s\n",
+	    coreid, core_cpuset);
+	hwloc_insert_object_by_cpuset(topology, core);
+      }
+    }
+  }
+
+  /* Look for PUs (cannot be filtered-out) */
+  if (fulldiscovery) {
+    hwloc_debug("%s", "\n\n * CPU cpusets *\n\n");
+    for (i=0; i<nbprocs; i++)
+      if(infos[i].present) { /* Only add present PU. We don't know if others actually exist */
+       struct hwloc_obj *obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_PU, i);
+       obj->cpuset = 
hwloc_bitmap_alloc(); + hwloc_bitmap_only(obj->cpuset, i); + hwloc_debug_1arg_bitmap("PU %u has cpuset %s\n", i, obj->cpuset); + hwloc_insert_object_by_cpuset(topology, obj); + } + } + + /* Look for caches */ + /* First find max level */ + level = 0; + for (i = 0; i < nbprocs; i++) + for (j = 0; j < infos[i].numcaches; j++) + if (infos[i].cache[j].level > level) + level = infos[i].cache[j].level; + while (level > 0) { + hwloc_obj_cache_type_t type; + HWLOC_BUILD_ASSERT(HWLOC_OBJ_CACHE_DATA == HWLOC_OBJ_CACHE_UNIFIED+1); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_CACHE_INSTRUCTION == HWLOC_OBJ_CACHE_DATA+1); + for (type = HWLOC_OBJ_CACHE_UNIFIED; type <= HWLOC_OBJ_CACHE_INSTRUCTION; type++) { + /* Look for caches of that type at level level */ + hwloc_obj_type_t otype; + hwloc_obj_t cache; + + otype = hwloc_cache_type_by_depth_type(level, type); + if (otype == HWLOC_OBJ_TYPE_NONE) + continue; + if (!hwloc_filter_check_keep_object_type(topology, otype)) + continue; + + hwloc_bitmap_copy(remaining_cpuset, complete_cpuset); + while ((i = hwloc_bitmap_first(remaining_cpuset)) != (unsigned) -1) { + hwloc_bitmap_t puset; + + for (l = 0; l < infos[i].numcaches; l++) { + if (infos[i].cache[l].level == level && infos[i].cache[l].type == type) + break; + } + if (l == infos[i].numcaches) { + /* no cache Llevel of that type in i */ + hwloc_bitmap_clr(remaining_cpuset, i); + continue; + } + + puset = hwloc_bitmap_alloc(); + hwloc_bitmap_set(puset, i); + cache = hwloc_get_next_obj_covering_cpuset_by_type(topology, puset, otype, NULL); + hwloc_bitmap_free(puset); + + if (cache) { + /* Found cache above that PU, annotate if no such attribute yet */ + if (!hwloc_obj_get_info_by_name(cache, "Inclusive")) + hwloc_obj_add_info(cache, "Inclusive", infos[i].cache[l].inclusive ? 
"1" : "0"); + hwloc_bitmap_andnot(remaining_cpuset, remaining_cpuset, cache->cpuset); + } else { + /* Add the missing cache */ + hwloc_bitmap_t cache_cpuset; + unsigned packageid = infos[i].packageid; + unsigned cacheid = infos[i].cache[l].cacheid; + /* Now look for others sharing it */ + cache_cpuset = hwloc_bitmap_alloc(); + for (j = i; j < nbprocs; j++) { + unsigned l2; + for (l2 = 0; l2 < infos[j].numcaches; l2++) { + if (infos[j].cache[l2].level == level && infos[j].cache[l2].type == type) + break; + } + if (l2 == infos[j].numcaches) { + /* no cache Llevel of that type in j */ + hwloc_bitmap_clr(remaining_cpuset, j); + continue; + } + if (infos[j].packageid == packageid && infos[j].cache[l2].cacheid == cacheid) { + hwloc_bitmap_set(cache_cpuset, j); + hwloc_bitmap_clr(remaining_cpuset, j); + } + } + cache = hwloc_alloc_setup_object(topology, otype, HWLOC_UNKNOWN_INDEX); + cache->attr->cache.depth = level; + cache->attr->cache.size = infos[i].cache[l].size; + cache->attr->cache.linesize = infos[i].cache[l].linesize; + cache->attr->cache.associativity = infos[i].cache[l].ways; + cache->attr->cache.type = infos[i].cache[l].type; + cache->cpuset = cache_cpuset; + hwloc_obj_add_info(cache, "Inclusive", infos[i].cache[l].inclusive ? 
"1" : "0"); + hwloc_debug_2args_bitmap("os L%u cache %u has cpuset %s\n", + level, cacheid, cache_cpuset); + hwloc_insert_object_by_cpuset(topology, cache); + } + } + } + level--; + } + + /* FIXME: if KNL and L2 disabled, add tiles instead of L2 */ + + hwloc_bitmap_free(remaining_cpuset); + hwloc_bitmap_free(complete_cpuset); + + if (gotnuma) + topology->support.discovery->numa = 1; +} + +static int +look_procs(struct hwloc_backend *backend, struct procinfo *infos, int fulldiscovery, + unsigned highest_cpuid, unsigned highest_ext_cpuid, unsigned *features, enum cpuid_type cpuid_type, + int (*get_cpubind)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags), + int (*set_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags)) +{ + struct hwloc_x86_backend_data_s *data = backend->private_data; + struct hwloc_topology *topology = backend->topology; + unsigned nbprocs = data->nbprocs; + hwloc_bitmap_t orig_cpuset = NULL; + hwloc_bitmap_t set = NULL; + unsigned i; + + if (!data->src_cpuiddump_path) { + orig_cpuset = hwloc_bitmap_alloc(); + if (get_cpubind(topology, orig_cpuset, HWLOC_CPUBIND_STRICT)) { + hwloc_bitmap_free(orig_cpuset); + return -1; + } + set = hwloc_bitmap_alloc(); + } + + for (i = 0; i < nbprocs; i++) { + struct cpuiddump *src_cpuiddump = NULL; + if (data->src_cpuiddump_path) { + src_cpuiddump = cpuiddump_read(data->src_cpuiddump_path, i); + if (!src_cpuiddump) + continue; + } else { + hwloc_bitmap_only(set, i); + hwloc_debug("binding to CPU%u\n", i); + if (set_cpubind(topology, set, HWLOC_CPUBIND_STRICT)) { + hwloc_debug("could not bind to CPU%u: %s\n", i, strerror(errno)); + continue; + } + } + + look_proc(backend, &infos[i], highest_cpuid, highest_ext_cpuid, features, cpuid_type, src_cpuiddump); + + if (data->src_cpuiddump_path) { + cpuiddump_free(src_cpuiddump); + } + } + + if (!data->src_cpuiddump_path) { + set_cpubind(topology, orig_cpuset, 0); + hwloc_bitmap_free(set); + hwloc_bitmap_free(orig_cpuset); + } + + if 
(!data->apicid_unique) + fulldiscovery = 0; + else + summarize(backend, infos, fulldiscovery); + return 0; +} + +#if defined HWLOC_FREEBSD_SYS && defined HAVE_CPUSET_SETID +#include +#include +typedef cpusetid_t hwloc_x86_os_state_t; +static void hwloc_x86_os_state_save(hwloc_x86_os_state_t *state, struct cpuiddump *src_cpuiddump) +{ + if (!src_cpuiddump) { + /* temporary make all cpus available during discovery */ + cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, state); + cpuset_setid(CPU_WHICH_PID, -1, 0); + } +} +static void hwloc_x86_os_state_restore(hwloc_x86_os_state_t *state, struct cpuiddump *src_cpuiddump) +{ + if (!src_cpuiddump) { + /* restore initial cpuset */ + cpuset_setid(CPU_WHICH_PID, -1, *state); + } +} +#else /* !defined HWLOC_FREEBSD_SYS || !defined HAVE_CPUSET_SETID */ +typedef void * hwloc_x86_os_state_t; +static void hwloc_x86_os_state_save(hwloc_x86_os_state_t *state __hwloc_attribute_unused, struct cpuiddump *src_cpuiddump __hwloc_attribute_unused) { } +static void hwloc_x86_os_state_restore(hwloc_x86_os_state_t *state __hwloc_attribute_unused, struct cpuiddump *src_cpuiddump __hwloc_attribute_unused) { } +#endif /* !defined HWLOC_FREEBSD_SYS || !defined HAVE_CPUSET_SETID */ + +/* GenuineIntel */ +#define INTEL_EBX ('G' | ('e'<<8) | ('n'<<16) | ('u'<<24)) +#define INTEL_EDX ('i' | ('n'<<8) | ('e'<<16) | ('I'<<24)) +#define INTEL_ECX ('n' | ('t'<<8) | ('e'<<16) | ('l'<<24)) + +/* AuthenticAMD */ +#define AMD_EBX ('A' | ('u'<<8) | ('t'<<16) | ('h'<<24)) +#define AMD_EDX ('e' | ('n'<<8) | ('t'<<16) | ('i'<<24)) +#define AMD_ECX ('c' | ('A'<<8) | ('M'<<16) | ('D'<<24)) + +/* HYGON "HygonGenuine" */ +#define HYGON_EBX ('H' | ('y'<<8) | ('g'<<16) | ('o'<<24)) +#define HYGON_EDX ('n' | ('G'<<8) | ('e'<<16) | ('n'<<24)) +#define HYGON_ECX ('u' | ('i'<<8) | ('n'<<16) | ('e'<<24)) + +/* (Zhaoxin) CentaurHauls */ +#define ZX_EBX ('C' | ('e'<<8) | ('n'<<16) | ('t'<<24)) +#define ZX_EDX ('a' | ('u'<<8) | ('r'<<16) | ('H'<<24)) +#define ZX_ECX ('a' | 
('u'<<8) | ('l'<<16) | ('s'<<24)) +/* (Zhaoxin) Shanghai */ +#define SH_EBX (' ' | (' '<<8) | ('S'<<16) | ('h'<<24)) +#define SH_EDX ('a' | ('n'<<8) | ('g'<<16) | ('h'<<24)) +#define SH_ECX ('a' | ('i'<<8) | (' '<<16) | (' '<<24)) + +/* fake cpubind for when nbprocs=1 and no binding support */ +static int fake_get_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, + hwloc_cpuset_t set __hwloc_attribute_unused, + int flags __hwloc_attribute_unused) +{ + return 0; +} +static int fake_set_cpubind(hwloc_topology_t topology __hwloc_attribute_unused, + hwloc_const_cpuset_t set __hwloc_attribute_unused, + int flags __hwloc_attribute_unused) +{ + return 0; +} + +static +int hwloc_look_x86(struct hwloc_backend *backend, int fulldiscovery) +{ + struct hwloc_x86_backend_data_s *data = backend->private_data; + unsigned nbprocs = data->nbprocs; + unsigned eax, ebx, ecx = 0, edx; + unsigned i; + unsigned highest_cpuid; + unsigned highest_ext_cpuid; + /* This stores cpuid features with the same indexing as Linux */ + unsigned features[10] = { 0 }; + struct procinfo *infos = NULL; + enum cpuid_type cpuid_type = unknown; + hwloc_x86_os_state_t os_state; + struct hwloc_binding_hooks hooks; + struct hwloc_topology_support support; + struct hwloc_topology_membind_support memsupport __hwloc_attribute_unused; + int (*get_cpubind)(hwloc_topology_t topology, hwloc_cpuset_t set, int flags) = NULL; + int (*set_cpubind)(hwloc_topology_t topology, hwloc_const_cpuset_t set, int flags) = NULL; + struct cpuiddump *src_cpuiddump = NULL; + int ret = -1; + + if (data->src_cpuiddump_path) { + /* just read cpuid from the dump */ + src_cpuiddump = cpuiddump_read(data->src_cpuiddump_path, 0); + if (!src_cpuiddump) + goto out; + + } else { + /* otherwise check if binding works */ + memset(&hooks, 0, sizeof(hooks)); + support.membind = &memsupport; + hwloc_set_native_binding_hooks(&hooks, &support); + if (hooks.get_thisthread_cpubind && hooks.set_thisthread_cpubind) { + get_cpubind = 
hooks.get_thisthread_cpubind; + set_cpubind = hooks.set_thisthread_cpubind; + } else if (hooks.get_thisproc_cpubind && hooks.set_thisproc_cpubind) { + /* FIXME: if called by a multithreaded program, we will restore the original process binding + * for each thread instead of their own original thread binding. + * See issue #158. + */ + get_cpubind = hooks.get_thisproc_cpubind; + set_cpubind = hooks.set_thisproc_cpubind; + } else { + /* we need binding support if there are multiple PUs */ + if (nbprocs > 1) + goto out; + get_cpubind = fake_get_cpubind; + set_cpubind = fake_set_cpubind; + } + } + + if (!src_cpuiddump && !hwloc_have_x86_cpuid()) + goto out; + + infos = calloc(nbprocs, sizeof(struct procinfo)); + if (NULL == infos) + goto out; + for (i = 0; i < nbprocs; i++) { + infos[i].nodeid = (unsigned) -1; + infos[i].packageid = (unsigned) -1; + infos[i].dieid = (unsigned) -1; + infos[i].unitid = (unsigned) -1; + infos[i].coreid = (unsigned) -1; + infos[i].threadid = (unsigned) -1; + } + + eax = 0x00; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + highest_cpuid = eax; + if (ebx == INTEL_EBX && ecx == INTEL_ECX && edx == INTEL_EDX) + cpuid_type = intel; + else if (ebx == AMD_EBX && ecx == AMD_ECX && edx == AMD_EDX) + cpuid_type = amd; + else if ((ebx == ZX_EBX && ecx == ZX_ECX && edx == ZX_EDX) + || (ebx == SH_EBX && ecx == SH_ECX && edx == SH_EDX)) + cpuid_type = zhaoxin; + else if (ebx == HYGON_EBX && ecx == HYGON_ECX && edx == HYGON_EDX) + cpuid_type = hygon; + + hwloc_debug("highest cpuid %x, cpuid type %u\n", highest_cpuid, cpuid_type); + if (highest_cpuid < 0x01) { + goto out_with_infos; + } + + eax = 0x01; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + features[0] = edx; + features[4] = ecx; + + eax = 0x80000000; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + highest_ext_cpuid = eax; + + hwloc_debug("highest extended cpuid %x\n", highest_ext_cpuid); + + if (highest_cpuid >= 0x7) { + eax = 0x7; + ecx = 0; + 
cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + features[9] = ebx; + } + + if (cpuid_type != intel && highest_ext_cpuid >= 0x80000001) { + eax = 0x80000001; + cpuid_or_from_dump(&eax, &ebx, &ecx, &edx, src_cpuiddump); + features[1] = edx; + features[6] = ecx; + } + + hwloc_x86_os_state_save(&os_state, src_cpuiddump); + + ret = look_procs(backend, infos, fulldiscovery, + highest_cpuid, highest_ext_cpuid, features, cpuid_type, + get_cpubind, set_cpubind); + if (!ret) + /* success, we're done */ + goto out_with_os_state; + + if (nbprocs == 1) { + /* only one processor, no need to bind */ + look_proc(backend, &infos[0], highest_cpuid, highest_ext_cpuid, features, cpuid_type, src_cpuiddump); + summarize(backend, infos, fulldiscovery); + ret = 0; + } + +out_with_os_state: + hwloc_x86_os_state_restore(&os_state, src_cpuiddump); + +out_with_infos: + if (NULL != infos) { + for (i = 0; i < nbprocs; i++) { + free(infos[i].cache); + free(infos[i].otherids); + } + free(infos); + } + +out: + if (src_cpuiddump) + cpuiddump_free(src_cpuiddump); + return ret; +} + +static int +hwloc_x86_discover(struct hwloc_backend *backend) +{ + struct hwloc_x86_backend_data_s *data = backend->private_data; + struct hwloc_topology *topology = backend->topology; + int alreadypus = 0; + int ret; + +#if HAVE_DECL_RUNNING_ON_VALGRIND + if (RUNNING_ON_VALGRIND && !data->src_cpuiddump_path) { + fprintf(stderr, "hwloc x86 backend cannot work under Valgrind, disabling.\n" + "May be reenabled by dumping CPUIDs with hwloc-gather-cpuid\n" + "and reloading them under Valgrind with HWLOC_CPUID_PATH.\n"); + return 0; + } +#endif + + if (data->src_cpuiddump_path) { + assert(data->nbprocs > 0); /* enforced by hwloc_x86_component_instantiate() */ + topology->support.discovery->pu = 1; + } else { + int nbprocs = hwloc_fallback_nbprocessors(topology); + if (nbprocs >= 1) + topology->support.discovery->pu = 1; + else + nbprocs = 1; + data->nbprocs = (unsigned) nbprocs; + } + + if 
(topology->levels[0][0]->cpuset) { + /* somebody else discovered things */ + if (topology->nb_levels == 2 && topology->level_nbobjects[1] == data->nbprocs) { + /* only PUs were discovered, as much as we would, complete the topology with everything else */ + alreadypus = 1; + goto fulldiscovery; + } + + /* several object types were added, we can't easily complete, just do partial discovery */ + hwloc_topology_reconnect(topology, 0); + ret = hwloc_look_x86(backend, 0); + if (ret) + hwloc_obj_add_info(topology->levels[0][0], "Backend", "x86"); + return 0; + } else { + /* topology is empty, initialize it */ + hwloc_alloc_root_sets(topology->levels[0][0]); + } + +fulldiscovery: + if (hwloc_look_x86(backend, 1) < 0) { + /* if failed, create PUs */ + if (!alreadypus) + hwloc_setup_pu_level(topology, data->nbprocs); + } + + hwloc_obj_add_info(topology->levels[0][0], "Backend", "x86"); + + if (!data->src_cpuiddump_path) { /* CPUID dump works for both x86 and x86_64 */ +#ifdef HAVE_UNAME + hwloc_add_uname_info(topology, NULL); /* we already know is_thissystem() is true */ +#else + /* uname isn't available, manually setup the "Architecture" info */ +#ifdef HWLOC_X86_64_ARCH + hwloc_obj_add_info(topology->levels[0][0], "Architecture", "x86_64"); +#else + hwloc_obj_add_info(topology->levels[0][0], "Architecture", "x86"); +#endif +#endif + } + + return 1; +} + +static int +hwloc_x86_check_cpuiddump_input(const char *src_cpuiddump_path, hwloc_bitmap_t set) +{ + +#if !(defined HWLOC_WIN_SYS && !defined __MINGW32__ && !defined __CYGWIN__) /* needs a lot of work */ + struct dirent *dirent; + DIR *dir; + FILE *file; + char line [32]; + + dir = opendir(src_cpuiddump_path); + if (!dir) + return -1; + + char path[strlen(src_cpuiddump_path) + strlen("/hwloc-cpuid-info") + 1]; + sprintf(path, "%s/hwloc-cpuid-info", src_cpuiddump_path); + file = fopen(path, "r"); + if (!file) { + fprintf(stderr, "Couldn't open dumped cpuid summary %s\n", path); + goto out_with_dir; + } + if (!fgets(line, 
sizeof(line), file)) { + fprintf(stderr, "Found read dumped cpuid summary in %s\n", path); + fclose(file); + goto out_with_dir; + } + fclose(file); + if (strcmp(line, "Architecture: x86\n")) { + fprintf(stderr, "Found non-x86 dumped cpuid summary in %s: %s\n", path, line); + goto out_with_dir; + } + + while ((dirent = readdir(dir)) != NULL) { + if (!strncmp(dirent->d_name, "pu", 2)) { + char *end; + unsigned long idx = strtoul(dirent->d_name+2, &end, 10); + if (!*end) + hwloc_bitmap_set(set, idx); + else + fprintf(stderr, "Ignoring invalid dirent `%s' in dumped cpuid directory `%s'\n", + dirent->d_name, src_cpuiddump_path); + } + } + closedir(dir); + + if (hwloc_bitmap_iszero(set)) { + fprintf(stderr, "Did not find any valid pu%%u entry in dumped cpuid directory `%s'\n", + src_cpuiddump_path); + return -1; + } else if (hwloc_bitmap_last(set) != hwloc_bitmap_weight(set) - 1) { + /* The x86 backends enforces contigous set of PUs starting at 0 so far */ + fprintf(stderr, "Found non-contigous pu%%u range in dumped cpuid directory `%s'\n", + src_cpuiddump_path); + return -1; + } + + return 0; + +out_with_dir: + closedir(dir); +#endif /* HWLOC_WIN_SYS & !__MINGW32__ needs a lot of work */ + return -1; +} + +static void +hwloc_x86_backend_disable(struct hwloc_backend *backend) +{ + struct hwloc_x86_backend_data_s *data = backend->private_data; + hwloc_bitmap_free(data->apicid_set); + free(data->src_cpuiddump_path); + free(data); +} + +static struct hwloc_backend * +hwloc_x86_component_instantiate(struct hwloc_disc_component *component, + const void *_data1 __hwloc_attribute_unused, + const void *_data2 __hwloc_attribute_unused, + const void *_data3 __hwloc_attribute_unused) +{ + struct hwloc_backend *backend; + struct hwloc_x86_backend_data_s *data; + const char *src_cpuiddump_path; + + backend = hwloc_backend_alloc(component); + if (!backend) + goto out; + + data = malloc(sizeof(*data)); + if (!data) { + errno = ENOMEM; + goto out_with_backend; + } + + 
backend->private_data = data; + backend->discover = hwloc_x86_discover; + backend->disable = hwloc_x86_backend_disable; + + /* default values */ + data->is_knl = 0; + data->apicid_set = hwloc_bitmap_alloc(); + data->apicid_unique = 1; + data->src_cpuiddump_path = NULL; + + src_cpuiddump_path = getenv("HWLOC_CPUID_PATH"); + if (src_cpuiddump_path) { + hwloc_bitmap_t set = hwloc_bitmap_alloc(); + if (!hwloc_x86_check_cpuiddump_input(src_cpuiddump_path, set)) { + backend->is_thissystem = 0; + data->src_cpuiddump_path = strdup(src_cpuiddump_path); + assert(!hwloc_bitmap_iszero(set)); /* enforced by hwloc_x86_check_cpuiddump_input() */ + data->nbprocs = hwloc_bitmap_weight(set); + } else { + fprintf(stderr, "Ignoring dumped cpuid directory.\n"); + } + hwloc_bitmap_free(set); + } + + return backend; + + out_with_backend: + free(backend); + out: + return NULL; +} + +static struct hwloc_disc_component hwloc_x86_disc_component = { + HWLOC_DISC_COMPONENT_TYPE_CPU, + "x86", + HWLOC_DISC_COMPONENT_TYPE_GLOBAL, + hwloc_x86_component_instantiate, + 45, /* between native and no_os */ + 1, + NULL +}; + +const struct hwloc_component hwloc_x86_component = { + HWLOC_COMPONENT_ABI, + NULL, NULL, + HWLOC_COMPONENT_TYPE_DISC, + 0, + &hwloc_x86_disc_component +}; diff --git a/src/3rdparty/hwloc/src/topology-xml-nolibxml.c b/src/3rdparty/hwloc/src/topology-xml-nolibxml.c new file mode 100644 index 00000000..5a0d02da --- /dev/null +++ b/src/3rdparty/hwloc/src/topology-xml-nolibxml.c @@ -0,0 +1,919 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009-2011 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef HAVE_UNISTD_H +#include +#endif + +/******************* + * Import routines * + *******************/ + +struct hwloc__nolibxml_backend_data_s { + size_t buflen; /* size of both buffer and copy buffers, set during backend_init() */ + char *buffer; /* allocated and filled during backend_init() */ + char *copy; /* allocated during backend_init(), used later during actual parsing */ +}; + +typedef struct hwloc__nolibxml_import_state_data_s { + char *tagbuffer; /* buffer containing the next tag */ + char *attrbuffer; /* buffer containing the next attribute of the current node */ + char *tagname; /* tag name of the current node */ + int closed; /* set if the current node is auto-closing */ +} __hwloc_attribute_may_alias * hwloc__nolibxml_import_state_data_t; + +static char * +hwloc__nolibxml_import_ignore_spaces(char *buffer) +{ + return buffer + strspn(buffer, " \t\n"); +} + +static int +hwloc__nolibxml_import_next_attr(hwloc__xml_import_state_t state, char **namep, char **valuep) +{ + hwloc__nolibxml_import_state_data_t nstate = (void*) state->data; + size_t namelen; + size_t len, escaped; + char *buffer, *value, *end; + + if (!nstate->attrbuffer) + return -1; + + /* find the beginning of an attribute */ + buffer = hwloc__nolibxml_import_ignore_spaces(nstate->attrbuffer); + namelen = strspn(buffer, "abcdefghijklmnopqrstuvwxyz_"); + if (buffer[namelen] != '=' || buffer[namelen+1] != '\"') + return -1; + buffer[namelen] = '\0'; + *namep = buffer; + + /* find the beginning of its value, and unescape it */ + *valuep = value = buffer+namelen+2; + len = 0; escaped = 0; + while (value[len+escaped] != '\"') { + if (value[len+escaped] == '&') { + if (!strncmp(&value[1+len+escaped], "#10;", 4)) { + escaped += 4; + value[len] = '\n'; + } else if (!strncmp(&value[1+len+escaped], "#13;", 4)) { + escaped += 4; + value[len] = '\r'; + } else if 
(!strncmp(&value[1+len+escaped], "#9;", 3)) { + escaped += 3; + value[len] = '\t'; + } else if (!strncmp(&value[1+len+escaped], "quot;", 5)) { + escaped += 5; + value[len] = '\"'; + } else if (!strncmp(&value[1+len+escaped], "lt;", 3)) { + escaped += 3; + value[len] = '<'; + } else if (!strncmp(&value[1+len+escaped], "gt;", 3)) { + escaped += 3; + value[len] = '>'; + } else if (!strncmp(&value[1+len+escaped], "amp;", 4)) { + escaped += 4; + value[len] = '&'; + } else { + return -1; + } + } else { + value[len] = value[len+escaped]; + } + len++; + if (value[len+escaped] == '\0') + return -1; + } + value[len] = '\0'; + + /* find next attribute */ + end = &value[len+escaped+1]; /* skip the ending " */ + nstate->attrbuffer = hwloc__nolibxml_import_ignore_spaces(end); + return 0; +} + +static int +hwloc__nolibxml_import_find_child(hwloc__xml_import_state_t state, + hwloc__xml_import_state_t childstate, + char **tagp) +{ + hwloc__nolibxml_import_state_data_t nstate = (void*) state->data; + hwloc__nolibxml_import_state_data_t nchildstate = (void*) childstate->data; + char *buffer = nstate->tagbuffer; + char *end; + char *tag; + size_t namelen; + + childstate->parent = state; + childstate->global = state->global; + + /* auto-closed tags have no children */ + if (nstate->closed) + return 0; + + /* find the beginning of the tag */ + buffer = hwloc__nolibxml_import_ignore_spaces(buffer); + if (buffer[0] != '<') + return -1; + buffer++; + + /* if closing tag, return nothing and do not advance */ + if (buffer[0] == '/') + return 0; + + /* normal tag */ + tag = nchildstate->tagname = buffer; + + /* find the end, mark it and return it */ + end = strchr(buffer, '>'); + if (!end) + return -1; + end[0] = '\0'; + nchildstate->tagbuffer = end+1; + + /* handle auto-closing tags */ + if (end[-1] == '/') { + nchildstate->closed = 1; + end[-1] = '\0'; + } else + nchildstate->closed = 0; + + /* find attributes */ + namelen = strspn(buffer, "abcdefghijklmnopqrstuvwxyz1234567890_"); + + if 
(buffer[namelen] == '\0') { + /* no attributes */ + nchildstate->attrbuffer = NULL; + *tagp = tag; + return 1; + } + + if (buffer[namelen] != ' ') + return -1; + + /* found a space, likely starting attributes */ + buffer[namelen] = '\0'; + nchildstate->attrbuffer = buffer+namelen+1; + *tagp = tag; + return 1; +} + +static int +hwloc__nolibxml_import_close_tag(hwloc__xml_import_state_t state) +{ + hwloc__nolibxml_import_state_data_t nstate = (void*) state->data; + char *buffer = nstate->tagbuffer; + char *end; + + /* auto-closed tags need nothing */ + if (nstate->closed) + return 0; + + /* find the beginning of the tag */ + buffer = hwloc__nolibxml_import_ignore_spaces(buffer); + if (buffer[0] != '<') + return -1; + buffer++; + + /* find the end, mark it and return it to the parent */ + end = strchr(buffer, '>'); + if (!end) + return -1; + end[0] = '\0'; + nstate->tagbuffer = end+1; + + /* if closing tag, return nothing */ + if (buffer[0] != '/' || strcmp(buffer+1, nstate->tagname) ) + return -1; + return 0; +} + +static void +hwloc__nolibxml_import_close_child(hwloc__xml_import_state_t state) +{ + hwloc__nolibxml_import_state_data_t nstate = (void*) state->data; + hwloc__nolibxml_import_state_data_t nparent = (void*) state->parent->data; + nparent->tagbuffer = nstate->tagbuffer; +} + +static int +hwloc__nolibxml_import_get_content(hwloc__xml_import_state_t state, + char **beginp, size_t expected_length) +{ + hwloc__nolibxml_import_state_data_t nstate = (void*) state->data; + char *buffer = nstate->tagbuffer; + size_t length; + char *end; + + /* auto-closed tags have no content */ + if (nstate->closed) { + if (expected_length) + return -1; + *beginp = (char *) ""; + return 0; + } + + /* find the next tag, where the content ends */ + end = strchr(buffer, '<'); + if (!end) + return -1; + + length = (size_t) (end-buffer); + if (length != expected_length) + return -1; + nstate->tagbuffer = end; + *end = '\0'; /* mark as 0-terminated for now */ + *beginp = buffer; + 
return 1; +} + +static void +hwloc__nolibxml_import_close_content(hwloc__xml_import_state_t state) +{ + /* put back the '<' that we overwrote to 0-terminate the content */ + hwloc__nolibxml_import_state_data_t nstate = (void*) state->data; + if (!nstate->closed) + *nstate->tagbuffer = '<'; +} + +static int +hwloc_nolibxml_look_init(struct hwloc_xml_backend_data_s *bdata, + struct hwloc__xml_import_state_s *state) +{ + hwloc__nolibxml_import_state_data_t nstate = (void*) state->data; + struct hwloc__nolibxml_backend_data_s *nbdata = bdata->data; + unsigned major, minor; + char *end; + char *buffer; + + HWLOC_BUILD_ASSERT(sizeof(*nstate) <= sizeof(state->data)); + + /* use a copy in the temporary buffer, we may modify during parsing */ + buffer = nbdata->copy; + memcpy(buffer, nbdata->buffer, nbdata->buflen); + + /* skip headers */ + while (!strncmp(buffer, "", &major, &minor) == 2) { + bdata->version_major = major; + bdata->version_minor = minor; + end = strchr(buffer, '>') + 1; + } else if (!strncmp(buffer, "", 10)) { + bdata->version_major = 1; + bdata->version_minor = 0; + end = buffer + 10; + } else if (!strncmp(buffer, "", 6)) { + bdata->version_major = 0; + bdata->version_minor = 9; + end = buffer + 6; + } else + goto failed; + + state->global->next_attr = hwloc__nolibxml_import_next_attr; + state->global->find_child = hwloc__nolibxml_import_find_child; + state->global->close_tag = hwloc__nolibxml_import_close_tag; + state->global->close_child = hwloc__nolibxml_import_close_child; + state->global->get_content = hwloc__nolibxml_import_get_content; + state->global->close_content = hwloc__nolibxml_import_close_content; + state->parent = NULL; + nstate->closed = 0; + nstate->tagbuffer = end; + nstate->tagname = (char *) "topology"; + nstate->attrbuffer = NULL; + return 0; /* success */ + + failed: + return -1; /* failed */ +} + +/* can be called at the end of the import (to cleanup things early), + * or by backend_exit() if load failed for other reasons. 
+ */ +static void +hwloc_nolibxml_free_buffers(struct hwloc_xml_backend_data_s *bdata) +{ + struct hwloc__nolibxml_backend_data_s *nbdata = bdata->data; + if (nbdata->buffer) { + free(nbdata->buffer); + nbdata->buffer = NULL; + } + if (nbdata->copy) { + free(nbdata->copy); + nbdata->copy = NULL; + } +} + +static void +hwloc_nolibxml_look_done(struct hwloc_xml_backend_data_s *bdata, int result) +{ + hwloc_nolibxml_free_buffers(bdata); + + if (result < 0 && hwloc__xml_verbose()) + fprintf(stderr, "Failed to parse XML input with the minimalistic parser. If it was not\n" + "generated by hwloc, try enabling full XML support with libxml2.\n"); +} + +/******************** + * Backend routines * + ********************/ + +static void +hwloc_nolibxml_backend_exit(struct hwloc_xml_backend_data_s *bdata) +{ + struct hwloc__nolibxml_backend_data_s *nbdata = bdata->data; + hwloc_nolibxml_free_buffers(bdata); + free(nbdata); +} + +static int +hwloc_nolibxml_read_file(const char *xmlpath, char **bufferp, size_t *buflenp) +{ + FILE * file; + size_t buflen, offset, readlen; + struct stat statbuf; + char *buffer, *tmp; + size_t ret; + + if (!strcmp(xmlpath, "-")) + xmlpath = "/dev/stdin"; + + file = fopen(xmlpath, "r"); + if (!file) + goto out; + + /* find the required buffer size for regular files, or use 4k when unknown, we'll realloc later if needed */ + buflen = 4096; + if (!stat(xmlpath, &statbuf)) + if (S_ISREG(statbuf.st_mode)) + buflen = statbuf.st_size+1; /* one additional byte so that the first fread() gets EOF too */ + + buffer = malloc(buflen+1); /* one more byte for the ending \0 */ + if (!buffer) + goto out_with_file; + + offset = 0; readlen = buflen; + while (1) { + ret = fread(buffer+offset, 1, readlen, file); + + offset += ret; + buffer[offset] = 0; + + if (ret != readlen) + break; + + buflen *= 2; + tmp = realloc(buffer, buflen+1); + if (!tmp) + goto out_with_buffer; + buffer = tmp; + readlen = buflen/2; + } + + fclose(file); + *bufferp = buffer; + *buflenp = 
offset+1; + return 0; + + out_with_buffer: + free(buffer); + out_with_file: + fclose(file); + out: + return -1; +} + +static int +hwloc_nolibxml_backend_init(struct hwloc_xml_backend_data_s *bdata, + const char *xmlpath, const char *xmlbuffer, int xmlbuflen) +{ + struct hwloc__nolibxml_backend_data_s *nbdata = malloc(sizeof(*nbdata)); + + if (!nbdata) + goto out; + bdata->data = nbdata; + + if (xmlbuffer) { + nbdata->buffer = malloc(xmlbuflen+1); + if (!nbdata->buffer) + goto out_with_nbdata; + nbdata->buflen = xmlbuflen+1; + memcpy(nbdata->buffer, xmlbuffer, xmlbuflen); + nbdata->buffer[xmlbuflen] = '\0'; + + } else { + int err = hwloc_nolibxml_read_file(xmlpath, &nbdata->buffer, &nbdata->buflen); + if (err < 0) + goto out_with_nbdata; + } + + /* allocate a temporary copy buffer that we may modify during parsing */ + nbdata->copy = malloc(nbdata->buflen+1); + if (!nbdata->copy) + goto out_with_buffer; + nbdata->copy[nbdata->buflen] = '\0'; + + bdata->look_init = hwloc_nolibxml_look_init; + bdata->look_done = hwloc_nolibxml_look_done; + bdata->backend_exit = hwloc_nolibxml_backend_exit; + return 0; + +out_with_buffer: + free(nbdata->buffer); +out_with_nbdata: + free(nbdata); +out: + return -1; +} + +static int +hwloc_nolibxml_import_diff(struct hwloc__xml_import_state_s *state, + const char *xmlpath, const char *xmlbuffer, int xmlbuflen, + hwloc_topology_diff_t *firstdiffp, char **refnamep) +{ + hwloc__nolibxml_import_state_data_t nstate = (void*) state->data; + struct hwloc__xml_import_state_s childstate; + char *refname = NULL; + char *buffer, *tmp, *tag; + size_t buflen; + int ret; + + HWLOC_BUILD_ASSERT(sizeof(*nstate) <= sizeof(state->data)); + + if (xmlbuffer) { + buffer = malloc(xmlbuflen); + if (!buffer) + goto out; + memcpy(buffer, xmlbuffer, xmlbuflen); + buflen = xmlbuflen; + + } else { + ret = hwloc_nolibxml_read_file(xmlpath, &buffer, &buflen); + if (ret < 0) + goto out; + } + + /* skip headers */ + tmp = buffer; + while (!strncmp(tmp, 
"global->next_attr = hwloc__nolibxml_import_next_attr; + state->global->find_child = hwloc__nolibxml_import_find_child; + state->global->close_tag = hwloc__nolibxml_import_close_tag; + state->global->close_child = hwloc__nolibxml_import_close_child; + state->global->get_content = hwloc__nolibxml_import_get_content; + state->global->close_content = hwloc__nolibxml_import_close_content; + state->parent = NULL; + nstate->closed = 0; + nstate->tagbuffer = tmp; + nstate->tagname = NULL; + nstate->attrbuffer = NULL; + + /* find root */ + ret = hwloc__nolibxml_import_find_child(state, &childstate, &tag); + if (ret < 0) + goto out_with_buffer; + if (!tag || strcmp(tag, "topologydiff")) + goto out_with_buffer; + + while (1) { + char *attrname, *attrvalue; + if (hwloc__nolibxml_import_next_attr(&childstate, &attrname, &attrvalue) < 0) + break; + if (!strcmp(attrname, "refname")) { + free(refname); + refname = strdup(attrvalue); + } else + goto out_with_buffer; + } + + ret = hwloc__xml_import_diff(&childstate, firstdiffp); + if (refnamep && !ret) + *refnamep = refname; + else + free(refname); + + free(buffer); + return ret; + +out_with_buffer: + free(buffer); + free(refname); +out: + return -1; +} + +/******************* + * Export routines * + *******************/ + +typedef struct hwloc__nolibxml_export_state_data_s { + char *buffer; /* (moving) buffer where to write */ + size_t written; /* how many bytes were written (or would have be written if not truncated) */ + size_t remaining; /* how many bytes are still available in the buffer */ + unsigned indent; /* indentation level for the next line */ + unsigned nr_children; + unsigned has_content; +} __hwloc_attribute_may_alias * hwloc__nolibxml_export_state_data_t; + +static void +hwloc__nolibxml_export_update_buffer(hwloc__nolibxml_export_state_data_t ndata, int res) +{ + if (res >= 0) { + ndata->written += res; + if (res >= (int) ndata->remaining) + res = ndata->remaining>0 ? 
(int)ndata->remaining-1 : 0; + ndata->buffer += res; + ndata->remaining -= res; + } +} + +static char * +hwloc__nolibxml_export_escape_string(const char *src) +{ + size_t fulllen, sublen; + char *escaped, *dst; + + fulllen = strlen(src); + + sublen = strcspn(src, "\n\r\t\"<>&"); + if (sublen == fulllen) + return NULL; /* nothing to escape */ + + escaped = malloc(fulllen*6+1); /* escaped chars are replaced by at most 6 char */ + dst = escaped; + + memcpy(dst, src, sublen); + src += sublen; + dst += sublen; + + while (*src) { + int replen; + switch (*src) { + case '\n': strcpy(dst, " "); replen=5; break; + case '\r': strcpy(dst, " "); replen=5; break; + case '\t': strcpy(dst, " "); replen=4; break; + case '\"': strcpy(dst, """); replen=6; break; + case '<': strcpy(dst, "<"); replen=4; break; + case '>': strcpy(dst, ">"); replen=4; break; + case '&': strcpy(dst, "&"); replen=5; break; + default: replen=0; break; + } + dst+=replen; src++; + + sublen = strcspn(src, "\n\r\t\"<>&"); + memcpy(dst, src, sublen); + src += sublen; + dst += sublen; + } + + *dst = 0; + return escaped; +} + +static void +hwloc__nolibxml_export_new_child(hwloc__xml_export_state_t parentstate, + hwloc__xml_export_state_t state, + const char *name) +{ + hwloc__nolibxml_export_state_data_t npdata = (void *) parentstate->data; + hwloc__nolibxml_export_state_data_t ndata = (void *) state->data; + int res; + + assert(!npdata->has_content); + if (!npdata->nr_children) { + res = hwloc_snprintf(npdata->buffer, npdata->remaining, ">\n"); + hwloc__nolibxml_export_update_buffer(npdata, res); + } + npdata->nr_children++; + + state->parent = parentstate; + state->new_child = parentstate->new_child; + state->new_prop = parentstate->new_prop; + state->add_content = parentstate->add_content; + state->end_object = parentstate->end_object; + state->global = parentstate->global; + + ndata->buffer = npdata->buffer; + ndata->written = npdata->written; + ndata->remaining = npdata->remaining; + ndata->indent = 
npdata->indent + 2; + + ndata->nr_children = 0; + ndata->has_content = 0; + + res = hwloc_snprintf(ndata->buffer, ndata->remaining, "%*s<%s", (int) npdata->indent, "", name); + hwloc__nolibxml_export_update_buffer(ndata, res); +} + +static void +hwloc__nolibxml_export_new_prop(hwloc__xml_export_state_t state, const char *name, const char *value) +{ + hwloc__nolibxml_export_state_data_t ndata = (void *) state->data; + char *escaped = hwloc__nolibxml_export_escape_string(value); + int res = hwloc_snprintf(ndata->buffer, ndata->remaining, " %s=\"%s\"", name, escaped ? (const char *) escaped : value); + hwloc__nolibxml_export_update_buffer(ndata, res); + free(escaped); +} + +static void +hwloc__nolibxml_export_end_object(hwloc__xml_export_state_t state, const char *name) +{ + hwloc__nolibxml_export_state_data_t ndata = (void *) state->data; + hwloc__nolibxml_export_state_data_t npdata = (void *) state->parent->data; + int res; + + assert (!(ndata->has_content && ndata->nr_children)); + if (ndata->has_content) { + res = hwloc_snprintf(ndata->buffer, ndata->remaining, "\n", name); + } else if (ndata->nr_children) { + res = hwloc_snprintf(ndata->buffer, ndata->remaining, "%*s\n", (int) npdata->indent, "", name); + } else { + res = hwloc_snprintf(ndata->buffer, ndata->remaining, "/>\n"); + } + hwloc__nolibxml_export_update_buffer(ndata, res); + + npdata->buffer = ndata->buffer; + npdata->written = ndata->written; + npdata->remaining = ndata->remaining; +} + +static void +hwloc__nolibxml_export_add_content(hwloc__xml_export_state_t state, const char *buffer, size_t length) +{ + hwloc__nolibxml_export_state_data_t ndata = (void *) state->data; + int res; + + assert(!ndata->nr_children); + if (!ndata->has_content) { + res = hwloc_snprintf(ndata->buffer, ndata->remaining, ">"); + hwloc__nolibxml_export_update_buffer(ndata, res); + } + ndata->has_content = 1; + + res = hwloc_snprintf(ndata->buffer, ndata->remaining, buffer, length); + hwloc__nolibxml_export_update_buffer(ndata, 
res); +} + +static size_t +hwloc___nolibxml_prepare_export(hwloc_topology_t topology, struct hwloc__xml_export_data_s *edata, + char *xmlbuffer, int buflen, unsigned long flags) +{ + struct hwloc__xml_export_state_s state, childstate; + hwloc__nolibxml_export_state_data_t ndata = (void *) &state.data; + int v1export = flags & HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1; + int res; + + HWLOC_BUILD_ASSERT(sizeof(*ndata) <= sizeof(state.data)); + + state.new_child = hwloc__nolibxml_export_new_child; + state.new_prop = hwloc__nolibxml_export_new_prop; + state.add_content = hwloc__nolibxml_export_add_content; + state.end_object = hwloc__nolibxml_export_end_object; + state.global = edata; + + ndata->indent = 0; + ndata->written = 0; + ndata->buffer = xmlbuffer; + ndata->remaining = buflen; + + ndata->nr_children = 1; /* don't close a non-existing previous tag when opening the topology tag */ + ndata->has_content = 0; + + res = hwloc_snprintf(ndata->buffer, ndata->remaining, + "\n" + "\n", v1export ? "hwloc.dtd" : "hwloc2.dtd"); + hwloc__nolibxml_export_update_buffer(ndata, res); + hwloc__nolibxml_export_new_child(&state, &childstate, "topology"); + if (!(flags & HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1)) + hwloc__nolibxml_export_new_prop(&childstate, "version", "2.0"); + hwloc__xml_export_topology (&childstate, topology, flags); + hwloc__nolibxml_export_end_object(&childstate, "topology"); + + return ndata->written+1; /* ending \0 */ +} + +static int +hwloc_nolibxml_export_buffer(hwloc_topology_t topology, struct hwloc__xml_export_data_s *edata, + char **bufferp, int *buflenp, unsigned long flags) +{ + char *buffer; + size_t bufferlen, res; + + bufferlen = 16384; /* random guess for large enough default */ + buffer = malloc(bufferlen); + if (!buffer) + return -1; + res = hwloc___nolibxml_prepare_export(topology, edata, buffer, (int)bufferlen, flags); + + if (res > bufferlen) { + char *tmp = realloc(buffer, res); + if (!tmp) { + free(buffer); + return -1; + } + buffer = tmp; + 
hwloc___nolibxml_prepare_export(topology, edata, buffer, (int)res, flags); + } + + *bufferp = buffer; + *buflenp = (int)res; + return 0; +} + +static int +hwloc_nolibxml_export_file(hwloc_topology_t topology, struct hwloc__xml_export_data_s *edata, + const char *filename, unsigned long flags) +{ + FILE *file; + char *buffer; + int bufferlen; + int ret; + + ret = hwloc_nolibxml_export_buffer(topology, edata, &buffer, &bufferlen, flags); + if (ret < 0) + return -1; + + if (!strcmp(filename, "-")) { + file = stdout; + } else { + file = fopen(filename, "w"); + if (!file) { + free(buffer); + return -1; + } + } + + ret = (int)fwrite(buffer, 1, bufferlen-1 /* don't write the ending \0 */, file); + if (ret == bufferlen-1) { + ret = 0; + } else { + errno = ferror(file); + ret = -1; + } + + free(buffer); + + if (file != stdout) + fclose(file); + return ret; +} + +static size_t +hwloc___nolibxml_prepare_export_diff(hwloc_topology_diff_t diff, const char *refname, char *xmlbuffer, int buflen) +{ + struct hwloc__xml_export_state_s state, childstate; + hwloc__nolibxml_export_state_data_t ndata = (void *) &state.data; + int res; + + HWLOC_BUILD_ASSERT(sizeof(*ndata) <= sizeof(state.data)); + + state.new_child = hwloc__nolibxml_export_new_child; + state.new_prop = hwloc__nolibxml_export_new_prop; + state.add_content = hwloc__nolibxml_export_add_content; + state.end_object = hwloc__nolibxml_export_end_object; + + ndata->indent = 0; + ndata->written = 0; + ndata->buffer = xmlbuffer; + ndata->remaining = buflen; + + ndata->nr_children = 1; /* don't close a non-existing previous tag when opening the topology tag */ + ndata->has_content = 0; + + res = hwloc_snprintf(ndata->buffer, ndata->remaining, + "\n" + "\n"); + hwloc__nolibxml_export_update_buffer(ndata, res); + hwloc__nolibxml_export_new_child(&state, &childstate, "topologydiff"); + if (refname) + hwloc__nolibxml_export_new_prop(&childstate, "refname", refname); + hwloc__xml_export_diff (&childstate, diff); + 
hwloc__nolibxml_export_end_object(&childstate, "topologydiff"); + + return ndata->written+1; +} + +static int +hwloc_nolibxml_export_diff_buffer(hwloc_topology_diff_t diff, const char *refname, char **bufferp, int *buflenp) +{ + char *buffer; + size_t bufferlen, res; + + bufferlen = 16384; /* random guess for large enough default */ + buffer = malloc(bufferlen); + if (!buffer) + return -1; + res = hwloc___nolibxml_prepare_export_diff(diff, refname, buffer, (int)bufferlen); + + if (res > bufferlen) { + char *tmp = realloc(buffer, res); + if (!tmp) { + free(buffer); + return -1; + } + buffer = tmp; + hwloc___nolibxml_prepare_export_diff(diff, refname, buffer, (int)res); + } + + *bufferp = buffer; + *buflenp = (int)res; + return 0; +} + +static int +hwloc_nolibxml_export_diff_file(hwloc_topology_diff_t diff, const char *refname, const char *filename) +{ + FILE *file; + char *buffer; + int bufferlen; + int ret; + + ret = hwloc_nolibxml_export_diff_buffer(diff, refname, &buffer, &bufferlen); + if (ret < 0) + return -1; + + if (!strcmp(filename, "-")) { + file = stdout; + } else { + file = fopen(filename, "w"); + if (!file) { + free(buffer); + return -1; + } + } + + ret = (int)fwrite(buffer, 1, bufferlen-1 /* don't write the ending \0 */, file); + if (ret == bufferlen-1) { + ret = 0; + } else { + errno = ferror(file); + ret = -1; + } + + free(buffer); + + if (file != stdout) + fclose(file); + return ret; +} + +static void +hwloc_nolibxml_free_buffer(void *xmlbuffer) +{ + free(xmlbuffer); +} + +/************* + * Callbacks * + *************/ + +static struct hwloc_xml_callbacks hwloc_xml_nolibxml_callbacks = { + hwloc_nolibxml_backend_init, + hwloc_nolibxml_export_file, + hwloc_nolibxml_export_buffer, + hwloc_nolibxml_free_buffer, + hwloc_nolibxml_import_diff, + hwloc_nolibxml_export_diff_file, + hwloc_nolibxml_export_diff_buffer +}; + +static struct hwloc_xml_component hwloc_nolibxml_xml_component = { + &hwloc_xml_nolibxml_callbacks, + NULL +}; + +const struct 
hwloc_component hwloc_xml_nolibxml_component = { + HWLOC_COMPONENT_ABI, + NULL, NULL, + HWLOC_COMPONENT_TYPE_XML, + 0, + &hwloc_nolibxml_xml_component +}; diff --git a/src/3rdparty/hwloc/src/topology-xml.c b/src/3rdparty/hwloc/src/topology-xml.c new file mode 100644 index 00000000..e7c5ef62 --- /dev/null +++ b/src/3rdparty/hwloc/src/topology-xml.c @@ -0,0 +1,2886 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2019 Inria. All rights reserved. + * Copyright © 2009-2011 Université Bordeaux + * Copyright © 2009-2018 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +#include +#include +#include +#include +#include +#include + +#include + +int +hwloc__xml_verbose(void) +{ + static int checked = 0; + static int verbose = 0; + if (!checked) { + const char *env = getenv("HWLOC_XML_VERBOSE"); + if (env) + verbose = atoi(env); + checked = 1; + } + return verbose; +} + +static int +hwloc_nolibxml_import(void) +{ + static int checked = 0; + static int nolibxml = 0; + if (!checked) { + const char *env = getenv("HWLOC_LIBXML"); + if (env) { + nolibxml = !atoi(env); + } else { + env = getenv("HWLOC_LIBXML_IMPORT"); + if (env) + nolibxml = !atoi(env); + } + checked = 1; + } + return nolibxml; +} + +static int +hwloc_nolibxml_export(void) +{ + static int checked = 0; + static int nolibxml = 0; + if (!checked) { + const char *env = getenv("HWLOC_LIBXML"); + if (env) { + nolibxml = !atoi(env); + } else { + env = getenv("HWLOC_LIBXML_EXPORT"); + if (env) + nolibxml = !atoi(env); + } + checked = 1; + } + return nolibxml; +} + +#define BASE64_ENCODED_LENGTH(length) (4*(((length)+2)/3)) + +/********************************* + ********* XML callbacks ********* + *********************************/ + +/* set when registering nolibxml and libxml components. + * modifications protected by the components mutex. + * read by the common XML code in topology-xml.c to jump to the right XML backend. 
+ */ +static struct hwloc_xml_callbacks *hwloc_nolibxml_callbacks = NULL, *hwloc_libxml_callbacks = NULL; + +void +hwloc_xml_callbacks_register(struct hwloc_xml_component *comp) +{ + if (!hwloc_nolibxml_callbacks) + hwloc_nolibxml_callbacks = comp->nolibxml_callbacks; + if (!hwloc_libxml_callbacks) + hwloc_libxml_callbacks = comp->libxml_callbacks; +} + +void +hwloc_xml_callbacks_reset(void) +{ + hwloc_nolibxml_callbacks = NULL; + hwloc_libxml_callbacks = NULL; +} + +/************************************************ + ********* XML import (common routines) ********* + ************************************************/ + +#define _HWLOC_OBJ_CACHE_OLD (HWLOC_OBJ_TYPE_MAX+1) /* temporarily used when importing pre-v2.0 attribute-less cache types */ +#define _HWLOC_OBJ_FUTURE (HWLOC_OBJ_TYPE_MAX+2) /* temporarily used when ignoring future types */ + +static void +hwloc__xml_import_object_attr(struct hwloc_topology *topology, + struct hwloc_xml_backend_data_s *data, + struct hwloc_obj *obj, + const char *name, const char *value, + hwloc__xml_import_state_t state) +{ + if (!strcmp(name, "type")) { + /* already handled */ + return; + } + + else if (!strcmp(name, "os_index")) + obj->os_index = strtoul(value, NULL, 10); + else if (!strcmp(name, "gp_index")) { + obj->gp_index = strtoull(value, NULL, 10); + if (!obj->gp_index && hwloc__xml_verbose()) + fprintf(stderr, "%s: unexpected zero gp_index, topology may be invalid\n", state->global->msgprefix); + if (obj->gp_index >= topology->next_gp_index) + topology->next_gp_index = obj->gp_index + 1; + } else if (!strcmp(name, "cpuset")) { + if (!obj->cpuset) + obj->cpuset = hwloc_bitmap_alloc(); + hwloc_bitmap_sscanf(obj->cpuset, value); + } else if (!strcmp(name, "complete_cpuset")) { + if (!obj->complete_cpuset) + obj->complete_cpuset = hwloc_bitmap_alloc(); + hwloc_bitmap_sscanf(obj->complete_cpuset, value); + } else if (!strcmp(name, "allowed_cpuset")) { + /* ignored except for root */ + if (!obj->parent) + 
hwloc_bitmap_sscanf(topology->allowed_cpuset, value); + } else if (!strcmp(name, "nodeset")) { + if (!obj->nodeset) + obj->nodeset = hwloc_bitmap_alloc(); + hwloc_bitmap_sscanf(obj->nodeset, value); + } else if (!strcmp(name, "complete_nodeset")) { + if (!obj->complete_nodeset) + obj->complete_nodeset = hwloc_bitmap_alloc(); + hwloc_bitmap_sscanf(obj->complete_nodeset, value); + } else if (!strcmp(name, "allowed_nodeset")) { + /* ignored except for root */ + if (!obj->parent) + hwloc_bitmap_sscanf(topology->allowed_nodeset, value); + } else if (!strcmp(name, "name")) { + if (obj->name) + free(obj->name); + obj->name = strdup(value); + } else if (!strcmp(name, "subtype")) { + if (obj->subtype) + free(obj->subtype); + obj->subtype = strdup(value); + } + + else if (!strcmp(name, "cache_size")) { + unsigned long long lvalue = strtoull(value, NULL, 10); + if (hwloc__obj_type_is_cache(obj->type) || obj->type == _HWLOC_OBJ_CACHE_OLD) + obj->attr->cache.size = lvalue; + else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring cache_size attribute for non-cache object type\n", + state->global->msgprefix); + } + + else if (!strcmp(name, "cache_linesize")) { + unsigned long lvalue = strtoul(value, NULL, 10); + if (hwloc__obj_type_is_cache(obj->type) || obj->type == _HWLOC_OBJ_CACHE_OLD) + obj->attr->cache.linesize = lvalue; + else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring cache_linesize attribute for non-cache object type\n", + state->global->msgprefix); + } + + else if (!strcmp(name, "cache_associativity")) { + int lvalue = atoi(value); + if (hwloc__obj_type_is_cache(obj->type) || obj->type == _HWLOC_OBJ_CACHE_OLD) + obj->attr->cache.associativity = lvalue; + else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring cache_associativity attribute for non-cache object type\n", + state->global->msgprefix); + } + + else if (!strcmp(name, "cache_type")) { + unsigned long lvalue = strtoul(value, NULL, 10); + if (hwloc__obj_type_is_cache(obj->type) || 
obj->type == _HWLOC_OBJ_CACHE_OLD) { + if (lvalue == HWLOC_OBJ_CACHE_UNIFIED + || lvalue == HWLOC_OBJ_CACHE_DATA + || lvalue == HWLOC_OBJ_CACHE_INSTRUCTION) + obj->attr->cache.type = (hwloc_obj_cache_type_t) lvalue; + else + fprintf(stderr, "%s: ignoring invalid cache_type attribute %lu\n", + state->global->msgprefix, lvalue); + } else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring cache_type attribute for non-cache object type\n", + state->global->msgprefix); + } + + else if (!strcmp(name, "local_memory")) { + unsigned long long lvalue = strtoull(value, NULL, 10); + if (obj->type == HWLOC_OBJ_NUMANODE) + obj->attr->numanode.local_memory = lvalue; + else if (!obj->parent) + topology->machine_memory.local_memory = lvalue; + else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring local_memory attribute for non-NUMAnode non-root object\n", + state->global->msgprefix); + } + + else if (!strcmp(name, "depth")) { + unsigned long lvalue = strtoul(value, NULL, 10); + if (hwloc__obj_type_is_cache(obj->type) || obj->type == _HWLOC_OBJ_CACHE_OLD) { + obj->attr->cache.depth = lvalue; + } else if (obj->type == HWLOC_OBJ_GROUP || obj->type == HWLOC_OBJ_BRIDGE) { + /* will be overwritten by the core */ + } else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring depth attribute for object type without depth\n", + state->global->msgprefix); + } + + else if (!strcmp(name, "kind")) { + unsigned long lvalue = strtoul(value, NULL, 10); + if (obj->type == HWLOC_OBJ_GROUP) + obj->attr->group.kind = lvalue; + else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring kind attribute for non-group object type\n", + state->global->msgprefix); + } + + else if (!strcmp(name, "subkind")) { + unsigned long lvalue = strtoul(value, NULL, 10); + if (obj->type == HWLOC_OBJ_GROUP) + obj->attr->group.subkind = lvalue; + else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring subkind attribute for non-group object type\n", + state->global->msgprefix); + } + + else if 
(!strcmp(name, "dont_merge")) { + unsigned long lvalue = strtoul(value, NULL, 10); + if (obj->type == HWLOC_OBJ_GROUP) + obj->attr->group.dont_merge = lvalue; + else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring dont_merge attribute for non-group object type\n", + state->global->msgprefix); + } + + else if (!strcmp(name, "pci_busid")) { + switch (obj->type) { + case HWLOC_OBJ_PCI_DEVICE: + case HWLOC_OBJ_BRIDGE: { + unsigned domain, bus, dev, func; + if (sscanf(value, "%04x:%02x:%02x.%01x", + &domain, &bus, &dev, &func) != 4) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring invalid pci_busid format string %s\n", + state->global->msgprefix, value); + } else { + obj->attr->pcidev.domain = domain; + obj->attr->pcidev.bus = bus; + obj->attr->pcidev.dev = dev; + obj->attr->pcidev.func = func; + } + break; + } + default: + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring pci_busid attribute for non-PCI object\n", + state->global->msgprefix); + break; + } + } + + else if (!strcmp(name, "pci_type")) { + switch (obj->type) { + case HWLOC_OBJ_PCI_DEVICE: + case HWLOC_OBJ_BRIDGE: { + unsigned classid, vendor, device, subvendor, subdevice, revision; + if (sscanf(value, "%04x [%04x:%04x] [%04x:%04x] %02x", + &classid, &vendor, &device, &subvendor, &subdevice, &revision) != 6) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring invalid pci_type format string %s\n", + state->global->msgprefix, value); + } else { + obj->attr->pcidev.class_id = classid; + obj->attr->pcidev.vendor_id = vendor; + obj->attr->pcidev.device_id = device; + obj->attr->pcidev.subvendor_id = subvendor; + obj->attr->pcidev.subdevice_id = subdevice; + obj->attr->pcidev.revision = revision; + } + break; + } + default: + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring pci_type attribute for non-PCI object\n", + state->global->msgprefix); + break; + } + } + + else if (!strcmp(name, "pci_link_speed")) { + switch (obj->type) { + case HWLOC_OBJ_PCI_DEVICE: + 
case HWLOC_OBJ_BRIDGE: { + obj->attr->pcidev.linkspeed = (float) atof(value); + break; + } + default: + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring pci_link_speed attribute for non-PCI object\n", + state->global->msgprefix); + break; + } + } + + else if (!strcmp(name, "bridge_type")) { + switch (obj->type) { + case HWLOC_OBJ_BRIDGE: { + unsigned upstream_type, downstream_type; + if (sscanf(value, "%u-%u", &upstream_type, &downstream_type) != 2) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring invalid bridge_type format string %s\n", + state->global->msgprefix, value); + } else { + obj->attr->bridge.upstream_type = (hwloc_obj_bridge_type_t) upstream_type; + obj->attr->bridge.downstream_type = (hwloc_obj_bridge_type_t) downstream_type; + }; + break; + } + default: + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring bridge_type attribute for non-bridge object\n", + state->global->msgprefix); + break; + } + } + + else if (!strcmp(name, "bridge_pci")) { + switch (obj->type) { + case HWLOC_OBJ_BRIDGE: { + unsigned domain, secbus, subbus; + if (sscanf(value, "%04x:[%02x-%02x]", + &domain, &secbus, &subbus) != 3) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring invalid bridge_pci format string %s\n", + state->global->msgprefix, value); + } else { + obj->attr->bridge.downstream.pci.domain = domain; + obj->attr->bridge.downstream.pci.secondary_bus = secbus; + obj->attr->bridge.downstream.pci.subordinate_bus = subbus; + } + break; + } + default: + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring bridge_pci attribute for non-bridge object\n", + state->global->msgprefix); + break; + } + } + + else if (!strcmp(name, "osdev_type")) { + switch (obj->type) { + case HWLOC_OBJ_OS_DEVICE: { + unsigned osdev_type; + if (sscanf(value, "%u", &osdev_type) != 1) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring invalid osdev_type format string %s\n", + state->global->msgprefix, value); + } else + obj->attr->osdev.type = 
(hwloc_obj_osdev_type_t) osdev_type; + break; + } + default: + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring osdev_type attribute for non-osdev object\n", + state->global->msgprefix); + break; + } + } + + else if (data->version_major < 2) { + /************************ + * deprecated from 1.x + */ + if (!strcmp(name, "os_level") + || !strcmp(name, "online_cpuset")) + { /* ignored */ } + + /************************* + * deprecated from 1.0 + */ + else if (!strcmp(name, "dmi_board_vendor")) { + if (value[0]) + hwloc_obj_add_info(obj, "DMIBoardVendor", value); + } + else if (!strcmp(name, "dmi_board_name")) { + if (value[0]) + hwloc_obj_add_info(obj, "DMIBoardName", value); + } + + else if (data->version_major < 1) { + /************************* + * deprecated from 0.9 + */ + if (!strcmp(name, "memory_kB")) { + unsigned long long lvalue = strtoull(value, NULL, 10); + if (obj->type == _HWLOC_OBJ_CACHE_OLD) + obj->attr->cache.size = lvalue << 10; + else if (obj->type == HWLOC_OBJ_NUMANODE) + obj->attr->numanode.local_memory = lvalue << 10; + else if (!obj->parent) + topology->machine_memory.local_memory = lvalue << 10; + else if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring memory_kB attribute for non-NUMAnode non-root object\n", + state->global->msgprefix); + } + else if (!strcmp(name, "huge_page_size_kB")) { + unsigned long lvalue = strtoul(value, NULL, 10); + if (obj->type == HWLOC_OBJ_NUMANODE || !obj->parent) { + struct hwloc_numanode_attr_s *memory = obj->type == HWLOC_OBJ_NUMANODE ? 
&obj->attr->numanode : &topology->machine_memory; + if (!memory->page_types) { + memory->page_types = malloc(sizeof(*memory->page_types)); + memory->page_types_len = 1; + } + memory->page_types[0].size = lvalue << 10; + } else if (hwloc__xml_verbose()) { + fprintf(stderr, "%s: ignoring huge_page_size_kB attribute for non-NUMAnode non-root object\n", + state->global->msgprefix); + } + } + else if (!strcmp(name, "huge_page_free")) { + unsigned long lvalue = strtoul(value, NULL, 10); + if (obj->type == HWLOC_OBJ_NUMANODE || !obj->parent) { + struct hwloc_numanode_attr_s *memory = obj->type == HWLOC_OBJ_NUMANODE ? &obj->attr->numanode : &topology->machine_memory; + if (!memory->page_types) { + memory->page_types = malloc(sizeof(*memory->page_types)); + memory->page_types_len = 1; + } + memory->page_types[0].count = lvalue; + } else if (hwloc__xml_verbose()) { + fprintf(stderr, "%s: ignoring huge_page_free attribute for non-NUMAnode non-root object\n", + state->global->msgprefix); + } + } + /* end of deprecated from 0.9 */ + else goto unknown; + } + /* end of deprecated from 1.0 */ + else goto unknown; + } + else { + unknown: + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring unknown object attribute %s\n", + state->global->msgprefix, name); + } +} + + +static int +hwloc__xml_import_info(struct hwloc_xml_backend_data_s *data, + hwloc_obj_t obj, + hwloc__xml_import_state_t state) +{ + char *infoname = NULL; + char *infovalue = NULL; + + while (1) { + char *attrname, *attrvalue; + if (state->global->next_attr(state, &attrname, &attrvalue) < 0) + break; + if (!strcmp(attrname, "name")) + infoname = attrvalue; + else if (!strcmp(attrname, "value")) + infovalue = attrvalue; + else + return -1; + } + + if (infoname) { + /* empty strings are ignored by libxml */ + if (data->version_major < 2 && + (!strcmp(infoname, "Type") || !strcmp(infoname, "CoProcType"))) { + /* 1.x stored subtype in Type or CoProcType */ + if (infovalue) { + if (obj->subtype) + 
free(obj->subtype); + obj->subtype = strdup(infovalue); + } + } else { + if (infovalue) + hwloc_obj_add_info(obj, infoname, infovalue); + } + } + + return state->global->close_tag(state); +} + +static int +hwloc__xml_import_pagetype(hwloc_topology_t topology __hwloc_attribute_unused, struct hwloc_numanode_attr_s *memory, + hwloc__xml_import_state_t state) +{ + uint64_t size = 0, count = 0; + + while (1) { + char *attrname, *attrvalue; + if (state->global->next_attr(state, &attrname, &attrvalue) < 0) + break; + if (!strcmp(attrname, "size")) + size = strtoull(attrvalue, NULL, 10); + else if (!strcmp(attrname, "count")) + count = strtoull(attrvalue, NULL, 10); + else + return -1; + } + + if (size) { + unsigned idx = memory->page_types_len; + struct hwloc_memory_page_type_s *tmp; + tmp = realloc(memory->page_types, (idx+1)*sizeof(*memory->page_types)); + if (tmp) { /* if failed to allocate, ignore this page_type entry */ + memory->page_types = tmp; + memory->page_types_len = idx+1; + memory->page_types[idx].size = size; + memory->page_types[idx].count = count; + } + } + + return state->global->close_tag(state); +} + +static int +hwloc__xml_v1import_distances(struct hwloc_xml_backend_data_s *data, + hwloc_obj_t obj, + hwloc__xml_import_state_t state) +{ + unsigned long reldepth = 0, nbobjs = 0; + float latbase = 0; + char *tag; + int ret; + + while (1) { + char *attrname, *attrvalue; + if (state->global->next_attr(state, &attrname, &attrvalue) < 0) + break; + if (!strcmp(attrname, "nbobjs")) + nbobjs = strtoul(attrvalue, NULL, 10); + else if (!strcmp(attrname, "relative_depth")) + reldepth = strtoul(attrvalue, NULL, 10); + else if (!strcmp(attrname, "latency_base")) + latbase = (float) atof(attrvalue); + else + return -1; + } + + if (nbobjs && reldepth && latbase) { + unsigned i; + float *matrix; + struct hwloc__xml_imported_v1distances_s *v1dist; + + matrix = malloc(nbobjs*nbobjs*sizeof(float)); + v1dist = malloc(sizeof(*v1dist)); + if (!matrix || !v1dist) { + if 
(hwloc__xml_verbose()) + fprintf(stderr, "%s: failed to allocate v1distance matrix for %lu objects\n", + state->global->msgprefix, nbobjs); + free(v1dist); + free(matrix); + return -1; + } + + v1dist->kind = HWLOC_DISTANCES_KIND_FROM_OS|HWLOC_DISTANCES_KIND_MEANS_LATENCY; + /* TODO: we can't know for sure if it comes from the OS. + * On Linux/x86, it would be 10 on the diagonal. + * On Solaris/T5, 15 on the diagonal. + * Just check whether all values are integers, and that all values on the diagonal are minimal and identical? + */ + + v1dist->nbobjs = nbobjs; + v1dist->floats = matrix; + + for(i=0; iglobal->find_child(state, &childstate, &tag); + if (ret <= 0 || strcmp(tag, "latency")) { + /* a latency child is needed */ + free(matrix); + free(v1dist); + return -1; + } + + ret = state->global->next_attr(&childstate, &attrname, &attrvalue); + if (ret < 0 || strcmp(attrname, "value")) { + free(matrix); + free(v1dist); + return -1; + } + + val = (float) atof((char *) attrvalue); + matrix[i] = val * latbase; + + ret = state->global->close_tag(&childstate); + if (ret < 0) { + free(matrix); + free(v1dist); + return -1; + } + + state->global->close_child(&childstate); + } + + if (nbobjs < 2) { + /* distances with a single object are useless, even if the XML isn't invalid */ + assert(nbobjs == 1); + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring invalid distance matrix with only 1 object\n", + state->global->msgprefix); + free(matrix); + free(v1dist); + + } else if (obj->parent) { + /* we currently only import distances attached to root. + * we can't save obj in v1dist because obj could be dropped during insert if ignored. + * we could save its complete_cpu/nodeset instead to find it back later. + * but it doesn't matter much since only NUMA distances attached to root matter. 
+ */ + free(matrix); + free(v1dist); + + } else { + /* queue the distance for real */ + v1dist->prev = data->last_v1dist; + v1dist->next = NULL; + if (data->last_v1dist) + data->last_v1dist->next = v1dist; + else + data->first_v1dist = v1dist; + data->last_v1dist = v1dist; + } + } + + return state->global->close_tag(state); +} + +static int +hwloc__xml_import_userdata(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, + hwloc__xml_import_state_t state) +{ + size_t length = 0; + int encoded = 0; + char *name = NULL; /* optional */ + int ret; + + while (1) { + char *attrname, *attrvalue; + if (state->global->next_attr(state, &attrname, &attrvalue) < 0) + break; + if (!strcmp(attrname, "length")) + length = strtoul(attrvalue, NULL, 10); + else if (!strcmp(attrname, "encoding")) + encoded = !strcmp(attrvalue, "base64"); + else if (!strcmp(attrname, "name")) + name = attrvalue; + else + return -1; + } + + if (!topology->userdata_import_cb) { + char *buffer; + size_t reallength = encoded ? BASE64_ENCODED_LENGTH(length) : length; + ret = state->global->get_content(state, &buffer, reallength); + if (ret < 0) + return -1; + + } else if (topology->userdata_not_decoded) { + char *buffer, *fakename; + size_t reallength = encoded ? BASE64_ENCODED_LENGTH(length) : length; + ret = state->global->get_content(state, &buffer, reallength); + if (ret < 0) + return -1; + fakename = malloc(6 + 1 + (name ? strlen(name) : 4) + 1); + if (!fakename) + return -1; + sprintf(fakename, encoded ? "base64%c%s" : "normal%c%s", name ? ':' : '-', name ? 
name : "anon"); + topology->userdata_import_cb(topology, obj, fakename, buffer, length); + free(fakename); + + } else if (encoded && length) { + char *encoded_buffer; + size_t encoded_length = BASE64_ENCODED_LENGTH(length); + ret = state->global->get_content(state, &encoded_buffer, encoded_length); + if (ret < 0) + return -1; + if (ret) { + char *decoded_buffer = malloc(length+1); + if (!decoded_buffer) + return -1; + assert(encoded_buffer[encoded_length] == 0); + ret = hwloc_decode_from_base64(encoded_buffer, decoded_buffer, length+1); + if (ret != (int) length) { + free(decoded_buffer); + return -1; + } + topology->userdata_import_cb(topology, obj, name, decoded_buffer, length); + free(decoded_buffer); + } + + } else { /* always handle length==0 in the non-encoded case */ + char *buffer = (char *) ""; + if (length) { + ret = state->global->get_content(state, &buffer, length); + if (ret < 0) + return -1; + } + topology->userdata_import_cb(topology, obj, name, buffer, length); + } + + state->global->close_content(state); + return state->global->close_tag(state); +} + +static void hwloc__xml_import_report_outoforder(hwloc_topology_t topology, hwloc_obj_t new, hwloc_obj_t old) +{ + char *progname = hwloc_progname(topology); + const char *origversion = hwloc_obj_get_info_by_name(topology->levels[0][0], "hwlocVersion"); + const char *origprogname = hwloc_obj_get_info_by_name(topology->levels[0][0], "ProcessName"); + char *c1, *cc1, t1[64]; + char *c2 = NULL, *cc2 = NULL, t2[64]; + + hwloc_bitmap_asprintf(&c1, new->cpuset); + hwloc_bitmap_asprintf(&cc1, new->complete_cpuset); + hwloc_obj_type_snprintf(t1, sizeof(t1), new, 0); + + if (old->cpuset) + hwloc_bitmap_asprintf(&c2, old->cpuset); + if (old->complete_cpuset) + hwloc_bitmap_asprintf(&cc2, old->complete_cpuset); + hwloc_obj_type_snprintf(t2, sizeof(t2), old, 0); + + fprintf(stderr, "****************************************************************************\n"); + fprintf(stderr, "* hwloc has encountered an 
out-of-order XML topology load.\n"); + fprintf(stderr, "* Object %s cpuset %s complete %s\n", + t1, c1, cc1); + fprintf(stderr, "* was inserted after object %s with %s and %s.\n", + t2, c2 ? c2 : "none", cc2 ? cc2 : "none"); + fprintf(stderr, "* The error occured in hwloc %s inside process `%s', while\n", + HWLOC_VERSION, + progname ? progname : ""); + if (origversion || origprogname) + fprintf(stderr, "* the input XML was generated by hwloc %s inside process `%s'.\n", + origversion ? origversion : "(unknown version)", + origprogname ? origprogname : ""); + else + fprintf(stderr, "* the input XML was generated by an unspecified ancient hwloc release.\n"); + fprintf(stderr, "* Please check that your input topology XML file is valid.\n"); + fprintf(stderr, "* Set HWLOC_DEBUG_CHECK=1 in the environment to detect further issues.\n"); + fprintf(stderr, "****************************************************************************\n"); + + free(c1); + free(cc1); + free(c2); + free(cc2); + free(progname); +} + +static int +hwloc__xml_import_object(hwloc_topology_t topology, + struct hwloc_xml_backend_data_s *data, + hwloc_obj_t parent, hwloc_obj_t obj, int *gotignored, + hwloc__xml_import_state_t state) +{ + int ignored = 0; + int childrengotignored = 0; + int attribute_less_cache = 0; + int numa_was_root = 0; + char *tag; + struct hwloc__xml_import_state_s childstate; + + /* set parent now since it's used during import below or in subfunctions */ + obj->parent = parent; + + /* process attributes */ + while (1) { + char *attrname, *attrvalue; + if (state->global->next_attr(state, &attrname, &attrvalue) < 0) + break; + if (!strcmp(attrname, "type")) { + if (hwloc_type_sscanf(attrvalue, &obj->type, NULL, 0) < 0) { + if (!strcasecmp(attrvalue, "Cache")) { + obj->type = _HWLOC_OBJ_CACHE_OLD; /* will be fixed below */ + attribute_less_cache = 1; + } else if (!strcasecmp(attrvalue, "System")) { + if (!parent) + obj->type = HWLOC_OBJ_MACHINE; + else { + if (hwloc__xml_verbose()) 
+ fprintf(stderr, "%s: obsolete System object only allowed at root\n", + state->global->msgprefix); + goto error_with_object; + } + } else if (!strcasecmp(attrvalue, "Die")) { + /* deal with possible future type */ + obj->type = HWLOC_OBJ_GROUP; + obj->subtype = strdup("Die"); + obj->attr->group.kind = HWLOC_GROUP_KIND_INTEL_DIE; + obj->attr->group.dont_merge = data->dont_merge_die_groups; + } else if (!strcasecmp(attrvalue, "Tile")) { + /* deal with possible future type */ + obj->type = HWLOC_OBJ_GROUP; + obj->subtype = strdup("Tile"); + obj->attr->group.kind = HWLOC_GROUP_KIND_INTEL_TILE; + } else if (!strcasecmp(attrvalue, "Module")) { + /* deal with possible future type */ + obj->type = HWLOC_OBJ_GROUP; + obj->subtype = strdup("Module"); + obj->attr->group.kind = HWLOC_GROUP_KIND_INTEL_MODULE; + } else if (!strcasecmp(attrvalue, "MemCache")) { + /* ignore possible future type */ + obj->type = _HWLOC_OBJ_FUTURE; + ignored = 1; + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: %s object not-supported, will be ignored\n", + state->global->msgprefix, attrvalue); + } else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: unrecognized object type string %s\n", + state->global->msgprefix, attrvalue); + goto error_with_object; + } + } + } else { + /* type needed first */ + if (obj->type == HWLOC_OBJ_TYPE_NONE) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: object attribute %s found before type\n", + state->global->msgprefix, attrname); + goto error_with_object; + } + hwloc__xml_import_object_attr(topology, data, obj, attrname, attrvalue, state); + } + } + + /* process non-object subnodes to get info attrs (as well as page_types, etc) */ + while (1) { + int ret; + + tag = NULL; + ret = state->global->find_child(state, &childstate, &tag); + if (ret < 0) + goto error; + if (!ret) + break; + + if (!strcmp(tag, "object")) { + /* we'll handle children later */ + break; + + } else if (!strcmp(tag, "page_type")) { + if (obj->type == HWLOC_OBJ_NUMANODE) { + ret = 
hwloc__xml_import_pagetype(topology, &obj->attr->numanode, &childstate); + } else if (!parent) { + ret = hwloc__xml_import_pagetype(topology, &topology->machine_memory, &childstate); + } else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid non-NUMAnode object child %s\n", + state->global->msgprefix, tag); + ret = -1; + } + + } else if (!strcmp(tag, "info")) { + ret = hwloc__xml_import_info(data, obj, &childstate); + } else if (data->version_major < 2 && !strcmp(tag, "distances")) { + ret = hwloc__xml_v1import_distances(data, obj, &childstate); + } else if (!strcmp(tag, "userdata")) { + ret = hwloc__xml_import_userdata(topology, obj, &childstate); + } else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid special object child %s\n", + state->global->msgprefix, tag); + ret = -1; + } + + if (ret < 0) + goto error; + + state->global->close_child(&childstate); + } + + if (parent && obj->type == HWLOC_OBJ_MACHINE) { + /* replace non-root Machine with Groups */ + obj->type = HWLOC_OBJ_GROUP; + } + + if (parent && data->version_major >= 2) { + /* check parent/child types for 2.x */ + if (hwloc__obj_type_is_normal(obj->type)) { + if (!hwloc__obj_type_is_normal(parent->type)) { + if (hwloc__xml_verbose()) + fprintf(stderr, "normal object %s cannot be child of non-normal parent %s\n", + hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type)); + goto error_with_object; + } + } else if (hwloc__obj_type_is_memory(obj->type)) { + if (hwloc__obj_type_is_io(parent->type) || HWLOC_OBJ_MISC == parent->type) { + if (hwloc__xml_verbose()) + fprintf(stderr, "Memory object %s cannot be child of non-normal-or-memory parent %s\n", + hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type)); + goto error_with_object; + } + } else if (hwloc__obj_type_is_io(obj->type)) { + if (hwloc__obj_type_is_memory(parent->type) || HWLOC_OBJ_MISC == parent->type) { + if (hwloc__xml_verbose()) + fprintf(stderr, "I/O object %s cannot be child of 
non-normal-or-I/O parent %s\n", + hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type)); + goto error_with_object; + } + } + + } else if (parent && data->version_major < 2) { + /* check parent/child types for pre-v2.0 */ + if (hwloc__obj_type_is_normal(obj->type) || HWLOC_OBJ_NUMANODE == obj->type) { + if (hwloc__obj_type_is_special(parent->type)) { + if (hwloc__xml_verbose()) + fprintf(stderr, "v1.x normal v1.x object %s cannot be child of special parent %s\n", + hwloc_obj_type_string(obj->type), hwloc_obj_type_string(parent->type)); + goto error_with_object; + } + } else if (hwloc__obj_type_is_io(obj->type)) { + if (HWLOC_OBJ_MISC == parent->type) { + if (hwloc__xml_verbose()) + fprintf(stderr, "I/O object %s cannot be child of Misc parent\n", + hwloc_obj_type_string(obj->type)); + goto error_with_object; + } + } + } + + if (data->version_major < 2) { + /*************************** + * 1.x specific checks + */ + + /* attach pre-v2.0 children of NUMA nodes to normal parent */ + if (parent && parent->type == HWLOC_OBJ_NUMANODE) { + parent = parent->parent; + assert(parent); + } + + /* insert a group above pre-v2.0 NUMA nodes if needed */ + if (obj->type == HWLOC_OBJ_NUMANODE) { + if (!parent) { + /* crazy case of NUMA node root (only possible when filtering Machine keep_structure in v1.x), + * reinsert a Machine object + */ + hwloc_obj_t machine = hwloc_alloc_setup_object(topology, HWLOC_OBJ_MACHINE, HWLOC_UNKNOWN_INDEX); + machine->cpuset = hwloc_bitmap_dup(obj->cpuset); + machine->complete_cpuset = hwloc_bitmap_dup(obj->cpuset); + machine->nodeset = hwloc_bitmap_dup(obj->nodeset); + machine->complete_nodeset = hwloc_bitmap_dup(obj->complete_nodeset); + topology->levels[0][0] = machine; + parent = machine; + numa_was_root = 1; + + } else if (!hwloc_bitmap_isequal(obj->complete_cpuset, parent->complete_cpuset)) { + /* This NUMA node has a different locality from its parent. + * Don't attach it to this parent, or it well get its parent cpusets. 
+ * Add an intermediate Group with the desired locality. + */ + int needgroup = 1; + hwloc_obj_t sibling; + + sibling = parent->memory_first_child; + if (sibling && !sibling->subtype + && !sibling->next_sibling + && obj->subtype && !strcmp(obj->subtype, "MCDRAM") + && hwloc_bitmap_iszero(obj->complete_cpuset)) { + /* this is KNL MCDRAM, we want to attach it near its DDR sibling */ + needgroup = 0; + } + /* Ideally we would also detect similar cases on future non-KNL platforms with multiple local NUMA nodes. + * That's unlikely to occur with v1.x. + * And we have no way to be sure if this CPU-less node is desired or not. + */ + + if (needgroup + && hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) { + hwloc_obj_t group = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX); + group->gp_index = 0; /* will be initialized at the end of the discovery once we know the max */ + group->cpuset = hwloc_bitmap_dup(obj->cpuset); + group->complete_cpuset = hwloc_bitmap_dup(obj->cpuset); + group->nodeset = hwloc_bitmap_dup(obj->nodeset); + group->complete_nodeset = hwloc_bitmap_dup(obj->complete_nodeset); + group->attr->group.kind = HWLOC_GROUP_KIND_MEMORY; + hwloc_insert_object_by_parent(topology, parent, group); + parent = group; + } + } + } + + /* fixup attribute-less caches imported from pre-v2.0 XMLs */ + if (attribute_less_cache) { + assert(obj->type == _HWLOC_OBJ_CACHE_OLD); + obj->type = hwloc_cache_type_by_depth_type(obj->attr->cache.depth, obj->attr->cache.type); + } + + /* fixup Misc objects inserted by cpusets in pre-v2.0 XMLs */ + if (obj->type == HWLOC_OBJ_MISC && obj->cpuset) + obj->type = HWLOC_OBJ_GROUP; + + /* check set consistency. + * 1.7.2 and earlier reported I/O Groups with only a cpuset, we don't want to reject those XMLs yet. + * Ignore those Groups since fixing the missing sets is hard (would need to look at children sets which are not available yet). + * Just abort the XML for non-Groups. 
+ */ + if (!obj->cpuset != !obj->complete_cpuset) { + /* has some cpuset without others */ + if (obj->type == HWLOC_OBJ_GROUP) { + ignored = 1; + } else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid object %s P#%u with some missing cpusets\n", + state->global->msgprefix, hwloc_obj_type_string(obj->type), obj->os_index); + goto error_with_object; + } + } else if (!obj->nodeset != !obj->complete_nodeset) { + /* has some nodeset without others */ + if (obj->type == HWLOC_OBJ_GROUP) { + ignored = 1; + } else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid object %s P#%u with some missing nodesets\n", + state->global->msgprefix, hwloc_obj_type_string(obj->type), obj->os_index); + goto error_with_object; + } + } else if (obj->nodeset && !obj->cpuset) { + /* has nodesets without cpusets (the contrary is allowed in pre-2.0) */ + if (obj->type == HWLOC_OBJ_GROUP) { + ignored = 1; + } else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid object %s P#%u with either cpuset or nodeset missing\n", + state->global->msgprefix, hwloc_obj_type_string(obj->type), obj->os_index); + goto error_with_object; + } + } + /* end of 1.x specific checks */ + } + + /* check that cache attributes are coherent with the actual type */ + if (hwloc__obj_type_is_cache(obj->type) + && obj->type != hwloc_cache_type_by_depth_type(obj->attr->cache.depth, obj->attr->cache.type)) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid cache type %s with attribute depth %u and type %d\n", + state->global->msgprefix, hwloc_obj_type_string(obj->type), obj->attr->cache.depth, (int) obj->attr->cache.type); + goto error_with_object; + } + + /* check special types vs cpuset */ + if (!obj->cpuset && !hwloc__obj_type_is_special(obj->type)) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid normal object %s P#%u without cpuset\n", + state->global->msgprefix, hwloc_obj_type_string(obj->type), obj->os_index); + goto error_with_object; + } + if (obj->cpuset && 
hwloc__obj_type_is_special(obj->type)) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid special object %s with cpuset\n", + state->global->msgprefix, hwloc_obj_type_string(obj->type)); + goto error_with_object; + } + + /* check parent vs child sets */ + if (obj->cpuset && parent && !parent->cpuset) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid object %s P#%u with cpuset while parent has none\n", + state->global->msgprefix, hwloc_obj_type_string(obj->type), obj->os_index); + goto error_with_object; + } + if (obj->nodeset && parent && !parent->nodeset) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid object %s P#%u with nodeset while parent has none\n", + state->global->msgprefix, hwloc_obj_type_string(obj->type), obj->os_index); + goto error_with_object; + } + + /* check NUMA nodes */ + if (obj->type == HWLOC_OBJ_NUMANODE) { + if (!obj->nodeset) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid NUMA node object P#%u without nodeset\n", + state->global->msgprefix, obj->os_index); + goto error_with_object; + } + data->nbnumanodes++; + obj->prev_cousin = data->last_numanode; + obj->next_cousin = NULL; + if (data->last_numanode) + data->last_numanode->next_cousin = obj; + else + data->first_numanode = obj; + data->last_numanode = obj; + } + + if (!hwloc_filter_check_keep_object(topology, obj)) { + /* Ignore this object instead of inserting it. + * + * Well, let the core ignore the root object later + * because we don't know yet if root has more than one child. 
+ */ + if (parent) + ignored = 1; + } + + if (parent && !ignored) { + /* root->parent is NULL, and root is already inserted */ + hwloc_insert_object_by_parent(topology, parent, obj); + /* insert_object_by_parent() doesn't merge during insert, so obj is still valid */ + } + + /* process object subnodes, if we found one win the above loop */ + while (tag) { + int ret; + + if (!strcmp(tag, "object")) { + hwloc_obj_t childobj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_TYPE_MAX, HWLOC_UNKNOWN_INDEX); + childobj->parent = ignored ? parent : obj; + ret = hwloc__xml_import_object(topology, data, ignored ? parent : obj, childobj, + &childrengotignored, + &childstate); + } else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid special object child %s while looking for objects\n", + state->global->msgprefix, tag); + ret = -1; + } + + if (ret < 0) + goto error; + + state->global->close_child(&childstate); + + tag = NULL; + ret = state->global->find_child(state, &childstate, &tag); + if (ret < 0) + goto error; + if (!ret) + break; + } + + if (numa_was_root) { + /* duplicate NUMA infos to root, most of them are likely root-specific */ + unsigned i; + for(i=0; iinfos_count; i++) { + struct hwloc_info_s *info = &obj->infos[i]; + hwloc_obj_add_info(parent, info->name, info->value); + } + /* TODO some infos are root-only (hwlocVersion, ProcessName, etc), remove them from obj? */ + } + + if (ignored) { + /* drop that object, and tell the parent that one child got ignored */ + hwloc_free_unlinked_object(obj); + *gotignored = 1; + + } else if (obj->first_child) { + /* now that all children are inserted, make sure they are in-order, + * so that the core doesn't have to deal with crappy children list. + */ + hwloc_obj_t cur, next; + for(cur = obj->first_child, next = cur->next_sibling; + next; + cur = next, next = next->next_sibling) { + /* If reordering is needed, at least one pair of consecutive children will be out-of-order. 
+ * So just check pairs of consecutive children. + * + * We checked above that complete_cpuset is always set. + */ + if (hwloc_bitmap_compare_first(next->complete_cpuset, cur->complete_cpuset) < 0) { + /* next should be before cur */ + if (!childrengotignored) { + static int reported = 0; + if (!reported && !hwloc_hide_errors()) { + hwloc__xml_import_report_outoforder(topology, next, cur); + reported = 1; + } + } + hwloc__reorder_children(obj); + break; + } + } + /* no need to reorder memory children as long as there are no intermediate memory objects + * that could cause reordering when filtered-out. + */ + } + + return state->global->close_tag(state); + + error_with_object: + if (parent) + /* root->parent is NULL, and root is already inserted. the caller will cleanup that root. */ + hwloc_free_unlinked_object(obj); + error: + return -1; +} + +static int +hwloc__xml_v2import_distances(hwloc_topology_t topology, + hwloc__xml_import_state_t state) +{ + hwloc_obj_type_t type = HWLOC_OBJ_TYPE_NONE; + unsigned nbobjs = 0; + int indexing = 0; + int os_indexing = 0; + int gp_indexing = 0; + unsigned long kind = 0; + unsigned nr_indexes, nr_u64values; + uint64_t *indexes; + uint64_t *u64values; + int ret; + + /* process attributes */ + while (1) { + char *attrname, *attrvalue; + if (state->global->next_attr(state, &attrname, &attrvalue) < 0) + break; + if (!strcmp(attrname, "nbobjs")) + nbobjs = strtoul(attrvalue, NULL, 10); + else if (!strcmp(attrname, "type")) { + if (hwloc_type_sscanf(attrvalue, &type, NULL, 0) < 0) + goto out; + } + else if (!strcmp(attrname, "indexing")) { + indexing = 1; + if (!strcmp(attrvalue, "os")) + os_indexing = 1; + else if (!strcmp(attrvalue, "gp")) + gp_indexing = 1; + } + else if (!strcmp(attrname, "kind")) { + kind = strtoul(attrvalue, NULL, 10); + } + else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring unknown distance attribute %s\n", + state->global->msgprefix, attrname); + } + } + + /* abort if missing attribute */ + if 
(!nbobjs || type == HWLOC_OBJ_TYPE_NONE || !indexing || !kind) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 missing some attributes\n", + state->global->msgprefix); + goto out; + } + + indexes = malloc(nbobjs*sizeof(*indexes)); + u64values = malloc(nbobjs*nbobjs*sizeof(*u64values)); + if (!indexes || !u64values) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: failed to allocate distances arrays for %u objects\n", + state->global->msgprefix, nbobjs); + goto out_with_arrays; + } + + /* process children */ + nr_indexes = 0; + nr_u64values = 0; + while (1) { + struct hwloc__xml_import_state_s childstate; + char *attrname, *attrvalue, *tag, *buffer; + int length; + int is_index = 0; + int is_u64values = 0; + + ret = state->global->find_child(state, &childstate, &tag); + if (ret <= 0) + break; + + if (!strcmp(tag, "indexes")) + is_index = 1; + else if (!strcmp(tag, "u64values")) + is_u64values = 1; + if (!is_index && !is_u64values) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 with unrecognized child %s\n", + state->global->msgprefix, tag); + goto out_with_arrays; + } + + if (state->global->next_attr(&childstate, &attrname, &attrvalue) < 0 + || strcmp(attrname, "length")) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 child must have length attribute\n", + state->global->msgprefix); + goto out_with_arrays; + } + length = atoi(attrvalue); + + ret = state->global->get_content(&childstate, &buffer, length); + if (ret < 0) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 child needs content of length %d\n", + state->global->msgprefix, length); + goto out_with_arrays; + } + + if (is_index) { + /* get indexes */ + char *tmp; + if (nr_indexes >= nbobjs) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 with more than %u indexes\n", + state->global->msgprefix, nbobjs); + goto out_with_arrays; + } + tmp = buffer; + while (1) { + char *next; + unsigned long long u = strtoull(tmp, &next, 0); + if 
(next == tmp) + break; + indexes[nr_indexes++] = u; + if (*next != ' ') + break; + if (nr_indexes == nbobjs) + break; + tmp = next+1; + } + + } else if (is_u64values) { + /* get uint64_t values */ + char *tmp; + if (nr_u64values >= nbobjs*nbobjs) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 with more than %u u64values\n", + state->global->msgprefix, nbobjs*nbobjs); + goto out_with_arrays; + } + tmp = buffer; + while (1) { + char *next; + unsigned long long u = strtoull(tmp, &next, 0); + if (next == tmp) + break; + u64values[nr_u64values++] = u; + if (*next != ' ') + break; + if (nr_u64values == nbobjs*nbobjs) + break; + tmp = next+1; + } + } + + state->global->close_content(&childstate); + + ret = state->global->close_tag(&childstate); + if (ret < 0) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 with more than %u indexes\n", + state->global->msgprefix, nbobjs); + goto out_with_arrays; + } + + state->global->close_child(&childstate); + } + + if (nr_indexes != nbobjs) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 with less than %u indexes\n", + state->global->msgprefix, nbobjs); + goto out_with_arrays; + } + if (nr_u64values != nbobjs*nbobjs) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: distance2 with less than %u u64values\n", + state->global->msgprefix, nbobjs*nbobjs); + goto out_with_arrays; + } + + if (nbobjs < 2) { + /* distances with a single object are useless, even if the XML isn't invalid */ + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring distances2 with only %u objects\n", + state->global->msgprefix, nbobjs); + goto out_ignore; + } + if (type == HWLOC_OBJ_PU || type == HWLOC_OBJ_NUMANODE) { + if (!os_indexing) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring PU or NUMA distances2 without os_indexing\n", + state->global->msgprefix); + goto out_ignore; + } + } else { + if (!gp_indexing) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring !PU or !NUMA distances2 
without gp_indexing\n", + state->global->msgprefix); + goto out_ignore; + } + } + + hwloc_internal_distances_add_by_index(topology, type, nbobjs, indexes, u64values, kind, 0); + + /* prevent freeing below */ + indexes = NULL; + u64values = NULL; + + out_ignore: + free(indexes); + free(u64values); + return state->global->close_tag(state); + + out_with_arrays: + free(indexes); + free(u64values); + out: + return -1; +} + +static int +hwloc__xml_import_diff_one(hwloc__xml_import_state_t state, + hwloc_topology_diff_t *firstdiffp, + hwloc_topology_diff_t *lastdiffp) +{ + char *type_s = NULL; + char *obj_depth_s = NULL; + char *obj_index_s = NULL; + char *obj_attr_type_s = NULL; +/* char *obj_attr_index_s = NULL; unused for now */ + char *obj_attr_name_s = NULL; + char *obj_attr_oldvalue_s = NULL; + char *obj_attr_newvalue_s = NULL; + + while (1) { + char *attrname, *attrvalue; + if (state->global->next_attr(state, &attrname, &attrvalue) < 0) + break; + if (!strcmp(attrname, "type")) + type_s = attrvalue; + else if (!strcmp(attrname, "obj_depth")) + obj_depth_s = attrvalue; + else if (!strcmp(attrname, "obj_index")) + obj_index_s = attrvalue; + else if (!strcmp(attrname, "obj_attr_type")) + obj_attr_type_s = attrvalue; + else if (!strcmp(attrname, "obj_attr_index")) + { /* obj_attr_index_s = attrvalue; unused for now */ } + else if (!strcmp(attrname, "obj_attr_name")) + obj_attr_name_s = attrvalue; + else if (!strcmp(attrname, "obj_attr_oldvalue")) + obj_attr_oldvalue_s = attrvalue; + else if (!strcmp(attrname, "obj_attr_newvalue")) + obj_attr_newvalue_s = attrvalue; + else { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring unknown diff attribute %s\n", + state->global->msgprefix, attrname); + return -1; + } + } + + if (type_s) { + switch (atoi(type_s)) { + default: + break; + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR: { + /* object attribute diff */ + hwloc_topology_diff_obj_attr_type_t obj_attr_type; + hwloc_topology_diff_t diff; + + /* obj_attr mandatory generic 
attributes */ + if (!obj_depth_s || !obj_index_s || !obj_attr_type_s) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: missing mandatory obj attr generic attributes\n", + state->global->msgprefix); + break; + } + + /* obj_attr mandatory attributes common to all subtypes */ + if (!obj_attr_oldvalue_s || !obj_attr_newvalue_s) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: missing mandatory obj attr value attributes\n", + state->global->msgprefix); + break; + } + + /* mandatory attributes for obj_attr_info subtype */ + obj_attr_type = atoi(obj_attr_type_s); + if (obj_attr_type == HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO && !obj_attr_name_s) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: missing mandatory obj attr info name attribute\n", + state->global->msgprefix); + break; + } + + /* now we know we have everything we need */ + diff = malloc(sizeof(*diff)); + if (!diff) + return -1; + diff->obj_attr.type = HWLOC_TOPOLOGY_DIFF_OBJ_ATTR; + diff->obj_attr.obj_depth = atoi(obj_depth_s); + diff->obj_attr.obj_index = atoi(obj_index_s); + memset(&diff->obj_attr.diff, 0, sizeof(diff->obj_attr.diff)); + diff->obj_attr.diff.generic.type = obj_attr_type; + + switch (atoi(obj_attr_type_s)) { + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_SIZE: + diff->obj_attr.diff.uint64.oldvalue = strtoull(obj_attr_oldvalue_s, NULL, 0); + diff->obj_attr.diff.uint64.newvalue = strtoull(obj_attr_newvalue_s, NULL, 0); + break; + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO: + diff->obj_attr.diff.string.name = strdup(obj_attr_name_s); + /* FALLTHRU */ + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_NAME: + diff->obj_attr.diff.string.oldvalue = strdup(obj_attr_oldvalue_s); + diff->obj_attr.diff.string.newvalue = strdup(obj_attr_newvalue_s); + break; + } + + if (*firstdiffp) + (*lastdiffp)->generic.next = diff; + else + *firstdiffp = diff; + *lastdiffp = diff; + diff->generic.next = NULL; + } + } + } + + return state->global->close_tag(state); +} + +int +hwloc__xml_import_diff(hwloc__xml_import_state_t state, + 
hwloc_topology_diff_t *firstdiffp) +{ + hwloc_topology_diff_t firstdiff = NULL, lastdiff = NULL; + *firstdiffp = NULL; + + while (1) { + struct hwloc__xml_import_state_s childstate; + char *tag; + int ret; + + ret = state->global->find_child(state, &childstate, &tag); + if (ret < 0) + return -1; + if (!ret) + break; + + if (!strcmp(tag, "diff")) { + ret = hwloc__xml_import_diff_one(&childstate, &firstdiff, &lastdiff); + } else + ret = -1; + + if (ret < 0) + return ret; + + state->global->close_child(&childstate); + } + + *firstdiffp = firstdiff; + return 0; +} + +/*********************************** + ********* main XML import ********* + ***********************************/ + +static void +hwloc_convert_from_v1dist_floats(hwloc_topology_t topology, unsigned nbobjs, float *floats, uint64_t *u64s) +{ + unsigned i; + int is_uint; + char *env; + float scale = 1000.f; + char scalestring[20]; + + env = getenv("HWLOC_XML_V1DIST_SCALE"); + if (env) { + scale = (float) atof(env); + goto scale; + } + + is_uint = 1; + /* find out if all values are integers */ + for(i=0; i .001f && fptr < .999f) { + is_uint = 0; + break; + } + u64s[i] = (int)(f+.5f); + } + if (is_uint) + return; + + scale: + /* TODO heuristic to find a good scale */ + for(i=0; itopology; + struct hwloc_xml_backend_data_s *data = backend->private_data; + struct hwloc__xml_import_state_s state, childstate; + struct hwloc_obj *root = topology->levels[0][0]; + char *tag; + int gotignored = 0; + hwloc_localeswitch_declare; + char *env; + int ret; + + state.global = data; + + assert(!root->cpuset); + + hwloc_localeswitch_init(); + + data->nbnumanodes = 0; + data->first_numanode = data->last_numanode = NULL; + data->first_v1dist = data->last_v1dist = NULL; + + env = getenv("HWLOC_DONT_MERGE_DIE_GROUPS"); + data->dont_merge_die_groups = env && atoi(env); + + ret = data->look_init(data, &state); + if (ret < 0) + goto failed; + + if (data->version_major > 2) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: cannot 
import XML version %u.%u > 2\n", + data->msgprefix, data->version_major, data->version_minor); + goto err; + } + + /* find root object tag and import it */ + ret = state.global->find_child(&state, &childstate, &tag); + if (ret < 0 || !ret || strcmp(tag, "object")) + goto failed; + ret = hwloc__xml_import_object(topology, data, NULL /* no parent */, root, + &gotignored, + &childstate); + if (ret < 0) + goto failed; + state.global->close_child(&childstate); + assert(!gotignored); + + /* the root may have changed if we had to reinsert a Machine */ + root = topology->levels[0][0]; + + if (data->version_major >= 2) { + /* find v2 distances */ + while (1) { + ret = state.global->find_child(&state, &childstate, &tag); + if (ret < 0) + goto failed; + if (!ret) + break; + if (strcmp(tag, "distances2")) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: ignoring unknown tag `%s' after root object, expected `distances2'\n", + data->msgprefix, tag); + goto done; + } + ret = hwloc__xml_v2import_distances(topology, &childstate); + if (ret < 0) + goto failed; + state.global->close_child(&childstate); + } + } + + /* find end of topology tag */ + state.global->close_tag(&state); + +done: + if (!root->cpuset) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid root object without cpuset\n", + data->msgprefix); + goto err; + } + + /* update pre-v2.0 memory group gp_index */ + if (data->version_major < 2 && data->first_numanode) { + hwloc_obj_t node = data->first_numanode; + do { + if (node->parent->type == HWLOC_OBJ_GROUP + && !node->parent->gp_index) + node->parent->gp_index = topology->next_gp_index++; + node = node->next_cousin; + } while (node); + } + + if (data->version_major < 2 && data->first_v1dist) { + /* handle v1 distances */ + struct hwloc__xml_imported_v1distances_s *v1dist, *v1next = data->first_v1dist; + while ((v1dist = v1next) != NULL) { + unsigned nbobjs = v1dist->nbobjs; + v1next = v1dist->next; + /* Handle distances as NUMA node distances if nbobjs 
matches. + * Otherwise drop, only NUMA distances really matter. + * + * We could also attach to a random level with the right nbobjs, + * but it would require to have those objects in the original XML order (like the first_numanode cousin-list). + * because the topology order can be different if some parents are ignored during load. + */ + if (nbobjs == data->nbnumanodes) { + hwloc_obj_t *objs = malloc(nbobjs*sizeof(hwloc_obj_t)); + uint64_t *values = malloc(nbobjs*nbobjs*sizeof(*values)); + if (objs && values) { + hwloc_obj_t node; + unsigned i; + for(i=0, node = data->first_numanode; + inext_cousin) + objs[i] = node; +hwloc_convert_from_v1dist_floats(topology, nbobjs, v1dist->floats, values); + hwloc_internal_distances_add(topology, nbobjs, objs, values, v1dist->kind, 0); + } else { + free(objs); + free(values); + } + } + free(v1dist->floats); + free(v1dist); + } + data->first_v1dist = data->last_v1dist = NULL; + } + + /* FIXME: + * We should check that the existing object sets are consistent: + * no intersection between objects of a same level, + * object sets included in parent sets. + * hwloc never generated such buggy XML, but users could create one. + * + * We want to add these checks to the existing core code that + * adds missing sets and propagates parent/children sets + * (in case another backend ever generates buggy object sets as well). 
+ */ + + if (data->version_major >= 2) { + /* v2 must have non-empty nodesets since at least one NUMA node is required */ + if (!root->nodeset) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid root object without nodeset\n", + data->msgprefix); + goto err; + } + if (hwloc_bitmap_iszero(root->nodeset)) { + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: invalid root object with empty nodeset\n", + data->msgprefix); + goto err; + } + } else { + /* if v1 without nodeset, the core will add a default NUMA node and nodesets */ + } + + /* allocate default cpusets and nodesets if missing, the core will restrict them */ + hwloc_alloc_root_sets(root); + + /* keep the "Backend" information intact */ + /* we could add "BackendSource=XML" to notify that XML was used between the actual backend and here */ + + topology->support.discovery->pu = 1; + if (data->nbnumanodes) { + topology->support.discovery->numa = 1; + topology->support.discovery->numa_memory = 1; // FIXME + } + + if (data->look_done) + data->look_done(data, 0); + + hwloc_localeswitch_fini(); + return 0; + + failed: + if (data->look_done) + data->look_done(data, -1); + if (hwloc__xml_verbose()) + fprintf(stderr, "%s: XML component discovery failed.\n", + data->msgprefix); + err: + hwloc_free_object_siblings_and_children(root->first_child); + root->first_child = NULL; + hwloc_free_object_siblings_and_children(root->memory_first_child); + root->memory_first_child = NULL; + hwloc_free_object_siblings_and_children(root->io_first_child); + root->io_first_child = NULL; + hwloc_free_object_siblings_and_children(root->misc_first_child); + root->misc_first_child = NULL; + + /* make sure the core will abort */ + if (root->cpuset) + hwloc_bitmap_zero(root->cpuset); + if (root->nodeset) + hwloc_bitmap_zero(root->nodeset); + + hwloc_localeswitch_fini(); + return -1; +} + +/* this can be the first XML call */ +int +hwloc_topology_diff_load_xml(const char *xmlpath, + hwloc_topology_diff_t *firstdiffp, char **refnamep) 
+{ + struct hwloc__xml_import_state_s state; + struct hwloc_xml_backend_data_s fakedata; /* only for storing global info during parsing */ + hwloc_localeswitch_declare; + const char *local_basename; + int force_nolibxml; + int ret; + + state.global = &fakedata; + + local_basename = strrchr(xmlpath, '/'); + if (local_basename) + local_basename++; + else + local_basename = xmlpath; + fakedata.msgprefix = strdup(local_basename); + + hwloc_components_init(); + assert(hwloc_nolibxml_callbacks); + + hwloc_localeswitch_init(); + + *firstdiffp = NULL; + + force_nolibxml = hwloc_nolibxml_import(); +retry: + if (!hwloc_libxml_callbacks || (hwloc_nolibxml_callbacks && force_nolibxml)) + ret = hwloc_nolibxml_callbacks->import_diff(&state, xmlpath, NULL, 0, firstdiffp, refnamep); + else { + ret = hwloc_libxml_callbacks->import_diff(&state, xmlpath, NULL, 0, firstdiffp, refnamep); + if (ret < 0 && errno == ENOSYS) { + hwloc_libxml_callbacks = NULL; + goto retry; + } + } + + hwloc_localeswitch_fini(); + hwloc_components_fini(); + free(fakedata.msgprefix); + return ret; +} + +/* this can be the first XML call */ +int +hwloc_topology_diff_load_xmlbuffer(const char *xmlbuffer, int buflen, + hwloc_topology_diff_t *firstdiffp, char **refnamep) +{ + struct hwloc__xml_import_state_s state; + struct hwloc_xml_backend_data_s fakedata; /* only for storing global info during parsing */ + hwloc_localeswitch_declare; + int force_nolibxml; + int ret; + + state.global = &fakedata; + fakedata.msgprefix = strdup("xmldiffbuffer"); + + hwloc_components_init(); + assert(hwloc_nolibxml_callbacks); + + hwloc_localeswitch_init(); + + *firstdiffp = NULL; + + force_nolibxml = hwloc_nolibxml_import(); + retry: + if (!hwloc_libxml_callbacks || (hwloc_nolibxml_callbacks && force_nolibxml)) + ret = hwloc_nolibxml_callbacks->import_diff(&state, NULL, xmlbuffer, buflen, firstdiffp, refnamep); + else { + ret = hwloc_libxml_callbacks->import_diff(&state, NULL, xmlbuffer, buflen, firstdiffp, refnamep); + if (ret 
< 0 && errno == ENOSYS) { + hwloc_libxml_callbacks = NULL; + goto retry; + } + } + + hwloc_localeswitch_fini(); + hwloc_components_fini(); + free(fakedata.msgprefix); + return ret; +} + +/************************************************ + ********* XML export (common routines) ********* + ************************************************/ + +#define HWLOC_XML_CHAR_VALID(c) (((c) >= 32 && (c) <= 126) || (c) == '\t' || (c) == '\n' || (c) == '\r') + +static int +hwloc__xml_export_check_buffer(const char *buf, size_t length) +{ + unsigned i; + for(i=0; itype == HWLOC_OBJ_PACKAGE) + state->new_prop(state, "type", "Socket"); + else if (v1export && hwloc__obj_type_is_cache(obj->type)) + state->new_prop(state, "type", "Cache"); + else + state->new_prop(state, "type", hwloc_obj_type_string(obj->type)); + + if (obj->os_index != HWLOC_UNKNOWN_INDEX) { + sprintf(tmp, "%u", obj->os_index); + state->new_prop(state, "os_index", tmp); + } + + if (obj->cpuset) { + if (v1export && obj->type == HWLOC_OBJ_NUMANODE && obj->sibling_rank > 0) { + /* v1 non-first NUMA nodes have empty cpusets */ + state->new_prop(state, "cpuset", "0x0"); + state->new_prop(state, "online_cpuset", "0x0"); + state->new_prop(state, "complete_cpuset", "0x0"); + state->new_prop(state, "allowed_cpuset", "0x0"); + + } else { + /* normal case */ + hwloc_bitmap_asprintf(&setstring, obj->cpuset); + state->new_prop(state, "cpuset", setstring); + + hwloc_bitmap_asprintf(&setstring2, obj->complete_cpuset); + state->new_prop(state, "complete_cpuset", setstring2); + free(setstring2); + + if (v1export) + state->new_prop(state, "online_cpuset", setstring); + free(setstring); + + if (v1export || !obj->parent) { + hwloc_bitmap_t allowed_cpuset = hwloc_bitmap_dup(obj->cpuset); + hwloc_bitmap_and(allowed_cpuset, allowed_cpuset, topology->allowed_cpuset); + hwloc_bitmap_asprintf(&setstring, allowed_cpuset); + state->new_prop(state, "allowed_cpuset", setstring); + free(setstring); + hwloc_bitmap_free(allowed_cpuset); + } + } + + 
/* If exporting v1, we should clear second local NUMA bits from nodeset, + * but the importer will clear them anyway. + */ + hwloc_bitmap_asprintf(&setstring, obj->nodeset); + state->new_prop(state, "nodeset", setstring); + free(setstring); + + hwloc_bitmap_asprintf(&setstring, obj->complete_nodeset); + state->new_prop(state, "complete_nodeset", setstring); + free(setstring); + + if (v1export || !obj->parent) { + hwloc_bitmap_t allowed_nodeset = hwloc_bitmap_dup(obj->nodeset); + hwloc_bitmap_and(allowed_nodeset, allowed_nodeset, topology->allowed_nodeset); + hwloc_bitmap_asprintf(&setstring, allowed_nodeset); + state->new_prop(state, "allowed_nodeset", setstring); + free(setstring); + hwloc_bitmap_free(allowed_nodeset); + } + } + + if (!v1export) { + sprintf(tmp, "%llu", (unsigned long long) obj->gp_index); + state->new_prop(state, "gp_index", tmp); + } + + if (obj->name) { + char *name = hwloc__xml_export_safestrdup(obj->name); + state->new_prop(state, "name", name); + free(name); + } + if (!v1export && obj->subtype) { + char *subtype = hwloc__xml_export_safestrdup(obj->subtype); + state->new_prop(state, "subtype", subtype); + free(subtype); + } + + switch (obj->type) { + case HWLOC_OBJ_NUMANODE: + if (obj->attr->numanode.local_memory) { + sprintf(tmp, "%llu", (unsigned long long) obj->attr->numanode.local_memory); + state->new_prop(state, "local_memory", tmp); + } + for(i=0; iattr->numanode.page_types_len; i++) { + struct hwloc__xml_export_state_s childstate; + state->new_child(state, &childstate, "page_type"); + sprintf(tmp, "%llu", (unsigned long long) obj->attr->numanode.page_types[i].size); + childstate.new_prop(&childstate, "size", tmp); + sprintf(tmp, "%llu", (unsigned long long) obj->attr->numanode.page_types[i].count); + childstate.new_prop(&childstate, "count", tmp); + childstate.end_object(&childstate, "page_type"); + } + break; + case HWLOC_OBJ_L1CACHE: + case HWLOC_OBJ_L2CACHE: + case HWLOC_OBJ_L3CACHE: + case HWLOC_OBJ_L4CACHE: + case 
HWLOC_OBJ_L5CACHE: + case HWLOC_OBJ_L1ICACHE: + case HWLOC_OBJ_L2ICACHE: + case HWLOC_OBJ_L3ICACHE: + sprintf(tmp, "%llu", (unsigned long long) obj->attr->cache.size); + state->new_prop(state, "cache_size", tmp); + sprintf(tmp, "%u", obj->attr->cache.depth); + state->new_prop(state, "depth", tmp); + sprintf(tmp, "%u", (unsigned) obj->attr->cache.linesize); + state->new_prop(state, "cache_linesize", tmp); + sprintf(tmp, "%d", obj->attr->cache.associativity); + state->new_prop(state, "cache_associativity", tmp); + sprintf(tmp, "%d", (int) obj->attr->cache.type); + state->new_prop(state, "cache_type", tmp); + break; + case HWLOC_OBJ_GROUP: + if (v1export) { + sprintf(tmp, "%u", obj->attr->group.depth); + state->new_prop(state, "depth", tmp); + if (obj->attr->group.dont_merge) + state->new_prop(state, "dont_merge", "1"); + } else { + sprintf(tmp, "%u", obj->attr->group.kind); + state->new_prop(state, "kind", tmp); + sprintf(tmp, "%u", obj->attr->group.subkind); + state->new_prop(state, "subkind", tmp); + if (obj->attr->group.dont_merge) + state->new_prop(state, "dont_merge", "1"); + } + break; + case HWLOC_OBJ_BRIDGE: + sprintf(tmp, "%d-%d", (int) obj->attr->bridge.upstream_type, (int) obj->attr->bridge.downstream_type); + state->new_prop(state, "bridge_type", tmp); + sprintf(tmp, "%u", obj->attr->bridge.depth); + state->new_prop(state, "depth", tmp); + if (obj->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI) { + sprintf(tmp, "%04x:[%02x-%02x]", + (unsigned) obj->attr->bridge.downstream.pci.domain, + (unsigned) obj->attr->bridge.downstream.pci.secondary_bus, + (unsigned) obj->attr->bridge.downstream.pci.subordinate_bus); + state->new_prop(state, "bridge_pci", tmp); + } + if (obj->attr->bridge.upstream_type != HWLOC_OBJ_BRIDGE_PCI) + break; + /* FALLTHRU */ + case HWLOC_OBJ_PCI_DEVICE: + sprintf(tmp, "%04x:%02x:%02x.%01x", + (unsigned) obj->attr->pcidev.domain, + (unsigned) obj->attr->pcidev.bus, + (unsigned) obj->attr->pcidev.dev, + (unsigned) 
obj->attr->pcidev.func); + state->new_prop(state, "pci_busid", tmp); + sprintf(tmp, "%04x [%04x:%04x] [%04x:%04x] %02x", + (unsigned) obj->attr->pcidev.class_id, + (unsigned) obj->attr->pcidev.vendor_id, (unsigned) obj->attr->pcidev.device_id, + (unsigned) obj->attr->pcidev.subvendor_id, (unsigned) obj->attr->pcidev.subdevice_id, + (unsigned) obj->attr->pcidev.revision); + state->new_prop(state, "pci_type", tmp); + sprintf(tmp, "%f", obj->attr->pcidev.linkspeed); + state->new_prop(state, "pci_link_speed", tmp); + break; + case HWLOC_OBJ_OS_DEVICE: + sprintf(tmp, "%d", (int) obj->attr->osdev.type); + state->new_prop(state, "osdev_type", tmp); + break; + default: + break; + } + + for(i=0; iinfos_count; i++) { + char *name = hwloc__xml_export_safestrdup(obj->infos[i].name); + char *value = hwloc__xml_export_safestrdup(obj->infos[i].value); + struct hwloc__xml_export_state_s childstate; + state->new_child(state, &childstate, "info"); + childstate.new_prop(&childstate, "name", name); + childstate.new_prop(&childstate, "value", value); + childstate.end_object(&childstate, "info"); + free(name); + free(value); + } + if (v1export && obj->subtype) { + char *subtype = hwloc__xml_export_safestrdup(obj->subtype); + struct hwloc__xml_export_state_s childstate; + int is_coproctype = (obj->type == HWLOC_OBJ_OS_DEVICE && obj->attr->osdev.type == HWLOC_OBJ_OSDEV_COPROC); + state->new_child(state, &childstate, "info"); + childstate.new_prop(&childstate, "name", is_coproctype ? 
"CoProcType" : "Type"); + childstate.new_prop(&childstate, "value", subtype); + childstate.end_object(&childstate, "info"); + free(subtype); + } + + if (v1export && !obj->parent) { + /* only latency matrices covering the entire machine can be exported to v1 */ + struct hwloc_internal_distances_s *dist; + /* refresh distances since we need objects below */ + hwloc_internal_distances_refresh(topology); + for(dist = topology->first_dist; dist; dist = dist->next) { + struct hwloc__xml_export_state_s childstate; + unsigned nbobjs = dist->nbobjs; + int depth; + + if (nbobjs != (unsigned) hwloc_get_nbobjs_by_type(topology, dist->type)) + continue; + if (!(dist->kind & HWLOC_DISTANCES_KIND_MEANS_LATENCY)) + continue; + { + HWLOC_VLA(unsigned, logical_to_v2array, nbobjs); + for(i=0; iobjs[i]->logical_index] = i; + + /* compute the relative depth */ + if (dist->type == HWLOC_OBJ_NUMANODE) { + /* for NUMA nodes, use the highest normal-parent depth + 1 */ + depth = -1; + for(i=0; iobjs[i]->parent; + while (hwloc__obj_type_is_memory(parent->type)) + parent = parent->parent; + if (parent->depth+1 > depth) + depth = parent->depth+1; + } + } else { + /* for non-NUMA nodes, increase the object depth if any of them has memory above */ + int parent_with_memory = 0; + for(i=0; iobjs[i]->parent; + while (parent) { + if (parent->memory_first_child) { + parent_with_memory = 1; + goto done; + } + parent = parent->parent; + } + } + done: + depth = hwloc_get_type_depth(topology, dist->type) + parent_with_memory; + } + + state->new_child(state, &childstate, "distances"); + sprintf(tmp, "%u", nbobjs); + childstate.new_prop(&childstate, "nbobjs", tmp); + sprintf(tmp, "%d", depth); + childstate.new_prop(&childstate, "relative_depth", tmp); + sprintf(tmp, "%f", 1.f); + childstate.new_prop(&childstate, "latency_base", tmp); + for(i=0; ivalues[k]); + greatchildstate.new_prop(&greatchildstate, "value", tmp); + greatchildstate.end_object(&greatchildstate, "latency"); + } + } + 
childstate.end_object(&childstate, "distances"); + } + } + } + + if (obj->userdata && topology->userdata_export_cb) + topology->userdata_export_cb((void*) state, topology, obj); +} + +static void +hwloc__xml_v2export_object (hwloc__xml_export_state_t parentstate, hwloc_topology_t topology, hwloc_obj_t obj, unsigned long flags) +{ + struct hwloc__xml_export_state_s state; + hwloc_obj_t child; + + parentstate->new_child(parentstate, &state, "object"); + + hwloc__xml_export_object_contents(&state, topology, obj, flags); + + for_each_memory_child(child, obj) + hwloc__xml_v2export_object (&state, topology, child, flags); + for_each_child(child, obj) + hwloc__xml_v2export_object (&state, topology, child, flags); + for_each_io_child(child, obj) + hwloc__xml_v2export_object (&state, topology, child, flags); + for_each_misc_child(child, obj) + hwloc__xml_v2export_object (&state, topology, child, flags); + + state.end_object(&state, "object"); +} + +static void +hwloc__xml_v1export_object (hwloc__xml_export_state_t parentstate, hwloc_topology_t topology, hwloc_obj_t obj, unsigned long flags); + +static void +hwloc__xml_v1export_object_with_memory(hwloc__xml_export_state_t parentstate, hwloc_topology_t topology, hwloc_obj_t obj, unsigned long flags) +{ + struct hwloc__xml_export_state_s gstate, mstate, ostate, *state = parentstate; + hwloc_obj_t child; + + if (obj->parent->arity > 1 && obj->memory_arity > 1 && parentstate->global->v1_memory_group) { + /* child has sibling, we must add a Group around those memory children */ + hwloc_obj_t group = parentstate->global->v1_memory_group; + parentstate->new_child(parentstate, &gstate, "object"); + group->cpuset = obj->cpuset; + group->complete_cpuset = obj->complete_cpuset; + group->nodeset = obj->nodeset; + group->complete_nodeset = obj->complete_nodeset; + hwloc__xml_export_object_contents (&gstate, topology, group, flags); + group->cpuset = NULL; + group->complete_cpuset = NULL; + group->nodeset = NULL; + group->complete_nodeset 
= NULL; + state = &gstate; + } + + /* export first memory child */ + child = obj->memory_first_child; + assert(child->type == HWLOC_OBJ_NUMANODE); + state->new_child(state, &mstate, "object"); + hwloc__xml_export_object_contents (&mstate, topology, child, flags); + + /* then the actual object */ + mstate.new_child(&mstate, &ostate, "object"); + hwloc__xml_export_object_contents (&ostate, topology, obj, flags); + + /* then its normal/io/misc children */ + for_each_child(child, obj) + hwloc__xml_v1export_object (&ostate, topology, child, flags); + for_each_io_child(child, obj) + hwloc__xml_v1export_object (&ostate, topology, child, flags); + for_each_misc_child(child, obj) + hwloc__xml_v1export_object (&ostate, topology, child, flags); + + /* close object and first memory child */ + ostate.end_object(&ostate, "object"); + mstate.end_object(&mstate, "object"); + + /* now other memory children */ + for_each_memory_child(child, obj) + if (child->sibling_rank > 0) + hwloc__xml_v1export_object (state, topology, child, flags); + + if (state == &gstate) { + /* close group if any */ + gstate.end_object(&gstate, "object"); + } +} + +static void +hwloc__xml_v1export_object (hwloc__xml_export_state_t parentstate, hwloc_topology_t topology, hwloc_obj_t obj, unsigned long flags) +{ + struct hwloc__xml_export_state_s state; + hwloc_obj_t child; + + parentstate->new_child(parentstate, &state, "object"); + + hwloc__xml_export_object_contents(&state, topology, obj, flags); + + for_each_child(child, obj) { + if (!child->memory_arity) { + /* no memory child, just export normally */ + hwloc__xml_v1export_object (&state, topology, child, flags); + } else { + hwloc__xml_v1export_object_with_memory(&state, topology, child, flags); + } + } + + for_each_io_child(child, obj) + hwloc__xml_v1export_object (&state, topology, child, flags); + for_each_misc_child(child, obj) + hwloc__xml_v1export_object (&state, topology, child, flags); + + state.end_object(&state, "object"); +} + +#define 
EXPORT_ARRAY(state, type, nr, values, tagname, format, maxperline) do { \ + unsigned _i = 0; \ + while (_i<(nr)) { \ + char _tmp[255]; /* enough for (snprintf(format)+space) x maxperline */ \ + char _tmp2[16]; \ + size_t _len = 0; \ + unsigned _j; \ + struct hwloc__xml_export_state_s _childstate; \ + (state)->new_child(state, &_childstate, tagname); \ + for(_j=0; \ + _i+_j<(nr) && _jfirst_dist; dist; dist = dist->next) { + char tmp[255]; + unsigned nbobjs = dist->nbobjs; + struct hwloc__xml_export_state_s state; + + parentstate->new_child(parentstate, &state, "distances2"); + + state.new_prop(&state, "type", hwloc_obj_type_string(dist->type)); + sprintf(tmp, "%u", nbobjs); + state.new_prop(&state, "nbobjs", tmp); + sprintf(tmp, "%lu", dist->kind); + state.new_prop(&state, "kind", tmp); + + state.new_prop(&state, "indexing", + (dist->type == HWLOC_OBJ_NUMANODE || dist->type == HWLOC_OBJ_PU) ? "os" : "gp"); + /* TODO don't hardwire 10 below. either snprintf the max to guess it, or just append until the end of the buffer */ + EXPORT_ARRAY(&state, unsigned long long, nbobjs, dist->indexes, "indexes", "%llu", 10); + EXPORT_ARRAY(&state, unsigned long long, nbobjs*nbobjs, dist->values, "u64values", "%llu", 10); + state.end_object(&state, "distances2"); + } +} + +void +hwloc__xml_export_topology(hwloc__xml_export_state_t state, hwloc_topology_t topology, unsigned long flags) +{ + hwloc_obj_t root = hwloc_get_root_obj(topology); + + if (flags & HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1) { + if (root->memory_first_child) { + /* we don't use hwloc__xml_v1export_object_with_memory() because we want/can keep root above the numa node */ + struct hwloc__xml_export_state_s rstate, mstate; + hwloc_obj_t child; + /* export the root */ + state->new_child(state, &rstate, "object"); + hwloc__xml_export_object_contents (&rstate, topology, root, flags); + /* export first memory child */ + child = root->memory_first_child; + assert(child->type == HWLOC_OBJ_NUMANODE); + rstate.new_child(&rstate, 
&mstate, "object"); + hwloc__xml_export_object_contents (&mstate, topology, child, flags); + /* then its normal/io/misc children */ + for_each_child(child, root) + hwloc__xml_v1export_object (&mstate, topology, child, flags); + for_each_io_child(child, root) + hwloc__xml_v1export_object (&mstate, topology, child, flags); + for_each_misc_child(child, root) + hwloc__xml_v1export_object (&mstate, topology, child, flags); + /* close first memory child */ + mstate.end_object(&mstate, "object"); + /* now other memory children */ + for_each_memory_child(child, root) + if (child->sibling_rank > 0) + hwloc__xml_v1export_object (&rstate, topology, child, flags); + /* close the root */ + rstate.end_object(&rstate, "object"); + } else { + hwloc__xml_v1export_object(state, topology, root, flags); + } + + } else { + hwloc__xml_v2export_object (state, topology, root, flags); + hwloc__xml_v2export_distances (state, topology); + } +} + +void +hwloc__xml_export_diff(hwloc__xml_export_state_t parentstate, hwloc_topology_diff_t diff) +{ + while (diff) { + struct hwloc__xml_export_state_s state; + char tmp[255]; + + parentstate->new_child(parentstate, &state, "diff"); + + sprintf(tmp, "%d", (int) diff->generic.type); + state.new_prop(&state, "type", tmp); + + switch (diff->generic.type) { + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR: + sprintf(tmp, "%d", diff->obj_attr.obj_depth); + state.new_prop(&state, "obj_depth", tmp); + sprintf(tmp, "%u", diff->obj_attr.obj_index); + state.new_prop(&state, "obj_index", tmp); + + sprintf(tmp, "%d", (int) diff->obj_attr.diff.generic.type); + state.new_prop(&state, "obj_attr_type", tmp); + + switch (diff->obj_attr.diff.generic.type) { + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_SIZE: + sprintf(tmp, "%llu", (unsigned long long) diff->obj_attr.diff.uint64.index); + state.new_prop(&state, "obj_attr_index", tmp); + sprintf(tmp, "%llu", (unsigned long long) diff->obj_attr.diff.uint64.oldvalue); + state.new_prop(&state, "obj_attr_oldvalue", tmp); + sprintf(tmp, "%llu", 
(unsigned long long) diff->obj_attr.diff.uint64.newvalue); + state.new_prop(&state, "obj_attr_newvalue", tmp); + break; + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_NAME: + case HWLOC_TOPOLOGY_DIFF_OBJ_ATTR_INFO: + if (diff->obj_attr.diff.string.name) + state.new_prop(&state, "obj_attr_name", diff->obj_attr.diff.string.name); + state.new_prop(&state, "obj_attr_oldvalue", diff->obj_attr.diff.string.oldvalue); + state.new_prop(&state, "obj_attr_newvalue", diff->obj_attr.diff.string.newvalue); + break; + } + + break; + default: + assert(0); + } + state.end_object(&state, "diff"); + + diff = diff->generic.next; + } +} + +/********************************** + ********* main XML export ******** + **********************************/ + +/* this can be the first XML call */ +int hwloc_topology_export_xml(hwloc_topology_t topology, const char *filename, unsigned long flags) +{ + hwloc_localeswitch_declare; + struct hwloc__xml_export_data_s edata; + int force_nolibxml; + int ret; + + if (!topology->is_loaded) { + errno = EINVAL; + return -1; + } + + assert(hwloc_nolibxml_callbacks); /* the core called components_init() for the topology */ + + if (flags & ~HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1) { + errno = EINVAL; + return -1; + } + + hwloc_internal_distances_refresh(topology); + + hwloc_localeswitch_init(); + + edata.v1_memory_group = NULL; + if (flags & HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1) + /* temporary group to be used during v1 export of memory children */ + edata.v1_memory_group = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX); + + force_nolibxml = hwloc_nolibxml_export(); +retry: + if (!hwloc_libxml_callbacks || (hwloc_nolibxml_callbacks && force_nolibxml)) + ret = hwloc_nolibxml_callbacks->export_file(topology, &edata, filename, flags); + else { + ret = hwloc_libxml_callbacks->export_file(topology, &edata, filename, flags); + if (ret < 0 && errno == ENOSYS) { + hwloc_libxml_callbacks = NULL; + goto retry; + } + } + + if (edata.v1_memory_group) + 
hwloc_free_unlinked_object(edata.v1_memory_group); + + hwloc_localeswitch_fini(); + return ret; +} + +/* this can be the first XML call */ +int hwloc_topology_export_xmlbuffer(hwloc_topology_t topology, char **xmlbuffer, int *buflen, unsigned long flags) +{ + hwloc_localeswitch_declare; + struct hwloc__xml_export_data_s edata; + int force_nolibxml; + int ret; + + if (!topology->is_loaded) { + errno = EINVAL; + return -1; + } + + assert(hwloc_nolibxml_callbacks); /* the core called components_init() for the topology */ + + if (flags & ~HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1) { + errno = EINVAL; + return -1; + } + + hwloc_internal_distances_refresh(topology); + + hwloc_localeswitch_init(); + + edata.v1_memory_group = NULL; + if (flags & HWLOC_TOPOLOGY_EXPORT_XML_FLAG_V1) + /* temporary group to be used during v1 export of memory children */ + edata.v1_memory_group = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX); + + force_nolibxml = hwloc_nolibxml_export(); +retry: + if (!hwloc_libxml_callbacks || (hwloc_nolibxml_callbacks && force_nolibxml)) + ret = hwloc_nolibxml_callbacks->export_buffer(topology, &edata, xmlbuffer, buflen, flags); + else { + ret = hwloc_libxml_callbacks->export_buffer(topology, &edata, xmlbuffer, buflen, flags); + if (ret < 0 && errno == ENOSYS) { + hwloc_libxml_callbacks = NULL; + goto retry; + } + } + + if (edata.v1_memory_group) + hwloc_free_unlinked_object(edata.v1_memory_group); + + hwloc_localeswitch_fini(); + return ret; +} + +/* this can be the first XML call */ +int +hwloc_topology_diff_export_xml(hwloc_topology_diff_t diff, const char *refname, + const char *filename) +{ + hwloc_localeswitch_declare; + hwloc_topology_diff_t tmpdiff; + int force_nolibxml; + int ret; + + tmpdiff = diff; + while (tmpdiff) { + if (tmpdiff->generic.type == HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX) { + errno = EINVAL; + return -1; + } + tmpdiff = tmpdiff->generic.next; + } + + hwloc_components_init(); + assert(hwloc_nolibxml_callbacks); + + 
hwloc_localeswitch_init(); + + force_nolibxml = hwloc_nolibxml_export(); +retry: + if (!hwloc_libxml_callbacks || (hwloc_nolibxml_callbacks && force_nolibxml)) + ret = hwloc_nolibxml_callbacks->export_diff_file(diff, refname, filename); + else { + ret = hwloc_libxml_callbacks->export_diff_file(diff, refname, filename); + if (ret < 0 && errno == ENOSYS) { + hwloc_libxml_callbacks = NULL; + goto retry; + } + } + + hwloc_localeswitch_fini(); + hwloc_components_fini(); + return ret; +} + +/* this can be the first XML call */ +int +hwloc_topology_diff_export_xmlbuffer(hwloc_topology_diff_t diff, const char *refname, + char **xmlbuffer, int *buflen) +{ + hwloc_localeswitch_declare; + hwloc_topology_diff_t tmpdiff; + int force_nolibxml; + int ret; + + tmpdiff = diff; + while (tmpdiff) { + if (tmpdiff->generic.type == HWLOC_TOPOLOGY_DIFF_TOO_COMPLEX) { + errno = EINVAL; + return -1; + } + tmpdiff = tmpdiff->generic.next; + } + + hwloc_components_init(); + assert(hwloc_nolibxml_callbacks); + + hwloc_localeswitch_init(); + + force_nolibxml = hwloc_nolibxml_export(); +retry: + if (!hwloc_libxml_callbacks || (hwloc_nolibxml_callbacks && force_nolibxml)) + ret = hwloc_nolibxml_callbacks->export_diff_buffer(diff, refname, xmlbuffer, buflen); + else { + ret = hwloc_libxml_callbacks->export_diff_buffer(diff, refname, xmlbuffer, buflen); + if (ret < 0 && errno == ENOSYS) { + hwloc_libxml_callbacks = NULL; + goto retry; + } + } + + hwloc_localeswitch_fini(); + hwloc_components_fini(); + return ret; +} + +void hwloc_free_xmlbuffer(hwloc_topology_t topology __hwloc_attribute_unused, char *xmlbuffer) +{ + int force_nolibxml; + + assert(hwloc_nolibxml_callbacks); /* the core called components_init() for the topology */ + + force_nolibxml = hwloc_nolibxml_export(); + if (!hwloc_libxml_callbacks || (hwloc_nolibxml_callbacks && force_nolibxml)) + hwloc_nolibxml_callbacks->free_buffer(xmlbuffer); + else + hwloc_libxml_callbacks->free_buffer(xmlbuffer); +} + +void 
+hwloc_topology_set_userdata_export_callback(hwloc_topology_t topology, + void (*export)(void *reserved, struct hwloc_topology *topology, struct hwloc_obj *obj)) +{ + topology->userdata_export_cb = export; +} + +static void +hwloc__export_obj_userdata(hwloc__xml_export_state_t parentstate, int encoded, + const char *name, size_t length, const void *buffer, size_t encoded_length) +{ + struct hwloc__xml_export_state_s state; + char tmp[255]; + parentstate->new_child(parentstate, &state, "userdata"); + if (name) + state.new_prop(&state, "name", name); + sprintf(tmp, "%lu", (unsigned long) length); + state.new_prop(&state, "length", tmp); + if (encoded) + state.new_prop(&state, "encoding", "base64"); + if (encoded_length) + state.add_content(&state, buffer, encoded ? encoded_length : length); + state.end_object(&state, "userdata"); +} + +int +hwloc_export_obj_userdata(void *reserved, + struct hwloc_topology *topology, struct hwloc_obj *obj __hwloc_attribute_unused, + const char *name, const void *buffer, size_t length) +{ + hwloc__xml_export_state_t state = reserved; + + if (!buffer) { + errno = EINVAL; + return -1; + } + + if ((name && hwloc__xml_export_check_buffer(name, strlen(name)) < 0) + || hwloc__xml_export_check_buffer(buffer, length) < 0) { + errno = EINVAL; + return -1; + } + + if (topology->userdata_not_decoded) { + int encoded; + size_t encoded_length; + const char *realname; + if (!strncmp(name, "base64", 6)) { + encoded = 1; + encoded_length = BASE64_ENCODED_LENGTH(length); + } else { + assert(!strncmp(name, "normal", 6)); + encoded = 0; + encoded_length = length; + } + if (name[6] == ':') + realname = name+7; + else { + assert(!strcmp(name+6, "-anon")); + realname = NULL; + } + hwloc__export_obj_userdata(state, encoded, realname, length, buffer, encoded_length); + + } else + hwloc__export_obj_userdata(state, 0, name, length, buffer, length); + + return 0; +} + +int +hwloc_export_obj_userdata_base64(void *reserved, + struct hwloc_topology *topology 
__hwloc_attribute_unused, struct hwloc_obj *obj __hwloc_attribute_unused, + const char *name, const void *buffer, size_t length) +{ + hwloc__xml_export_state_t state = reserved; + size_t encoded_length; + char *encoded_buffer; + int ret __hwloc_attribute_unused; + + if (!buffer) { + errno = EINVAL; + return -1; + } + + assert(!topology->userdata_not_decoded); + + if (name && hwloc__xml_export_check_buffer(name, strlen(name)) < 0) { + errno = EINVAL; + return -1; + } + + encoded_length = BASE64_ENCODED_LENGTH(length); + encoded_buffer = malloc(encoded_length+1); + if (!encoded_buffer) { + errno = ENOMEM; + return -1; + } + + ret = hwloc_encode_to_base64(buffer, length, encoded_buffer, encoded_length+1); + assert(ret == (int) encoded_length); + + hwloc__export_obj_userdata(state, 1, name, length, encoded_buffer, encoded_length); + + free(encoded_buffer); + return 0; +} + +void +hwloc_topology_set_userdata_import_callback(hwloc_topology_t topology, + void (*import)(struct hwloc_topology *topology, struct hwloc_obj *obj, const char *name, const void *buffer, size_t length)) +{ + topology->userdata_import_cb = import; +} + +/*************************************** + ************ XML component ************ + ***************************************/ + +static void +hwloc_xml_backend_disable(struct hwloc_backend *backend) +{ + struct hwloc_xml_backend_data_s *data = backend->private_data; + data->backend_exit(data); + free(data->msgprefix); + free(data); +} + +static struct hwloc_backend * +hwloc_xml_component_instantiate(struct hwloc_disc_component *component, + const void *_data1, + const void *_data2, + const void *_data3) +{ + struct hwloc_xml_backend_data_s *data; + struct hwloc_backend *backend; + const char *env; + int force_nolibxml; + const char * xmlpath = (const char *) _data1; + const char * xmlbuffer = (const char *) _data2; + int xmlbuflen = (int)(uintptr_t) _data3; + const char *local_basename; + int err; + + assert(hwloc_nolibxml_callbacks); /* the core 
called components_init() for the component's topology */ + + if (!xmlpath && !xmlbuffer) { + env = getenv("HWLOC_XMLFILE"); + if (env) { + /* 'xml' was given in HWLOC_COMPONENTS without a filename */ + xmlpath = env; + } else { + errno = EINVAL; + goto out; + } + } + + backend = hwloc_backend_alloc(component); + if (!backend) + goto out; + + data = malloc(sizeof(*data)); + if (!data) { + errno = ENOMEM; + goto out_with_backend; + } + + backend->private_data = data; + backend->discover = hwloc_look_xml; + backend->disable = hwloc_xml_backend_disable; + backend->is_thissystem = 0; + + if (xmlpath) { + local_basename = strrchr(xmlpath, '/'); + if (local_basename) + local_basename++; + else + local_basename = xmlpath; + } else { + local_basename = "xmlbuffer"; + } + data->msgprefix = strdup(local_basename); + + force_nolibxml = hwloc_nolibxml_import(); +retry: + if (!hwloc_libxml_callbacks || (hwloc_nolibxml_callbacks && force_nolibxml)) + err = hwloc_nolibxml_callbacks->backend_init(data, xmlpath, xmlbuffer, xmlbuflen); + else { + err = hwloc_libxml_callbacks->backend_init(data, xmlpath, xmlbuffer, xmlbuflen); + if (err < 0 && errno == ENOSYS) { + hwloc_libxml_callbacks = NULL; + goto retry; + } + } + if (err < 0) + goto out_with_data; + + return backend; + + out_with_data: + free(data->msgprefix); + free(data); + out_with_backend: + free(backend); + out: + return NULL; +} + +static struct hwloc_disc_component hwloc_xml_disc_component = { + HWLOC_DISC_COMPONENT_TYPE_GLOBAL, + "xml", + ~0, + hwloc_xml_component_instantiate, + 30, + 1, + NULL +}; + +const struct hwloc_component hwloc_xml_component = { + HWLOC_COMPONENT_ABI, + NULL, NULL, + HWLOC_COMPONENT_TYPE_DISC, + 0, + &hwloc_xml_disc_component +}; diff --git a/src/3rdparty/hwloc/src/topology.c b/src/3rdparty/hwloc/src/topology.c new file mode 100644 index 00000000..55678a08 --- /dev/null +++ b/src/3rdparty/hwloc/src/topology.c @@ -0,0 +1,4484 @@ +/* + * Copyright © 2009 CNRS + * Copyright © 2009-2018 Inria. 
All rights reserved. + * Copyright © 2009-2012 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +#include + +#define _ATFILE_SOURCE +#include +#include +#ifdef HAVE_DIRENT_H +#include +#endif +#ifdef HAVE_UNISTD_H +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef HAVE_MACH_MACH_INIT_H +#include +#endif +#ifdef HAVE_MACH_MACH_HOST_H +#include +#endif + +#ifdef HAVE_SYS_PARAM_H +#include +#endif + +#ifdef HAVE_SYS_SYSCTL_H +#include +#endif + +#ifdef HWLOC_WIN_SYS +#include +#endif + +unsigned hwloc_get_api_version(void) +{ + return HWLOC_API_VERSION; +} + +int hwloc_topology_abi_check(hwloc_topology_t topology) +{ + return topology->topology_abi != HWLOC_TOPOLOGY_ABI ? -1 : 0; +} + +int hwloc_hide_errors(void) +{ + static int hide = 0; + static int checked = 0; + if (!checked) { + const char *envvar = getenv("HWLOC_HIDE_ERRORS"); + if (envvar) + hide = atoi(envvar); + checked = 1; + } + return hide; +} + +void hwloc_report_os_error(const char *msg, int line) +{ + static int reported = 0; + + if (!reported && !hwloc_hide_errors()) { + fprintf(stderr, "****************************************************************************\n"); + fprintf(stderr, "* hwloc %s received invalid information from the operating system.\n", HWLOC_VERSION); + fprintf(stderr, "*\n"); + fprintf(stderr, "* %s\n", msg); + fprintf(stderr, "* Error occurred in topology.c line %d\n", line); + fprintf(stderr, "*\n"); + fprintf(stderr, "* The following FAQ entry in the hwloc documentation may help:\n"); + fprintf(stderr, "* What should I do when hwloc reports \"operating system\" warnings?\n"); + fprintf(stderr, "* Otherwise please report this error message to the hwloc user's mailing list,\n"); +#ifdef HWLOC_LINUX_SYS + fprintf(stderr, "* along with the files generated by the hwloc-gather-topology script.\n"); +#else + 
fprintf(stderr, "* along with any relevant topology information from your platform.\n"); +#endif + fprintf(stderr, "* \n"); + fprintf(stderr, "* hwloc will now ignore this invalid topology information and continue.\n"); + fprintf(stderr, "****************************************************************************\n"); + reported = 1; + } +} + +#if defined(HAVE_SYSCTLBYNAME) +int hwloc_get_sysctlbyname(const char *name, int64_t *ret) +{ + union { + int32_t i32; + int64_t i64; + } n; + size_t size = sizeof(n); + if (sysctlbyname(name, &n, &size, NULL, 0)) + return -1; + switch (size) { + case sizeof(n.i32): + *ret = n.i32; + break; + case sizeof(n.i64): + *ret = n.i64; + break; + default: + return -1; + } + return 0; +} +#endif + +#if defined(HAVE_SYSCTL) +int hwloc_get_sysctl(int name[], unsigned namelen, int *ret) +{ + int n; + size_t size = sizeof(n); + if (sysctl(name, namelen, &n, &size, NULL, 0)) + return -1; + if (size != sizeof(n)) + return -1; + *ret = n; + return 0; +} +#endif + +/* Return the OS-provided number of processors. Unlike other methods such as + reading sysfs on Linux, this method is not virtualizable; thus it's only + used as a fall-back method, allowing virtual backends (FSROOT, etc) to + have the desired effect. 
*/ +#ifndef HWLOC_WIN_SYS /* The windows implementation is in topology-windows.c */ +int +hwloc_fallback_nbprocessors(struct hwloc_topology *topology __hwloc_attribute_unused) { + int n; +#if HAVE_DECL__SC_NPROCESSORS_ONLN + n = sysconf(_SC_NPROCESSORS_ONLN); +#elif HAVE_DECL__SC_NPROC_ONLN + n = sysconf(_SC_NPROC_ONLN); +#elif HAVE_DECL__SC_NPROCESSORS_CONF + n = sysconf(_SC_NPROCESSORS_CONF); +#elif HAVE_DECL__SC_NPROC_CONF + n = sysconf(_SC_NPROC_CONF); +#elif defined(HAVE_HOST_INFO) && HAVE_HOST_INFO + struct host_basic_info info; + mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; + host_info(mach_host_self(), HOST_BASIC_INFO, (integer_t*) &info, &count); + n = info.avail_cpus; +#elif defined(HAVE_SYSCTLBYNAME) + int64_t nn; + if (hwloc_get_sysctlbyname("hw.ncpu", &nn)) + nn = -1; + n = nn; +#elif defined(HAVE_SYSCTL) && HAVE_DECL_CTL_HW && HAVE_DECL_HW_NCPU + static int name[2] = {CTL_HW, HW_NCPU}; + if (hwloc_get_sysctl(name, sizeof(name)/sizeof(*name), &n)) + n = -1; +#else +#ifdef __GNUC__ +#warning No known way to discover number of available processors on this system +#endif + n = -1; +#endif + return n; +} +#endif /* !HWLOC_WIN_SYS */ + +/* + * Use the given number of processors to set a PU level. + */ +void +hwloc_setup_pu_level(struct hwloc_topology *topology, + unsigned nb_pus) +{ + struct hwloc_obj *obj; + unsigned oscpu,cpu; + + hwloc_debug("%s", "\n\n * CPU cpusets *\n\n"); + for (cpu=0,oscpu=0; cpucpuset = hwloc_bitmap_alloc(); + hwloc_bitmap_only(obj->cpuset, oscpu); + + hwloc_debug_2args_bitmap("cpu %u (os %u) has cpuset %s\n", + cpu, oscpu, obj->cpuset); + hwloc_insert_object_by_cpuset(topology, obj); + + cpu++; + } +} + +/* Traverse children of a parent in a safe way: reread the next pointer as + * appropriate to prevent crash on child deletion: */ +#define for_each_child_safe(child, parent, pchild) \ + for (pchild = &(parent)->first_child, child = *pchild; \ + child; \ + /* Check whether the current child was not dropped. 
*/ \ + (*pchild == child ? pchild = &(child->next_sibling) : NULL), \ + /* Get pointer to next child. */ \ + child = *pchild) +#define for_each_memory_child_safe(child, parent, pchild) \ + for (pchild = &(parent)->memory_first_child, child = *pchild; \ + child; \ + /* Check whether the current child was not dropped. */ \ + (*pchild == child ? pchild = &(child->next_sibling) : NULL), \ + /* Get pointer to next child. */ \ + child = *pchild) +#define for_each_io_child_safe(child, parent, pchild) \ + for (pchild = &(parent)->io_first_child, child = *pchild; \ + child; \ + /* Check whether the current child was not dropped. */ \ + (*pchild == child ? pchild = &(child->next_sibling) : NULL), \ + /* Get pointer to next child. */ \ + child = *pchild) +#define for_each_misc_child_safe(child, parent, pchild) \ + for (pchild = &(parent)->misc_first_child, child = *pchild; \ + child; \ + /* Check whether the current child was not dropped. */ \ + (*pchild == child ? pchild = &(child->next_sibling) : NULL), \ + /* Get pointer to next child. */ \ + child = *pchild) + +#ifdef HWLOC_DEBUG +/* Just for debugging. */ +static void +hwloc_debug_print_object(int indent __hwloc_attribute_unused, hwloc_obj_t obj) +{ + char type[64], idx[12], attr[1024], *cpuset = NULL; + hwloc_debug("%*s", 2*indent, ""); + hwloc_obj_type_snprintf(type, sizeof(type), obj, 1); + if (obj->os_index != HWLOC_UNKNOWN_INDEX) + snprintf(idx, sizeof(idx), "#%u", obj->os_index); + else + *idx = '\0'; + hwloc_obj_attr_snprintf(attr, sizeof(attr), obj, " ", 1); + hwloc_debug("%s%s%s%s%s", type, idx, *attr ? "(" : "", attr, *attr ? 
")" : ""); + if (obj->name) + hwloc_debug(" name \"%s\"", obj->name); + if (obj->subtype) + hwloc_debug(" subtype \"%s\"", obj->subtype); + if (obj->cpuset) { + hwloc_bitmap_asprintf(&cpuset, obj->cpuset); + hwloc_debug(" cpuset %s", cpuset); + free(cpuset); + } + if (obj->complete_cpuset) { + hwloc_bitmap_asprintf(&cpuset, obj->complete_cpuset); + hwloc_debug(" complete %s", cpuset); + free(cpuset); + } + if (obj->nodeset) { + hwloc_bitmap_asprintf(&cpuset, obj->nodeset); + hwloc_debug(" nodeset %s", cpuset); + free(cpuset); + } + if (obj->complete_nodeset) { + hwloc_bitmap_asprintf(&cpuset, obj->complete_nodeset); + hwloc_debug(" completeN %s", cpuset); + free(cpuset); + } + if (obj->arity) + hwloc_debug(" arity %u", obj->arity); + hwloc_debug("%s", "\n"); +} + +static void +hwloc_debug_print_objects(int indent __hwloc_attribute_unused, hwloc_obj_t obj) +{ + hwloc_obj_t child; + hwloc_debug_print_object(indent, obj); + for_each_child (child, obj) + hwloc_debug_print_objects(indent + 1, child); + for_each_memory_child (child, obj) + hwloc_debug_print_objects(indent + 1, child); + for_each_io_child (child, obj) + hwloc_debug_print_objects(indent + 1, child); + for_each_misc_child (child, obj) + hwloc_debug_print_objects(indent + 1, child); +} +#else /* !HWLOC_DEBUG */ +#define hwloc_debug_print_object(indent, obj) do { /* nothing */ } while (0) +#define hwloc_debug_print_objects(indent, obj) do { /* nothing */ } while (0) +#endif /* !HWLOC_DEBUG */ + +void hwloc__free_infos(struct hwloc_info_s *infos, unsigned count) +{ + unsigned i; + for(i=0; iinfos, &obj->infos_count, name, value); +} + +/* This function may be called with topology->tma set, it cannot free() or realloc() */ +static int hwloc__tma_dup_infos(struct hwloc_tma *tma, hwloc_obj_t new, hwloc_obj_t src) +{ + unsigned i, j; + new->infos = hwloc_tma_calloc(tma, src->infos_count * sizeof(*src->infos)); + if (!new->infos) + return -1; + for(i=0; iinfos_count; i++) { + new->infos[i].name = 
hwloc_tma_strdup(tma, src->infos[i].name); + new->infos[i].value = hwloc_tma_strdup(tma, src->infos[i].value); + if (!new->infos[i].name || !new->infos[i].value) + goto failed; + } + new->infos_count = src->infos_count; + return 0; + + failed: + assert(!tma || !tma->dontfree); /* this tma cannot fail to allocate */ + for(j=0; j<=i; j++) { + free(new->infos[i].name); + free(new->infos[i].value); + } + free(new->infos); + new->infos = NULL; + return -1; +} + +static void +hwloc__free_object_contents(hwloc_obj_t obj) +{ + switch (obj->type) { + case HWLOC_OBJ_NUMANODE: + free(obj->attr->numanode.page_types); + break; + default: + break; + } + hwloc__free_infos(obj->infos, obj->infos_count); + free(obj->attr); + free(obj->children); + free(obj->subtype); + free(obj->name); + hwloc_bitmap_free(obj->cpuset); + hwloc_bitmap_free(obj->complete_cpuset); + hwloc_bitmap_free(obj->nodeset); + hwloc_bitmap_free(obj->complete_nodeset); +} + +/* Free an object and all its content. */ +void +hwloc_free_unlinked_object(hwloc_obj_t obj) +{ + hwloc__free_object_contents(obj); + free(obj); +} + +/* Replace old with contents of new object, and make new freeable by the caller. + * Only updates next_sibling/first_child pointers, + * so may only be used during early discovery. + */ +static void +hwloc_replace_linked_object(hwloc_obj_t old, hwloc_obj_t new) +{ + /* drop old fields */ + hwloc__free_object_contents(old); + /* copy old tree pointers to new */ + new->parent = old->parent; + new->next_sibling = old->next_sibling; + new->first_child = old->first_child; + new->memory_first_child = old->memory_first_child; + new->io_first_child = old->io_first_child; + new->misc_first_child = old->misc_first_child; + /* copy new contents to old now that tree pointers are OK */ + memcpy(old, new, sizeof(*old)); + /* clear new to that we may free it */ + memset(new, 0,sizeof(*new)); +} + +/* Remove an object and its children from its parent and free them. 
+ * Only updates next_sibling/first_child pointers, + * so may only be used during early discovery or during destroy. + */ +static void +unlink_and_free_object_and_children(hwloc_obj_t *pobj) +{ + hwloc_obj_t obj = *pobj, child, *pchild; + + for_each_child_safe(child, obj, pchild) + unlink_and_free_object_and_children(pchild); + for_each_memory_child_safe(child, obj, pchild) + unlink_and_free_object_and_children(pchild); + for_each_io_child_safe(child, obj, pchild) + unlink_and_free_object_and_children(pchild); + for_each_misc_child_safe(child, obj, pchild) + unlink_and_free_object_and_children(pchild); + + *pobj = obj->next_sibling; + hwloc_free_unlinked_object(obj); +} + +/* Free an object and its children without unlinking from parent. + */ +void +hwloc_free_object_and_children(hwloc_obj_t obj) +{ + unlink_and_free_object_and_children(&obj); +} + +/* Free an object, its next siblings and their children without unlinking from parent. + */ +void +hwloc_free_object_siblings_and_children(hwloc_obj_t obj) +{ + while (obj) + unlink_and_free_object_and_children(&obj); +} + +/* insert the (non-empty) list of sibling starting at firstnew as new children of newparent, + * and return the address of the pointer to the next one + */ +static hwloc_obj_t * +insert_siblings_list(hwloc_obj_t *firstp, hwloc_obj_t firstnew, hwloc_obj_t newparent) +{ + hwloc_obj_t tmp; + assert(firstnew); + *firstp = tmp = firstnew; + tmp->parent = newparent; + while (tmp->next_sibling) { + tmp = tmp->next_sibling; + tmp->parent = newparent; + } + return &tmp->next_sibling; +} + +/* Take the new list starting at firstnew and prepend it to the old list starting at *firstp, + * and mark the new children as children of newparent. + * May be used during early or late discovery (updates prev_sibling and sibling_rank). + * List firstnew must be non-NULL. 
+ */ +static void +prepend_siblings_list(hwloc_obj_t *firstp, hwloc_obj_t firstnew, hwloc_obj_t newparent) +{ + hwloc_obj_t *tmpp, tmp, last; + unsigned length; + + /* update parent pointers and find the length and end of the new list */ + for(length = 0, tmpp = &firstnew, last = NULL ; *tmpp; length++, last = *tmpp, tmpp = &((*tmpp)->next_sibling)) + (*tmpp)->parent = newparent; + + /* update sibling_rank */ + for(tmp = *firstp; tmp; tmp = tmp->next_sibling) + tmp->sibling_rank += length; /* if it wasn't initialized yet, it'll be overwritten later */ + + /* place the existing list at the end of the new one */ + *tmpp = *firstp; + if (*firstp) + (*firstp)->prev_sibling = last; + + /* use the beginning of the new list now */ + *firstp = firstnew; +} + +/* Take the new list starting at firstnew and append it to the old list starting at *firstp, + * and mark the new children as children of newparent. + * May be used during early or late discovery (updates prev_sibling and sibling_rank). + */ +static void +append_siblings_list(hwloc_obj_t *firstp, hwloc_obj_t firstnew, hwloc_obj_t newparent) +{ + hwloc_obj_t *tmpp, tmp, last; + unsigned length; + + /* find the length and end of the existing list */ + for(length = 0, tmpp = firstp, last = NULL ; *tmpp; length++, last = *tmpp, tmpp = &((*tmpp)->next_sibling)); + + /* update parent pointers and sibling_rank */ + for(tmp = firstnew; tmp; tmp = tmp->next_sibling) { + tmp->parent = newparent; + tmp->sibling_rank += length; /* if it wasn't set yet, it'll be overwritten later */ + } + + /* place new list at the end of the old one */ + *tmpp = firstnew; + if (firstnew) + firstnew->prev_sibling = last; +} + +/* Remove an object from its parent and free it. + * Only updates next_sibling/first_child pointers, + * so may only be used during early discovery. + * + * Children are inserted in the parent. + * If children should be inserted somewhere else (e.g. 
when merging with a child), + * the caller should move them before calling this function. + */ +static void +unlink_and_free_single_object(hwloc_obj_t *pparent) +{ + hwloc_obj_t old = *pparent; + hwloc_obj_t *lastp; + + if (old->type == HWLOC_OBJ_MISC) { + /* Misc object */ + + /* no normal children */ + assert(!old->first_child); + /* no memory children */ + assert(!old->memory_first_child); + /* no I/O children */ + assert(!old->io_first_child); + + if (old->misc_first_child) + /* insert old misc object children as new siblings below parent instead of old */ + lastp = insert_siblings_list(pparent, old->misc_first_child, old->parent); + else + lastp = pparent; + /* append old siblings back */ + *lastp = old->next_sibling; + + } else if (hwloc__obj_type_is_io(old->type)) { + /* I/O object */ + + /* no normal children */ + assert(!old->first_child); + /* no memory children */ + assert(!old->memory_first_child); + + if (old->io_first_child) + /* insert old I/O object children as new siblings below parent instead of old */ + lastp = insert_siblings_list(pparent, old->io_first_child, old->parent); + else + lastp = pparent; + /* append old siblings back */ + *lastp = old->next_sibling; + + /* append old Misc children to parent */ + if (old->misc_first_child) + append_siblings_list(&old->parent->misc_first_child, old->misc_first_child, old->parent); + + } else if (hwloc__obj_type_is_memory(old->type)) { + /* memory object */ + + /* no normal children */ + assert(!old->first_child); + /* no I/O children */ + assert(!old->io_first_child); + + if (old->memory_first_child) + /* insert old memory object children as new siblings below parent instead of old */ + lastp = insert_siblings_list(pparent, old->memory_first_child, old->parent); + else + lastp = pparent; + /* append old siblings back */ + *lastp = old->next_sibling; + + /* append old Misc children to parent */ + if (old->misc_first_child) + append_siblings_list(&old->parent->misc_first_child, old->misc_first_child, 
old->parent); + + } else { + /* Normal object */ + + if (old->first_child) + /* insert old object children as new siblings below parent instead of old */ + lastp = insert_siblings_list(pparent, old->first_child, old->parent); + else + lastp = pparent; + /* append old siblings back */ + *lastp = old->next_sibling; + + /* append old memory, I/O and Misc children to parent + * old->parent cannot be NULL (removing root), misc children should have been moved by the caller earlier. + */ + if (old->memory_first_child) + append_siblings_list(&old->parent->memory_first_child, old->memory_first_child, old->parent); + if (old->io_first_child) + append_siblings_list(&old->parent->io_first_child, old->io_first_child, old->parent); + if (old->misc_first_child) + append_siblings_list(&old->parent->misc_first_child, old->misc_first_child, old->parent); + } + + hwloc_free_unlinked_object(old); +} + +/* This function may use a tma, it cannot free() or realloc() */ +static int +hwloc__duplicate_object(struct hwloc_topology *newtopology, + struct hwloc_obj *newparent, + struct hwloc_obj *newobj, + struct hwloc_obj *src) +{ + struct hwloc_tma *tma = newtopology->tma; + hwloc_obj_t *level; + unsigned level_width; + size_t len; + unsigned i; + hwloc_obj_t child, prev; + int err = 0; + + /* either we're duplicating to an already allocated new root, which has no newparent, + * or we're duplicating to a non-yet allocated new non-root, which will have a newparent. 
+ */ + assert(!newparent == !!newobj); + + if (!newobj) { + newobj = hwloc_alloc_setup_object(newtopology, src->type, src->os_index); + if (!newobj) + return -1; + } + + /* duplicate all non-object-pointer fields */ + newobj->logical_index = src->logical_index; + newobj->depth = src->depth; + newobj->sibling_rank = src->sibling_rank; + + newobj->type = src->type; + newobj->os_index = src->os_index; + newobj->gp_index = src->gp_index; + newobj->symmetric_subtree = src->symmetric_subtree; + + if (src->name) + newobj->name = hwloc_tma_strdup(tma, src->name); + if (src->subtype) + newobj->subtype = hwloc_tma_strdup(tma, src->subtype); + newobj->userdata = src->userdata; + + newobj->total_memory = src->total_memory; + + memcpy(newobj->attr, src->attr, sizeof(*newobj->attr)); + + if (src->type == HWLOC_OBJ_NUMANODE && src->attr->numanode.page_types_len) { + len = src->attr->numanode.page_types_len * sizeof(struct hwloc_memory_page_type_s); + newobj->attr->numanode.page_types = hwloc_tma_malloc(tma, len); + memcpy(newobj->attr->numanode.page_types, src->attr->numanode.page_types, len); + } + + newobj->cpuset = hwloc_bitmap_tma_dup(tma, src->cpuset); + newobj->complete_cpuset = hwloc_bitmap_tma_dup(tma, src->complete_cpuset); + newobj->nodeset = hwloc_bitmap_tma_dup(tma, src->nodeset); + newobj->complete_nodeset = hwloc_bitmap_tma_dup(tma, src->complete_nodeset); + + hwloc__tma_dup_infos(tma, newobj, src); + + /* find our level */ + if (src->depth < 0) { + i = HWLOC_SLEVEL_FROM_DEPTH(src->depth); + level = newtopology->slevels[i].objs; + level_width = newtopology->slevels[i].nbobjs; + /* deal with first/last pointers of special levels, even if not really needed */ + if (!newobj->logical_index) + newtopology->slevels[i].first = newobj; + if (newobj->logical_index == newtopology->slevels[i].nbobjs - 1) + newtopology->slevels[i].last = newobj; + } else { + level = newtopology->levels[src->depth]; + level_width = newtopology->level_nbobjects[src->depth]; + } + /* place us for 
real */ + assert(newobj->logical_index < level_width); + level[newobj->logical_index] = newobj; + /* link to already-inserted cousins + * (hwloc_pci_belowroot_apply_locality() can cause out-of-order logical indexes) + */ + if (newobj->logical_index > 0 && level[newobj->logical_index-1]) { + newobj->prev_cousin = level[newobj->logical_index-1]; + level[newobj->logical_index-1]->next_cousin = newobj; + } + if (newobj->logical_index < level_width-1 && level[newobj->logical_index+1]) { + newobj->next_cousin = level[newobj->logical_index+1]; + level[newobj->logical_index+1]->prev_cousin = newobj; + } + + /* prepare for children */ + if (src->arity) { + newobj->children = hwloc_tma_malloc(tma, src->arity * sizeof(*newobj->children)); + if (!newobj->children) + return -1; + } + newobj->arity = src->arity; + newobj->memory_arity = src->memory_arity; + newobj->io_arity = src->io_arity; + newobj->misc_arity = src->misc_arity; + + /* actually insert children now */ + for_each_child(child, src) { + err = hwloc__duplicate_object(newtopology, newobj, NULL, child); + if (err < 0) + goto out_with_children; + } + for_each_memory_child(child, src) { + err = hwloc__duplicate_object(newtopology, newobj, NULL, child); + if (err < 0) + return err; + } + for_each_io_child(child, src) { + err = hwloc__duplicate_object(newtopology, newobj, NULL, child); + if (err < 0) + goto out_with_children; + } + for_each_misc_child(child, src) { + err = hwloc__duplicate_object(newtopology, newobj, NULL, child); + if (err < 0) + goto out_with_children; + } + + out_with_children: + + /* link children if all of them where inserted */ + if (!err) { + /* only next_sibling is set by insert_by_parent(). + * sibling_rank was set above. 
+ */ + if (newobj->arity) { + newobj->children[0]->prev_sibling = NULL; + for(i=1; iarity; i++) + newobj->children[i]->prev_sibling = newobj->children[i-1]; + newobj->last_child = newobj->children[newobj->arity-1]; + } + if (newobj->memory_arity) { + child = newobj->memory_first_child; + prev = NULL; + while (child) { + child->prev_sibling = prev; + prev = child; + child = child->next_sibling; + } + } + if (newobj->io_arity) { + child = newobj->io_first_child; + prev = NULL; + while (child) { + child->prev_sibling = prev; + prev = child; + child = child->next_sibling; + } + } + if (newobj->misc_arity) { + child = newobj->misc_first_child; + prev = NULL; + while (child) { + child->prev_sibling = prev; + prev = child; + child = child->next_sibling; + } + } + } + + /* some children insertion may have failed, but some children may have been inserted below us already. + * keep inserting ourself and let the caller clean the entire tree if we return an error. + */ + + if (newparent) { + /* no need to check the children insert order here, the source topology + * is supposed to be OK already, and we have debug asserts. 
+ */ + hwloc_insert_object_by_parent(newtopology, newparent, newobj); + + /* place us inside our parent children array */ + if (hwloc__obj_type_is_normal(newobj->type)) + newparent->children[newobj->sibling_rank] = newobj; + } + + return err; +} + +static int +hwloc__topology_init (struct hwloc_topology **topologyp, unsigned nblevels, struct hwloc_tma *tma); + +/* This function may use a tma, it cannot free() or realloc() */ +int +hwloc__topology_dup(hwloc_topology_t *newp, + hwloc_topology_t old, + struct hwloc_tma *tma) +{ + hwloc_topology_t new; + hwloc_obj_t newroot; + hwloc_obj_t oldroot = hwloc_get_root_obj(old); + unsigned i; + int err; + + if (!old->is_loaded) { + errno = EINVAL; + return -1; + } + + err = hwloc__topology_init(&new, old->nb_levels_allocated, tma); + if (err < 0) + goto out; + + new->flags = old->flags; + memcpy(new->type_filter, old->type_filter, sizeof(old->type_filter)); + new->is_thissystem = old->is_thissystem; + new->is_loaded = 1; + new->pid = old->pid; + new->next_gp_index = old->next_gp_index; + + memcpy(&new->binding_hooks, &old->binding_hooks, sizeof(old->binding_hooks)); + + memcpy(new->support.discovery, old->support.discovery, sizeof(*old->support.discovery)); + memcpy(new->support.cpubind, old->support.cpubind, sizeof(*old->support.cpubind)); + memcpy(new->support.membind, old->support.membind, sizeof(*old->support.membind)); + + new->allowed_cpuset = hwloc_bitmap_tma_dup(tma, old->allowed_cpuset); + new->allowed_nodeset = hwloc_bitmap_tma_dup(tma, old->allowed_nodeset); + + new->userdata_export_cb = old->userdata_export_cb; + new->userdata_import_cb = old->userdata_import_cb; + new->userdata_not_decoded = old->userdata_not_decoded; + + assert(!old->machine_memory.local_memory); + assert(!old->machine_memory.page_types_len); + assert(!old->machine_memory.page_types); + + for(i = HWLOC_OBJ_TYPE_MIN; i < HWLOC_OBJ_TYPE_MAX; i++) + new->type_depth[i] = old->type_depth[i]; + + /* duplicate levels and we'll place objects there when 
duplicating objects */ + new->nb_levels = old->nb_levels; + assert(new->nb_levels_allocated >= new->nb_levels); + for(i=1 /* root level already allocated */ ; inb_levels; i++) { + new->level_nbobjects[i] = old->level_nbobjects[i]; + new->levels[i] = hwloc_tma_calloc(tma, new->level_nbobjects[i] * sizeof(*new->levels[i])); + } + for(i=0; islevels[i].nbobjs = old->slevels[i].nbobjs; + if (new->slevels[i].nbobjs) + new->slevels[i].objs = hwloc_tma_calloc(tma, new->slevels[i].nbobjs * sizeof(*new->slevels[i].objs)); + } + + /* recursively duplicate object children */ + newroot = hwloc_get_root_obj(new); + err = hwloc__duplicate_object(new, NULL, newroot, oldroot); + if (err < 0) + goto out_with_topology; + + err = hwloc_internal_distances_dup(new, old); + if (err < 0) + goto out_with_topology; + + /* we connected everything during duplication */ + new->modified = 0; + + /* no need to duplicate backends, topology is already loaded */ + new->backends = NULL; + new->get_pci_busid_cpuset_backend = NULL; + +#ifndef HWLOC_DEBUG + if (getenv("HWLOC_DEBUG_CHECK")) +#endif + hwloc_topology_check(new); + + *newp = new; + return 0; + + out_with_topology: + assert(!tma || !tma->dontfree); /* this tma cannot fail to allocate */ + hwloc_topology_destroy(new); + out: + return -1; +} + +int +hwloc_topology_dup(hwloc_topology_t *newp, + hwloc_topology_t old) +{ + return hwloc__topology_dup(newp, old, NULL); +} + +/* WARNING: The indexes of this array MUST match the ordering that of + the obj_order_type[] array, below. Specifically, the values must + be laid out such that: + + obj_order_type[obj_type_order[N]] = N + + for all HWLOC_OBJ_* values of N. Put differently: + + obj_type_order[A] = B + + where the A values are in order of the hwloc_obj_type_t enum, and + the B values are the corresponding indexes of obj_order_type. + + We can't use C99 syntax to initialize this in a little safer manner + -- bummer. :-( + + Correctness is asserted in hwloc_topology_init() when debug is enabled. 
+ */ +/***** Make sure you update obj_type_priority[] below as well. *****/ +static const unsigned obj_type_order[] = { + /* first entry is HWLOC_OBJ_MACHINE */ 0, + /* next entry is HWLOC_OBJ_PACKAGE */ 3, + /* next entry is HWLOC_OBJ_CORE */ 12, + /* next entry is HWLOC_OBJ_PU */ 16, + /* next entry is HWLOC_OBJ_L1CACHE */ 10, + /* next entry is HWLOC_OBJ_L2CACHE */ 8, + /* next entry is HWLOC_OBJ_L3CACHE */ 6, + /* next entry is HWLOC_OBJ_L4CACHE */ 5, + /* next entry is HWLOC_OBJ_L5CACHE */ 4, + /* next entry is HWLOC_OBJ_L1ICACHE */ 11, + /* next entry is HWLOC_OBJ_L2ICACHE */ 9, + /* next entry is HWLOC_OBJ_L3ICACHE */ 7, + /* next entry is HWLOC_OBJ_GROUP */ 1, + /* next entry is HWLOC_OBJ_NUMANODE */ 2, + /* next entry is HWLOC_OBJ_BRIDGE */ 13, + /* next entry is HWLOC_OBJ_PCI_DEVICE */ 14, + /* next entry is HWLOC_OBJ_OS_DEVICE */ 15, + /* next entry is HWLOC_OBJ_MISC */ 17 +}; + +#ifndef NDEBUG /* only used in debug check assert if !NDEBUG */ +static const hwloc_obj_type_t obj_order_type[] = { + HWLOC_OBJ_MACHINE, + HWLOC_OBJ_GROUP, + HWLOC_OBJ_NUMANODE, + HWLOC_OBJ_PACKAGE, + HWLOC_OBJ_L5CACHE, + HWLOC_OBJ_L4CACHE, + HWLOC_OBJ_L3CACHE, + HWLOC_OBJ_L3ICACHE, + HWLOC_OBJ_L2CACHE, + HWLOC_OBJ_L2ICACHE, + HWLOC_OBJ_L1CACHE, + HWLOC_OBJ_L1ICACHE, + HWLOC_OBJ_CORE, + HWLOC_OBJ_BRIDGE, + HWLOC_OBJ_PCI_DEVICE, + HWLOC_OBJ_OS_DEVICE, + HWLOC_OBJ_PU, + HWLOC_OBJ_MISC /* Misc is always a leaf */ +}; +#endif +/***** Make sure you update obj_type_priority[] below as well. *****/ + +/* priority to be used when merging identical parent/children object + * (in merge_useless_child), keep the highest priority one. + * + * Always keep Machine/NUMANode/PU/PCIDev/OSDev + * then Core + * then Package + * then Cache, + * then Instruction Caches + * then always drop Group/Misc/Bridge. + * + * Some type won't actually ever be involved in such merging. + */ +/***** Make sure you update this array when changing the list of types. 
*****/ +static const int obj_type_priority[] = { + /* first entry is HWLOC_OBJ_MACHINE */ 90, + /* next entry is HWLOC_OBJ_PACKAGE */ 40, + /* next entry is HWLOC_OBJ_CORE */ 60, + /* next entry is HWLOC_OBJ_PU */ 100, + /* next entry is HWLOC_OBJ_L1CACHE */ 20, + /* next entry is HWLOC_OBJ_L2CACHE */ 20, + /* next entry is HWLOC_OBJ_L3CACHE */ 20, + /* next entry is HWLOC_OBJ_L4CACHE */ 20, + /* next entry is HWLOC_OBJ_L5CACHE */ 20, + /* next entry is HWLOC_OBJ_L1ICACHE */ 19, + /* next entry is HWLOC_OBJ_L2ICACHE */ 19, + /* next entry is HWLOC_OBJ_L3ICACHE */ 19, + /* next entry is HWLOC_OBJ_GROUP */ 0, + /* next entry is HWLOC_OBJ_NUMANODE */ 100, + /* next entry is HWLOC_OBJ_BRIDGE */ 0, + /* next entry is HWLOC_OBJ_PCI_DEVICE */ 100, + /* next entry is HWLOC_OBJ_OS_DEVICE */ 100, + /* next entry is HWLOC_OBJ_MISC */ 0 +}; + +int hwloc_compare_types (hwloc_obj_type_t type1, hwloc_obj_type_t type2) +{ + unsigned order1 = obj_type_order[type1]; + unsigned order2 = obj_type_order[type2]; + + /* only normal objects are comparable. others are only comparable with machine */ + if (!hwloc__obj_type_is_normal(type1) + && hwloc__obj_type_is_normal(type2) && type2 != HWLOC_OBJ_MACHINE) + return HWLOC_TYPE_UNORDERED; + if (!hwloc__obj_type_is_normal(type2) + && hwloc__obj_type_is_normal(type1) && type1 != HWLOC_OBJ_MACHINE) + return HWLOC_TYPE_UNORDERED; + + return order1 - order2; +} + +enum hwloc_obj_cmp_e { + HWLOC_OBJ_EQUAL = HWLOC_BITMAP_EQUAL, /**< \brief Equal */ + HWLOC_OBJ_INCLUDED = HWLOC_BITMAP_INCLUDED, /**< \brief Strictly included into */ + HWLOC_OBJ_CONTAINS = HWLOC_BITMAP_CONTAINS, /**< \brief Strictly contains */ + HWLOC_OBJ_INTERSECTS = HWLOC_BITMAP_INTERSECTS, /**< \brief Intersects, but no inclusion! 
*/ + HWLOC_OBJ_DIFFERENT = HWLOC_BITMAP_DIFFERENT /**< \brief No intersection */ +}; + +static enum hwloc_obj_cmp_e +hwloc_type_cmp(hwloc_obj_t obj1, hwloc_obj_t obj2) +{ + hwloc_obj_type_t type1 = obj1->type; + hwloc_obj_type_t type2 = obj2->type; + int compare; + + compare = hwloc_compare_types(type1, type2); + if (compare == HWLOC_TYPE_UNORDERED) + return HWLOC_OBJ_DIFFERENT; /* we cannot do better */ + if (compare > 0) + return HWLOC_OBJ_INCLUDED; + if (compare < 0) + return HWLOC_OBJ_CONTAINS; + + if (obj1->type == HWLOC_OBJ_GROUP + && (obj1->attr->group.kind != obj2->attr->group.kind + || obj1->attr->group.subkind != obj2->attr->group.subkind)) + return HWLOC_OBJ_DIFFERENT; /* we cannot do better */ + + return HWLOC_OBJ_EQUAL; +} + +/* + * How to compare objects based on cpusets. + */ + +static int +hwloc_obj_cmp_sets(hwloc_obj_t obj1, hwloc_obj_t obj2) +{ + hwloc_bitmap_t set1, set2; + int res = HWLOC_OBJ_DIFFERENT; + + assert(!hwloc__obj_type_is_special(obj1->type)); + assert(!hwloc__obj_type_is_special(obj2->type)); + + /* compare cpusets first */ + if (obj1->complete_cpuset && obj2->complete_cpuset) { + set1 = obj1->complete_cpuset; + set2 = obj2->complete_cpuset; + } else { + set1 = obj1->cpuset; + set2 = obj2->cpuset; + } + if (set1 && set2 && !hwloc_bitmap_iszero(set1) && !hwloc_bitmap_iszero(set2)) { + res = hwloc_bitmap_compare_inclusion(set1, set2); + if (res == HWLOC_OBJ_INTERSECTS) + return HWLOC_OBJ_INTERSECTS; + } + + /* then compare nodesets, and combine the results */ + if (obj1->complete_nodeset && obj2->complete_nodeset) { + set1 = obj1->complete_nodeset; + set2 = obj2->complete_nodeset; + } else { + set1 = obj1->nodeset; + set2 = obj2->nodeset; + } + if (set1 && set2 && !hwloc_bitmap_iszero(set1) && !hwloc_bitmap_iszero(set2)) { + int noderes = hwloc_bitmap_compare_inclusion(set1, set2); + /* deal with conflicting cpusets/nodesets inclusions */ + if (noderes == HWLOC_OBJ_INCLUDED) { + if (res == HWLOC_OBJ_CONTAINS) + /* contradicting order 
for cpusets and nodesets */ + return HWLOC_OBJ_INTERSECTS; + res = HWLOC_OBJ_INCLUDED; + + } else if (noderes == HWLOC_OBJ_CONTAINS) { + if (res == HWLOC_OBJ_INCLUDED) + /* contradicting order for cpusets and nodesets */ + return HWLOC_OBJ_INTERSECTS; + res = HWLOC_OBJ_CONTAINS; + + } else if (noderes == HWLOC_OBJ_INTERSECTS) { + return HWLOC_OBJ_INTERSECTS; + + } else { + /* nodesets are different, keep the cpuset order */ + + } + } + + return res; +} + +/* Compare object cpusets based on complete_cpuset if defined (always correctly ordered), + * or fallback to the main cpusets (only correctly ordered during early insert before disallowed bits are cleared). + * + * This is the sane way to compare object among a horizontal level. + */ +int +hwloc__object_cpusets_compare_first(hwloc_obj_t obj1, hwloc_obj_t obj2) +{ + if (obj1->complete_cpuset && obj2->complete_cpuset) + return hwloc_bitmap_compare_first(obj1->complete_cpuset, obj2->complete_cpuset); + else if (obj1->cpuset && obj2->cpuset) + return hwloc_bitmap_compare_first(obj1->cpuset, obj2->cpuset); + else if (obj1->complete_nodeset && obj2->complete_nodeset) + return hwloc_bitmap_compare_first(obj1->complete_nodeset, obj2->complete_nodeset); + else if (obj1->nodeset && obj2->nodeset) + return hwloc_bitmap_compare_first(obj1->nodeset, obj2->nodeset); + return 0; +} + +/* format the obj info to print in error messages */ +static void +hwloc__report_error_format_obj(char *buf, size_t buflen, hwloc_obj_t obj) +{ + char typestr[64]; + char *cpusetstr; + char *nodesetstr = NULL; + hwloc_obj_type_snprintf(typestr, sizeof(typestr), obj, 0); + hwloc_bitmap_asprintf(&cpusetstr, obj->cpuset); + if (obj->nodeset) /* may be missing during insert */ + hwloc_bitmap_asprintf(&nodesetstr, obj->nodeset); + if (obj->os_index != HWLOC_UNKNOWN_INDEX) + snprintf(buf, buflen, "%s (P#%u cpuset %s%s%s)", + typestr, obj->os_index, cpusetstr, + nodesetstr ? " nodeset " : "", + nodesetstr ? 
nodesetstr : ""); + else + snprintf(buf, buflen, "%s (cpuset %s%s%s)", + typestr, cpusetstr, + nodesetstr ? " nodeset " : "", + nodesetstr ? nodesetstr : ""); + free(cpusetstr); + free(nodesetstr); +} + +/* + * How to insert objects into the topology. + * + * Note: during detection, only the first_child and next_sibling pointers are + * kept up to date. Others are computed only once topology detection is + * complete. + */ + +/* merge new object attributes in old. + * use old if defined, otherwise use new. + */ +static void +merge_insert_equal(hwloc_obj_t new, hwloc_obj_t old) +{ + if (old->os_index == HWLOC_UNKNOWN_INDEX) + old->os_index = new->os_index; + + if (new->infos_count) { + /* FIXME: dedup */ + hwloc__move_infos(&old->infos, &old->infos_count, + &new->infos, &new->infos_count); + } + + if (new->name && !old->name) { + old->name = new->name; + new->name = NULL; + } + if (new->subtype && !old->subtype) { + old->subtype = new->subtype; + new->subtype = NULL; + } + + /* Ignore userdata. It will be NULL before load(). + * It may be non-NULL if alloc+insert_group() after load(). 
+ */ + + switch(new->type) { + case HWLOC_OBJ_NUMANODE: + if (new->attr->numanode.local_memory && !old->attr->numanode.local_memory) { + /* no memory in old, use new memory */ + old->attr->numanode.local_memory = new->attr->numanode.local_memory; + free(old->attr->numanode.page_types); + old->attr->numanode.page_types_len = new->attr->numanode.page_types_len; + old->attr->numanode.page_types = new->attr->numanode.page_types; + new->attr->numanode.page_types = NULL; + new->attr->numanode.page_types_len = 0; + } + /* old->attr->numanode.total_memory will be updated by propagate_total_memory() */ + break; + case HWLOC_OBJ_L1CACHE: + case HWLOC_OBJ_L2CACHE: + case HWLOC_OBJ_L3CACHE: + case HWLOC_OBJ_L4CACHE: + case HWLOC_OBJ_L5CACHE: + case HWLOC_OBJ_L1ICACHE: + case HWLOC_OBJ_L2ICACHE: + case HWLOC_OBJ_L3ICACHE: + if (!old->attr->cache.size) + old->attr->cache.size = new->attr->cache.size; + if (!old->attr->cache.linesize) + old->attr->cache.size = new->attr->cache.linesize; + if (!old->attr->cache.associativity) + old->attr->cache.size = new->attr->cache.linesize; + break; + default: + break; + } +} + +/* returns the result of merge, or NULL if not merged */ +static __hwloc_inline hwloc_obj_t +hwloc__insert_try_merge_group(hwloc_obj_t old, hwloc_obj_t new) +{ + if (new->type == HWLOC_OBJ_GROUP && old->type == HWLOC_OBJ_GROUP) { + /* which group do we keep? 
*/ + if (new->attr->group.dont_merge) { + if (old->attr->group.dont_merge) + /* nobody wants to be merged */ + return NULL; + + /* keep the new one, it doesn't want to be merged */ + hwloc_replace_linked_object(old, new); + return new; + + } else { + if (old->attr->group.dont_merge) + /* keep the old one, it doesn't want to be merged */ + return old; + + /* compare subkinds to decice who to keep */ + if (new->attr->group.kind < old->attr->group.kind) + hwloc_replace_linked_object(old, new); + return old; + } + } + + if (new->type == HWLOC_OBJ_GROUP && !new->attr->group.dont_merge) { + + if (old->type == HWLOC_OBJ_PU && new->attr->group.kind == HWLOC_GROUP_KIND_MEMORY) + /* Never merge Memory groups with PU, we don't want to attach Memory under PU */ + return NULL; + + /* Remove the Group now. The normal ignore code path wouldn't tell us whether the Group was removed or not, + * while some callers need to know (at least hwloc_topology_insert_group()). + */ + return old; + + } else if (old->type == HWLOC_OBJ_GROUP && !old->attr->group.dont_merge) { + + if (new->type == HWLOC_OBJ_PU && old->attr->group.kind == HWLOC_GROUP_KIND_MEMORY) + /* Never merge Memory groups with PU, we don't want to attach Memory under PU */ + return NULL; + + /* Replace the Group with the new object contents + * and let the caller free the new object + */ + hwloc_replace_linked_object(old, new); + return old; + + } else { + /* cannot merge */ + return NULL; + } +} + +/* Try to insert OBJ in CUR, recurse if needed. + * Returns the object if it was inserted, + * the remaining object it was merged, + * NULL if failed to insert. + */ +static struct hwloc_obj * +hwloc___insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t cur, hwloc_obj_t obj, + hwloc_report_error_t report_error) +{ + hwloc_obj_t child, next_child = NULL; + /* These will always point to the pointer to their next last child. 
*/ + hwloc_obj_t *cur_children = &cur->first_child; + hwloc_obj_t *obj_children = &obj->first_child; + /* Pointer where OBJ should be put */ + hwloc_obj_t *putp = NULL; /* OBJ position isn't found yet */ + + assert(!hwloc__obj_type_is_memory(obj->type)); + + /* Iteration with prefetching to be completely safe against CHILD removal. + * The list is already sorted by cpuset, and there's no intersection between siblings. + */ + for (child = cur->first_child, child ? next_child = child->next_sibling : NULL; + child; + child = next_child, child ? next_child = child->next_sibling : NULL) { + + int res = hwloc_obj_cmp_sets(obj, child); + int setres = res; + + if (res == HWLOC_OBJ_EQUAL) { + hwloc_obj_t merged = hwloc__insert_try_merge_group(child, obj); + if (merged) + return merged; + /* otherwise compare actual types to decide of the inclusion */ + res = hwloc_type_cmp(obj, child); + } + + switch (res) { + case HWLOC_OBJ_EQUAL: + /* Two objects with same type. + * Groups are handled above. + */ + merge_insert_equal(obj, child); + /* Already present, no need to insert. */ + return child; + + case HWLOC_OBJ_INCLUDED: + /* OBJ is strictly contained is some child of CUR, go deeper. */ + return hwloc___insert_object_by_cpuset(topology, child, obj, report_error); + + case HWLOC_OBJ_INTERSECTS: + if (report_error) { + char childstr[512]; + char objstr[512]; + char msg[1100]; + hwloc__report_error_format_obj(objstr, sizeof(objstr), obj); + hwloc__report_error_format_obj(childstr, sizeof(childstr), child); + snprintf(msg, sizeof(msg), "%s intersects with %s without inclusion!", objstr, childstr); + report_error(msg, __LINE__); + } + goto putback; + + case HWLOC_OBJ_DIFFERENT: + /* OBJ should be a child of CUR before CHILD, mark its position if not found yet. */ + if (!putp && hwloc__object_cpusets_compare_first(obj, child) < 0) + /* Don't insert yet, there could be intersect errors later */ + putp = cur_children; + /* Advance cur_children. 
*/ + cur_children = &child->next_sibling; + break; + + case HWLOC_OBJ_CONTAINS: + /* OBJ contains CHILD, remove CHILD from CUR */ + *cur_children = child->next_sibling; + child->next_sibling = NULL; + /* Put CHILD in OBJ */ + *obj_children = child; + obj_children = &child->next_sibling; + child->parent = obj; + if (setres == HWLOC_OBJ_EQUAL) { + obj->memory_first_child = child->memory_first_child; + child->memory_first_child = NULL; + } + break; + } + } + /* cur/obj_children points to last CUR/OBJ child next_sibling pointer, which must be NULL. */ + assert(!*obj_children); + assert(!*cur_children); + + /* Put OBJ where it belongs, or in last in CUR's children. */ + if (!putp) + putp = cur_children; + obj->next_sibling = *putp; + *putp = obj; + obj->parent = cur; + + topology->modified = 1; + return obj; + + putback: + /* Put-back OBJ children in CUR and return an error. */ + if (putp) + cur_children = putp; /* No need to try to insert before where OBJ was supposed to go */ + else + cur_children = &cur->first_child; /* Start from the beginning */ + /* We can insert in order, but there can be holes in the middle. */ + while ((child = obj->first_child) != NULL) { + /* Remove from OBJ */ + obj->first_child = child->next_sibling; + obj->parent = cur; + /* Find child position in CUR, and insert. */ + while (*cur_children && hwloc__object_cpusets_compare_first(*cur_children, child) < 0) + cur_children = &(*cur_children)->next_sibling; + child->next_sibling = *cur_children; + *cur_children = child; + } + return NULL; +} + +/* this differs from hwloc_get_obj_covering_cpuset() by: + * - not looking at the parent cpuset first, which means we can insert + * below root even if root PU bits are not set yet (PU are inserted later). + * - returning the first child that exactly matches instead of walking down in case + * of identical children. 
+ */ +static struct hwloc_obj * +hwloc__find_obj_covering_memory_cpuset(struct hwloc_topology *topology, hwloc_obj_t parent, hwloc_bitmap_t cpuset) +{ + hwloc_obj_t child = hwloc_get_child_covering_cpuset(topology, cpuset, parent); + if (!child) + return parent; + if (child && hwloc_bitmap_isequal(child->cpuset, cpuset)) + return child; + return hwloc__find_obj_covering_memory_cpuset(topology, child, cpuset); +} + +static struct hwloc_obj * +hwloc__find_insert_memory_parent(struct hwloc_topology *topology, hwloc_obj_t obj, + hwloc_report_error_t report_error) +{ + hwloc_obj_t parent, group, result; + + if (hwloc_bitmap_iszero(obj->cpuset)) { + /* CPU-less go in dedicated group below root */ + parent = topology->levels[0][0]; + + } else { + /* find the highest obj covering the cpuset */ + parent = hwloc__find_obj_covering_memory_cpuset(topology, topology->levels[0][0], obj->cpuset); + if (!parent) { + /* fallback to root */ + parent = hwloc_get_root_obj(topology); + } + + if (parent->type == HWLOC_OBJ_PU) { + /* Never attach to PU, try parent */ + parent = parent->parent; + assert(parent); + } + + /* TODO: if root->cpuset was updated earlier, we would be sure whether the group will remain identical to root */ + if (parent != topology->levels[0][0] && hwloc_bitmap_isequal(parent->cpuset, obj->cpuset)) + /* that parent is fine */ + return parent; + } + + if (!hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) + /* even if parent isn't perfect, we don't want an intermediate group */ + return parent; + + /* need to insert an intermediate group for attaching the NUMA node */ + group = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX); + if (!group) + /* failed to create the group, fallback to larger parent */ + return parent; + + group->attr->group.kind = HWLOC_GROUP_KIND_MEMORY; + group->cpuset = hwloc_bitmap_dup(obj->cpuset); + group->complete_cpuset = hwloc_bitmap_dup(obj->complete_cpuset); + /* we could duplicate nodesets too but 
hwloc__insert_object_by_cpuset() + * doesn't actually need it. and it could prevent future calls from reusing + * that groups for other NUMA nodes. + */ + if (!group->cpuset != !obj->cpuset + || !group->complete_cpuset != !obj->complete_cpuset) { + /* failed to create the group, fallback to larger parent */ + hwloc_free_unlinked_object(group); + return parent; + } + + result = hwloc__insert_object_by_cpuset(topology, parent, group, report_error); + if (!result) { + /* failed to insert, fallback to larger parent */ + return parent; + } + + assert(result == group); + return group; +} + +/*attach the given memory object below the given normal parent. */ +struct hwloc_obj * +hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent, + hwloc_obj_t obj, + hwloc_report_error_t report_error __hwloc_attribute_unused) +{ + hwloc_obj_t *cur_children; + + assert(parent); + assert(hwloc__obj_type_is_normal(parent->type)); + +#if 0 + /* TODO: enable this instead of hack in fixup_sets once NUMA nodes are inserted late */ + /* copy the parent cpuset in case it's larger than expected. + * we could also keep the cpuset smaller than the parent and say that a normal-parent + * can have multiple memory children with smaller cpusets. + * However, the user decided the ignore Groups, so hierarchy/locality loss is expected. + */ + hwloc_bitmap_copy(obj->cpuset, parent->cpuset); +#endif + + /* only NUMA nodes are memory for now, just append to the end of the list */ + assert(obj->type == HWLOC_OBJ_NUMANODE); + assert(obj->nodeset); + cur_children = &parent->memory_first_child; + while (*cur_children) { + /* TODO check that things are inserted in order. 
+ * it's OK for KNL, the only user so far + */ + cur_children = &(*cur_children)->next_sibling; + } + *cur_children = obj; + obj->next_sibling = NULL; + + /* Initialize the complete nodeset if needed */ + if (!obj->complete_nodeset) { + obj->complete_nodeset = hwloc_bitmap_dup(obj->nodeset); + } + + /* Add the bit to the top sets, and to the parent CPU-side object */ + if (obj->type == HWLOC_OBJ_NUMANODE) { + if (hwloc_bitmap_isset(obj->nodeset, obj->os_index)) + hwloc_bitmap_set(topology->levels[0][0]->nodeset, obj->os_index); + hwloc_bitmap_set(topology->levels[0][0]->complete_nodeset, obj->os_index); + } + + topology->modified = 1; + return obj; +} + +/* insertion routine that lets you change the error reporting callback */ +struct hwloc_obj * +hwloc__insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t root, + hwloc_obj_t obj, + hwloc_report_error_t report_error) +{ + struct hwloc_obj *result; + +#ifdef HWLOC_DEBUG + assert(!hwloc__obj_type_is_special(obj->type)); + + /* we need at least one non-NULL set (normal or complete, cpuset or nodeset) */ + assert(obj->cpuset || obj->complete_cpuset || obj->nodeset || obj->complete_nodeset); + /* we support the case where all of them are empty. + * it may happen when hwloc__find_insert_memory_parent() + * inserts a Group for a CPU-less NUMA-node. + */ +#endif + + if (hwloc__obj_type_is_memory(obj->type)) { + if (!root) { + root = hwloc__find_insert_memory_parent(topology, obj, report_error); + if (!root) { + hwloc_free_unlinked_object(obj); + return NULL; + } + } + return hwloc__attach_memory_object(topology, root, obj, report_error); + } + + if (!root) + /* Start at the top. 
*/ + root = topology->levels[0][0]; + + result = hwloc___insert_object_by_cpuset(topology, root, obj, report_error); + if (result && result->type == HWLOC_OBJ_PU) { + /* Add the bit to the top sets */ + if (hwloc_bitmap_isset(result->cpuset, result->os_index)) + hwloc_bitmap_set(topology->levels[0][0]->cpuset, result->os_index); + hwloc_bitmap_set(topology->levels[0][0]->complete_cpuset, result->os_index); + } + if (result != obj) { + /* either failed to insert, or got merged, free the original object */ + hwloc_free_unlinked_object(obj); + } + return result; +} + +/* the default insertion routine warns in case of error. + * it's used by most backends */ +struct hwloc_obj * +hwloc_insert_object_by_cpuset(struct hwloc_topology *topology, hwloc_obj_t obj) +{ + return hwloc__insert_object_by_cpuset(topology, NULL, obj, hwloc_report_os_error); +} + +void +hwloc_insert_object_by_parent(struct hwloc_topology *topology, hwloc_obj_t parent, hwloc_obj_t obj) +{ + hwloc_obj_t *current; + + if (obj->type == HWLOC_OBJ_MISC) { + /* Append to the end of the Misc list */ + for (current = &parent->misc_first_child; *current; current = &(*current)->next_sibling); + } else if (hwloc__obj_type_is_io(obj->type)) { + /* Append to the end of the I/O list */ + for (current = &parent->io_first_child; *current; current = &(*current)->next_sibling); + } else if (hwloc__obj_type_is_memory(obj->type)) { + /* Append to the end of the memory list */ + for (current = &parent->memory_first_child; *current; current = &(*current)->next_sibling); + /* Add the bit to the top sets */ + if (obj->type == HWLOC_OBJ_NUMANODE) { + if (hwloc_bitmap_isset(obj->nodeset, obj->os_index)) + hwloc_bitmap_set(topology->levels[0][0]->nodeset, obj->os_index); + hwloc_bitmap_set(topology->levels[0][0]->complete_nodeset, obj->os_index); + } + } else { + /* Append to the end of the list. + * The caller takes care of inserting children in the right cpuset order, without intersection between them. 
+ * Duplicating doesn't need to check the order since the source topology is supposed to be OK already. + * XML reorders if needed, and fails on intersecting siblings. + * Other callers just insert random objects such as I/O or Misc, no cpuset issue there. + */ + for (current = &parent->first_child; *current; current = &(*current)->next_sibling); + /* Add the bit to the top sets */ + if (obj->type == HWLOC_OBJ_PU) { + if (hwloc_bitmap_isset(obj->cpuset, obj->os_index)) + hwloc_bitmap_set(topology->levels[0][0]->cpuset, obj->os_index); + hwloc_bitmap_set(topology->levels[0][0]->complete_cpuset, obj->os_index); + } + } + + *current = obj; + obj->parent = parent; + obj->next_sibling = NULL; + topology->modified = 1; +} + +hwloc_obj_t +hwloc_alloc_setup_object(hwloc_topology_t topology, + hwloc_obj_type_t type, unsigned os_index) +{ + struct hwloc_obj *obj = hwloc_tma_malloc(topology->tma, sizeof(*obj)); + memset(obj, 0, sizeof(*obj)); + obj->type = type; + obj->os_index = os_index; + obj->gp_index = topology->next_gp_index++; + obj->attr = hwloc_tma_malloc(topology->tma, sizeof(*obj->attr)); + memset(obj->attr, 0, sizeof(*obj->attr)); + /* do not allocate the cpuset here, let the caller do it */ + return obj; +} + +hwloc_obj_t +hwloc_topology_alloc_group_object(struct hwloc_topology *topology) +{ + if (!topology->is_loaded) { + /* this could actually work, see insert() below */ + errno = EINVAL; + return NULL; + } + return hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX); +} + +static void hwloc_propagate_symmetric_subtree(hwloc_topology_t topology, hwloc_obj_t root); +static void propagate_total_memory(hwloc_obj_t obj); +static void hwloc_set_group_depth(hwloc_topology_t topology); + +hwloc_obj_t +hwloc_topology_insert_group_object(struct hwloc_topology *topology, hwloc_obj_t obj) +{ + hwloc_obj_t res, root; + int cmp; + + if (!topology->is_loaded) { + /* this could actually work, we would just need to disable connect_children/levels below */ 
+ hwloc_free_unlinked_object(obj); + errno = EINVAL; + return NULL; + } + + if (topology->type_filter[HWLOC_OBJ_GROUP] == HWLOC_TYPE_FILTER_KEEP_NONE) { + hwloc_free_unlinked_object(obj); + errno = EINVAL; + return NULL; + } + + root = hwloc_get_root_obj(topology); + if (obj->cpuset) + hwloc_bitmap_and(obj->cpuset, obj->cpuset, root->cpuset); + if (obj->complete_cpuset) + hwloc_bitmap_and(obj->complete_cpuset, obj->complete_cpuset, root->complete_cpuset); + if (obj->nodeset) + hwloc_bitmap_and(obj->nodeset, obj->nodeset, root->nodeset); + if (obj->complete_nodeset) + hwloc_bitmap_and(obj->complete_nodeset, obj->complete_nodeset, root->complete_nodeset); + + if ((!obj->cpuset || hwloc_bitmap_iszero(obj->cpuset)) + && (!obj->complete_cpuset || hwloc_bitmap_iszero(obj->complete_cpuset)) + && (!obj->nodeset || hwloc_bitmap_iszero(obj->nodeset)) + && (!obj->complete_nodeset || hwloc_bitmap_iszero(obj->complete_nodeset))) { + hwloc_free_unlinked_object(obj); + errno = EINVAL; + return NULL; + } + + cmp = hwloc_obj_cmp_sets(obj, root); + if (cmp == HWLOC_OBJ_INCLUDED) { + res = hwloc__insert_object_by_cpuset(topology, NULL, obj, NULL /* do not show errors on stdout */); + } else { + /* just merge root */ + res = root; + } + + if (!res) + return NULL; + if (res != obj) + /* merged */ + return res; + + /* properly inserted */ + hwloc_obj_add_children_sets(obj); + if (hwloc_topology_reconnect(topology, 0) < 0) + return NULL; + + hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]); + hwloc_set_group_depth(topology); + +#ifndef HWLOC_DEBUG + if (getenv("HWLOC_DEBUG_CHECK")) +#endif + hwloc_topology_check(topology); + + return obj; +} + +hwloc_obj_t +hwloc_topology_insert_misc_object(struct hwloc_topology *topology, hwloc_obj_t parent, const char *name) +{ + hwloc_obj_t obj; + + if (topology->type_filter[HWLOC_OBJ_MISC] == HWLOC_TYPE_FILTER_KEEP_NONE) { + errno = EINVAL; + return NULL; + } + + if (!topology->is_loaded) { + errno = EINVAL; + return NULL; + } + + 
obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_MISC, HWLOC_UNKNOWN_INDEX); + if (name) + obj->name = strdup(name); + + hwloc_insert_object_by_parent(topology, parent, obj); + + /* FIXME: only connect misc parent children and misc level, + * but this API is likely not performance critical anyway + */ + hwloc_topology_reconnect(topology, 0); + +#ifndef HWLOC_DEBUG + if (getenv("HWLOC_DEBUG_CHECK")) +#endif + hwloc_topology_check(topology); + + return obj; +} + +/* assuming set is included in the topology complete_cpuset + * and all objects have a proper complete_cpuset, + * return the best one containing set. + * if some object are equivalent (same complete_cpuset), return the highest one. + */ +static hwloc_obj_t +hwloc_get_highest_obj_covering_complete_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) +{ + hwloc_obj_t current = hwloc_get_root_obj(topology); + hwloc_obj_t child; + + if (hwloc_bitmap_isequal(set, current->complete_cpuset)) + /* root cpuset is exactly what we want, no need to look at children, we want the highest */ + return current; + + recurse: + /* find the right child */ + for_each_child(child, current) { + if (hwloc_bitmap_isequal(set, child->complete_cpuset)) + /* child puset is exactly what we want, no need to look at children, we want the highest */ + return child; + if (!hwloc_bitmap_iszero(child->complete_cpuset) && hwloc_bitmap_isincluded(set, child->complete_cpuset)) + break; + } + + if (child) { + current = child; + goto recurse; + } + + /* no better child */ + return current; +} + +hwloc_obj_t +hwloc_find_insert_io_parent_by_complete_cpuset(struct hwloc_topology *topology, hwloc_cpuset_t cpuset) +{ + hwloc_obj_t group_obj, largeparent, parent; + + /* restrict to the existing complete cpuset to avoid errors later */ + hwloc_bitmap_and(cpuset, cpuset, hwloc_topology_get_complete_cpuset(topology)); + if (hwloc_bitmap_iszero(cpuset)) + /* remaining cpuset is empty, invalid */ + return NULL; + + largeparent = 
hwloc_get_highest_obj_covering_complete_cpuset(topology, cpuset); + if (hwloc_bitmap_isequal(largeparent->complete_cpuset, cpuset) + || !hwloc_filter_check_keep_object_type(topology, HWLOC_OBJ_GROUP)) + /* Found a valid object (normal case) */ + return largeparent; + + /* we need to insert an intermediate group */ + group_obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_GROUP, HWLOC_UNKNOWN_INDEX); + if (!group_obj) + /* Failed to insert the exact Group, fallback to largeparent */ + return largeparent; + + group_obj->complete_cpuset = hwloc_bitmap_dup(cpuset); + hwloc_bitmap_and(cpuset, cpuset, hwloc_topology_get_topology_cpuset(topology)); + group_obj->cpuset = hwloc_bitmap_dup(cpuset); + group_obj->attr->group.kind = HWLOC_GROUP_KIND_IO; + parent = hwloc__insert_object_by_cpuset(topology, largeparent, group_obj, hwloc_report_os_error); + if (!parent) + /* Failed to insert the Group, maybe a conflicting cpuset */ + return largeparent; + + /* Group couldn't get merged or we would have gotten the right largeparent earlier */ + assert(parent == group_obj); + + /* Group inserted without being merged, everything OK, setup its sets */ + hwloc_obj_add_children_sets(group_obj); + + return parent; +} + +static int hwloc_memory_page_type_compare(const void *_a, const void *_b) +{ + const struct hwloc_memory_page_type_s *a = _a; + const struct hwloc_memory_page_type_s *b = _b; + /* consider 0 as larger so that 0-size page_type go to the end */ + if (!b->size) + return -1; + /* don't cast a-b in int since those are ullongs */ + if (b->size == a->size) + return 0; + return a->size < b->size ? -1 : 1; +} + +/* Propagate memory counts */ +static void +propagate_total_memory(hwloc_obj_t obj) +{ + hwloc_obj_t child; + unsigned i; + + /* reset total before counting local and children memory */ + obj->total_memory = 0; + + /* Propagate memory up. 
*/ + for_each_child(child, obj) { + propagate_total_memory(child); + obj->total_memory += child->total_memory; + } + for_each_memory_child(child, obj) { + propagate_total_memory(child); + obj->total_memory += child->total_memory; + } + /* No memory under I/O or Misc */ + + if (obj->type == HWLOC_OBJ_NUMANODE) { + obj->total_memory += obj->attr->numanode.local_memory; + + /* By the way, sort the page_type array. + * Cannot do it on insert since some backends (e.g. XML) add page_types after inserting the object. + */ + qsort(obj->attr->numanode.page_types, obj->attr->numanode.page_types_len, sizeof(*obj->attr->numanode.page_types), hwloc_memory_page_type_compare); + /* Ignore 0-size page_types, they are at the end */ + for(i=obj->attr->numanode.page_types_len; i>=1; i--) + if (obj->attr->numanode.page_types[i-1].size) + break; + obj->attr->numanode.page_types_len = i; + } +} + +/* Now that root sets are ready, propagate them to children + * by allocating missing sets and restricting existing ones. 
+ */ +static void +fixup_sets(hwloc_obj_t obj) +{ + int in_memory_list; + hwloc_obj_t child; + + child = obj->first_child; + in_memory_list = 0; + /* iterate over normal children first, we'll come back for memory children later */ + + iterate: + while (child) { + /* our cpuset must be included in our parent's one */ + hwloc_bitmap_and(child->cpuset, child->cpuset, obj->cpuset); + hwloc_bitmap_and(child->nodeset, child->nodeset, obj->nodeset); + /* our complete_cpuset must be included in our parent's one, but can be larger than our cpuset */ + if (child->complete_cpuset) { + hwloc_bitmap_and(child->complete_cpuset, child->complete_cpuset, obj->complete_cpuset); + } else { + child->complete_cpuset = hwloc_bitmap_dup(child->cpuset); + } + if (child->complete_nodeset) { + hwloc_bitmap_and(child->complete_nodeset, child->complete_nodeset, obj->complete_nodeset); + } else { + child->complete_nodeset = hwloc_bitmap_dup(child->nodeset); + } + + fixup_sets(child); + child = child->next_sibling; + } + + /* switch to memory children list if any */ + if (!in_memory_list && obj->memory_first_child) { + child = obj->memory_first_child; + in_memory_list = 1; + goto iterate; + } + + /* No sets in I/O or Misc */ +} + +/* Setup object cpusets/nodesets by OR'ing its children. */ +int +hwloc_obj_add_other_obj_sets(hwloc_obj_t dst, hwloc_obj_t src) +{ +#define ADD_OTHER_OBJ_SET(_dst, _src, _set) \ + if ((_src)->_set) { \ + if (!(_dst)->_set) \ + (_dst)->_set = hwloc_bitmap_alloc(); \ + hwloc_bitmap_or((_dst)->_set, (_dst)->_set, (_src)->_set); \ + } + ADD_OTHER_OBJ_SET(dst, src, cpuset); + ADD_OTHER_OBJ_SET(dst, src, complete_cpuset); + ADD_OTHER_OBJ_SET(dst, src, nodeset); + ADD_OTHER_OBJ_SET(dst, src, complete_nodeset); + return 0; +} + +int +hwloc_obj_add_children_sets(hwloc_obj_t obj) +{ + hwloc_obj_t child; + for_each_child(child, obj) { + hwloc_obj_add_other_obj_sets(obj, child); + } + /* No need to look at Misc children, they contain no PU. 
*/ + return 0; +} + +/* CPU objects are inserted by cpusets, we know their cpusets are properly included. + * We just need fixup_sets() to make sure they aren't too wide. + * + * Memory objects are inserted by cpusets to find their CPU parent, + * but nodesets are only used inside the memory hierarchy below that parent. + * Thus we need to propagate nodesets to CPU-side parents and children. + * + * A memory object nodeset consists of NUMA nodes below it. + * A normal object nodeset consists in NUMA nodes attached to any + * of its children or parents. + */ +static void +propagate_nodeset(hwloc_obj_t obj) +{ + hwloc_obj_t child; + + /* Start our nodeset from the parent one. + * It was emptied at root, and it's being filled with local nodes + * in that branch of the tree as we recurse down. + */ + if (!obj->nodeset) + obj->nodeset = hwloc_bitmap_alloc(); + if (obj->parent) + hwloc_bitmap_copy(obj->nodeset, obj->parent->nodeset); + else + hwloc_bitmap_zero(obj->nodeset); + + /* Don't clear complete_nodeset, just make sure it contains nodeset. + * We cannot clear the complete_nodeset at root and rebuild it down because + * some bits may correspond to offline/disallowed NUMA nodes missing in the topology. 
+ */ + if (!obj->complete_nodeset) + obj->complete_nodeset = hwloc_bitmap_dup(obj->nodeset); + else + hwloc_bitmap_or(obj->complete_nodeset, obj->complete_nodeset, obj->nodeset); + + /* now add our local nodeset */ + for_each_memory_child(child, obj) { + /* FIXME rather recurse in the memory hierarchy */ + + /* first, update children complete_nodeset if needed */ + if (!child->complete_nodeset) + child->complete_nodeset = hwloc_bitmap_dup(child->nodeset); + else + hwloc_bitmap_or(child->complete_nodeset, child->complete_nodeset, child->nodeset); + + /* add memory children nodesets to ours */ + hwloc_bitmap_or(obj->nodeset, obj->nodeset, child->nodeset); + hwloc_bitmap_or(obj->complete_nodeset, obj->complete_nodeset, child->complete_nodeset); + + /* by the way, copy our cpusets to memory children */ + if (child->cpuset) + hwloc_bitmap_copy(child->cpuset, obj->cpuset); + else + child->cpuset = hwloc_bitmap_dup(obj->cpuset); + if (child->complete_cpuset) + hwloc_bitmap_copy(child->complete_cpuset, obj->complete_cpuset); + else + child->complete_cpuset = hwloc_bitmap_dup(obj->complete_cpuset); + } + + /* Propagate our nodeset to CPU children. */ + for_each_child(child, obj) { + propagate_nodeset(child); + } + + /* Propagate CPU children specific nodesets back to us. + * + * We cannot merge these two loops because we don't want to first child + * nodeset to be propagated back to us and then down to the second child. + * Each child may have its own local nodeset, + * each of them is propagated to us, but not to other children. 
+ */ + for_each_child(child, obj) { + hwloc_bitmap_or(obj->nodeset, obj->nodeset, child->nodeset); + hwloc_bitmap_or(obj->complete_nodeset, obj->complete_nodeset, child->complete_nodeset); + } + + /* No nodeset under I/O or Misc */ + +} + +static void +remove_unused_sets(hwloc_topology_t topology, hwloc_obj_t obj) +{ + hwloc_obj_t child; + + hwloc_bitmap_and(obj->cpuset, obj->cpuset, topology->allowed_cpuset); + hwloc_bitmap_and(obj->nodeset, obj->nodeset, topology->allowed_nodeset); + + for_each_child(child, obj) + remove_unused_sets(topology, child); + for_each_memory_child(child, obj) + remove_unused_sets(topology, child); + /* No cpuset under I/O or Misc */ +} + +static void +hwloc__filter_bridges(hwloc_topology_t topology, hwloc_obj_t root, unsigned depth) +{ + hwloc_obj_t child, *pchild; + + /* filter I/O children and recurse */ + for_each_io_child_safe(child, root, pchild) { + enum hwloc_type_filter_e filter = topology->type_filter[child->type]; + + /* recurse into grand-children */ + hwloc__filter_bridges(topology, child, depth+1); + + child->attr->bridge.depth = depth; + + if (child->type == HWLOC_OBJ_BRIDGE + && filter == HWLOC_TYPE_FILTER_KEEP_IMPORTANT + && !child->io_first_child) { + unlink_and_free_single_object(pchild); + topology->modified = 1; + } + } +} + +static void +hwloc_filter_bridges(hwloc_topology_t topology, hwloc_obj_t parent) +{ + hwloc_obj_t child = parent->first_child; + while (child) { + hwloc_filter_bridges(topology, child); + child = child->next_sibling; + } + + hwloc__filter_bridges(topology, parent, 0); +} + +void +hwloc__reorder_children(hwloc_obj_t parent) +{ + /* move the children list on the side */ + hwloc_obj_t *prev, child, children = parent->first_child; + parent->first_child = NULL; + while (children) { + /* dequeue child */ + child = children; + children = child->next_sibling; + /* find where to enqueue it */ + prev = &parent->first_child; + while (*prev && hwloc__object_cpusets_compare_first(child, *prev) > 0) + prev = 
&((*prev)->next_sibling); + /* enqueue */ + child->next_sibling = *prev; + *prev = child; + } + /* No ordering to enforce for Misc or I/O children. */ +} + +/* Remove all normal children whose cpuset is empty, + * and memory children whose nodeset is empty. + * Also don't remove objects that have I/O children, but ignore Misc. + */ +static void +remove_empty(hwloc_topology_t topology, hwloc_obj_t *pobj) +{ + hwloc_obj_t obj = *pobj, child, *pchild; + + for_each_child_safe(child, obj, pchild) + remove_empty(topology, pchild); + for_each_memory_child_safe(child, obj, pchild) + remove_empty(topology, pchild); + /* No cpuset under I/O or Misc */ + + if (obj->first_child /* only remove if all children were removed above, so that we don't remove parents of NUMAnode */ + || obj->memory_first_child /* only remove if no memory attached there */ + || obj->io_first_child /* only remove if no I/O is attached there */) + /* ignore Misc */ + return; + + if (hwloc__obj_type_is_normal(obj->type)) { + if (!hwloc_bitmap_iszero(obj->cpuset)) + return; + } else { + assert(hwloc__obj_type_is_memory(obj->type)); + if (!hwloc_bitmap_iszero(obj->nodeset)) + return; + } + + hwloc_debug("%s", "\nRemoving empty object "); + hwloc_debug_print_object(0, obj); + unlink_and_free_single_object(pobj); + topology->modified = 1; +} + +/* reset type depth before modifying levels (either reconnecting or filtering/keep_structure) */ +static void +hwloc_reset_normal_type_depths(hwloc_topology_t topology) +{ + unsigned i; + for (i=HWLOC_OBJ_TYPE_MIN; i<=HWLOC_OBJ_GROUP; i++) + topology->type_depth[i] = HWLOC_TYPE_DEPTH_UNKNOWN; + /* type contiguity is asserted in topology_check() */ +} + +static int +hwloc_dont_merge_group_level(hwloc_topology_t topology, unsigned i) +{ + unsigned j; + + /* Don't merge some groups in that level? 
*/ + for(j=0; j<topology->level_nbobjects[i]; j++) + if (topology->levels[i][j]->attr->group.dont_merge) + return 1; + + return 0; +} + +/* compare i-th and i-1-th levels structure */ +static int +hwloc_compare_levels_structure(hwloc_topology_t topology, unsigned i) +{ + int checkmemory = (topology->levels[i][0]->type == HWLOC_OBJ_PU); + unsigned j; + + if (topology->level_nbobjects[i-1] != topology->level_nbobjects[i]) + return -1; + + for(j=0; j<topology->level_nbobjects[i]; j++) { + if (topology->levels[i-1][j]->arity != 1) + return -1; + if (checkmemory && topology->levels[i-1][j]->memory_arity) + /* don't merge PUs if there's memory above */ + return -1; + } + /* same number of objects with arity 1 above, no problem */ + return 0; +} + +/* return > 0 if any level was removed, which means reconnect is needed */ +static void +hwloc_filter_levels_keep_structure(hwloc_topology_t topology) +{ + unsigned i, j; + int res = 0; + + /* start from the bottom since we'll remove intermediate levels */ + for(i=topology->nb_levels-1; i>0; i--) { + int replacechild = 0, replaceparent = 0; + hwloc_obj_t obj1 = topology->levels[i-1][0]; + hwloc_obj_t obj2 = topology->levels[i][0]; + hwloc_obj_type_t type1 = obj1->type; + hwloc_obj_type_t type2 = obj2->type; + + /* Check whether parents and/or children can be replaced */ + if (topology->type_filter[type1] == HWLOC_TYPE_FILTER_KEEP_STRUCTURE) { + /* Parents can be ignored in favor of children. */ + replaceparent = 1; + if (type1 == HWLOC_OBJ_GROUP && hwloc_dont_merge_group_level(topology, i-1)) + replaceparent = 0; + } + if (topology->type_filter[type2] == HWLOC_TYPE_FILTER_KEEP_STRUCTURE) { + /* Children can be ignored in favor of parents. 
*/ + replacechild = 1; + if (type1 == HWLOC_OBJ_GROUP && hwloc_dont_merge_group_level(topology, i)) + replacechild = 0; + } + if (!replacechild && !replaceparent) + /* no ignoring */ + continue; + /* Decide which one to actually replace */ + if (replaceparent && replacechild) { + /* If both may be replaced, look at obj_type_priority */ + if (obj_type_priority[type1] >= obj_type_priority[type2]) + replaceparent = 0; + else + replacechild = 0; + } + /* Are these levels actually identical? */ + if (hwloc_compare_levels_structure(topology, i) < 0) + continue; + hwloc_debug("may merge levels #%u=%s and #%u=%s\n", + i-1, hwloc_obj_type_string(type1), i, hwloc_obj_type_string(type2)); + + /* OK, remove intermediate objects from the tree. */ + for(j=0; j<topology->level_nbobjects[i]; j++) { + hwloc_obj_t parent = topology->levels[i-1][j]; + hwloc_obj_t child = topology->levels[i][j]; + unsigned k; + if (replacechild) { + /* move child's children to parent */ + parent->first_child = child->first_child; + parent->last_child = child->last_child; + parent->arity = child->arity; + free(parent->children); + parent->children = child->children; + child->children = NULL; + /* update children parent */ + for(k=0; k<parent->arity; k++) + parent->children[k]->parent = parent; + /* append child memory/io/misc children to parent */ + if (child->memory_first_child) { + append_siblings_list(&parent->memory_first_child, child->memory_first_child, parent); + parent->memory_arity += child->memory_arity; + } + if (child->io_first_child) { + append_siblings_list(&parent->io_first_child, child->io_first_child, parent); + parent->io_arity += child->io_arity; + } + if (child->misc_first_child) { + append_siblings_list(&parent->misc_first_child, child->misc_first_child, parent); + parent->misc_arity += child->misc_arity; + } + hwloc_free_unlinked_object(child); + } else { + /* replace parent with child in grand-parent */ + if (parent->parent) { + parent->parent->children[parent->sibling_rank] = child; + 
child->sibling_rank = parent->sibling_rank; + if (!parent->sibling_rank) { + parent->parent->first_child = child; + /* child->prev_sibling was already NULL, child was single */ + } else { + child->prev_sibling = parent->parent->children[parent->sibling_rank-1]; + child->prev_sibling->next_sibling = child; + } + if (parent->sibling_rank == parent->parent->arity-1) { + parent->parent->last_child = child; + /* child->next_sibling was already NULL, child was single */ + } else { + child->next_sibling = parent->parent->children[parent->sibling_rank+1]; + child->next_sibling->prev_sibling = child; + } + /* update child parent */ + child->parent = parent->parent; + } else { + /* make child the new root */ + topology->levels[0][0] = child; + child->parent = NULL; + } + /* prepend parent memory/io/misc children to child */ + if (parent->memory_first_child) { + prepend_siblings_list(&child->memory_first_child, parent->memory_first_child, child); + child->memory_arity += parent->memory_arity; + } + if (parent->io_first_child) { + prepend_siblings_list(&child->io_first_child, parent->io_first_child, child); + child->io_arity += parent->io_arity; + } + if (parent->misc_first_child) { + prepend_siblings_list(&child->misc_first_child, parent->misc_first_child, child); + child->misc_arity += parent->misc_arity; + } + hwloc_free_unlinked_object(parent); + /* prev/next_sibling will be updated below in another loop */ + } + } + if (replaceparent && i>1) { + /* Update sibling list within modified parent->parent arrays */ + for(j=0; jlevel_nbobjects[i]; j++) { + hwloc_obj_t child = topology->levels[i][j]; + unsigned rank = child->sibling_rank; + child->prev_sibling = rank > 0 ? child->parent->children[rank-1] : NULL; + child->next_sibling = rank < child->parent->arity-1 ? child->parent->children[rank+1] : NULL; + } + } + + /* Update levels so that the next reconnect isn't confused */ + if (replaceparent) { + /* Removing level i-1, so move levels [i..nb_levels-1] to [i-1..] 
*/ + free(topology->levels[i-1]); + memmove(&topology->levels[i-1], + &topology->levels[i], + (topology->nb_levels-i)*sizeof(topology->levels[i])); + memmove(&topology->level_nbobjects[i-1], + &topology->level_nbobjects[i], + (topology->nb_levels-i)*sizeof(topology->level_nbobjects[i])); + hwloc_debug("removed parent level %s at depth %u\n", + hwloc_obj_type_string(type1), i-1); + } else { + /* Removing level i, so move levels [i+1..nb_levels-1] and later to [i..] */ + free(topology->levels[i]); + memmove(&topology->levels[i], + &topology->levels[i+1], + (topology->nb_levels-1-i)*sizeof(topology->levels[i])); + memmove(&topology->level_nbobjects[i], + &topology->level_nbobjects[i+1], + (topology->nb_levels-1-i)*sizeof(topology->level_nbobjects[i])); + hwloc_debug("removed child level %s at depth %u\n", + hwloc_obj_type_string(type2), i); + } + topology->level_nbobjects[topology->nb_levels-1] = 0; + topology->levels[topology->nb_levels-1] = NULL; + topology->nb_levels--; + + res++; + } + + if (res > 0) { + /* Update object and type depths if some levels were removed */ + hwloc_reset_normal_type_depths(topology); + for(i=0; inb_levels; i++) { + hwloc_obj_type_t type = topology->levels[i][0]->type; + for(j=0; jlevel_nbobjects[i]; j++) + topology->levels[i][j]->depth = (int)i; + if (topology->type_depth[type] == HWLOC_TYPE_DEPTH_UNKNOWN) + topology->type_depth[type] = (int)i; + else + topology->type_depth[type] = HWLOC_TYPE_DEPTH_MULTIPLE; + } + } +} + +static void +hwloc_propagate_symmetric_subtree(hwloc_topology_t topology, hwloc_obj_t root) +{ + hwloc_obj_t child; + unsigned arity = root->arity; + int ok; + + /* assume we're not symmetric by default */ + root->symmetric_subtree = 0; + + /* if no child, we are symmetric */ + if (!arity) + goto good; + + /* FIXME ignore memory just like I/O and Misc? */ + + /* look at normal children only, I/O and Misc are ignored. + * return if any child is not symmetric. 
+ */ + ok = 1; + for_each_child(child, root) { + hwloc_propagate_symmetric_subtree(topology, child); + if (!child->symmetric_subtree) + ok = 0; + } + if (!ok) + return; + /* Misc and I/O children do not care about symmetric_subtree */ + + /* if single child is symmetric, we're good */ + if (arity == 1) + goto good; + + /* now check that children subtrees are identical. + * just walk down the first child in each tree and compare their depth and arities + */ +{ + HWLOC_VLA(hwloc_obj_t, array, arity); + memcpy(array, root->children, arity * sizeof(*array)); + while (1) { + unsigned i; + /* check current level arities and depth */ + for(i=1; idepth != array[0]->depth + || array[i]->arity != array[0]->arity) { + return; + } + if (!array[0]->arity) + /* no more children level, we're ok */ + break; + /* look at first child of each element now */ + for(i=0; ifirst_child; + } +} + + /* everything went fine, we're symmetric */ + good: + root->symmetric_subtree = 1; +} + +static void hwloc_set_group_depth(hwloc_topology_t topology) +{ + unsigned groupdepth = 0; + unsigned i, j; + for(i=0; inb_levels; i++) + if (topology->levels[i][0]->type == HWLOC_OBJ_GROUP) { + for (j = 0; j < topology->level_nbobjects[i]; j++) + topology->levels[i][j]->attr->group.depth = groupdepth; + groupdepth++; + } +} + +/* + * Initialize handy pointers in the whole topology. + * The topology only had first_child and next_sibling pointers. + * When this funtions return, all parent/children pointers are initialized. + * The remaining fields (levels, cousins, logical_index, depth, ...) will + * be setup later in hwloc_connect_levels(). + * + * Can be called several times, so may have to update the array. 
+ */ +static void +hwloc_connect_children(hwloc_obj_t parent) +{ + unsigned n, oldn = parent->arity; + hwloc_obj_t child, prev_child; + int ok; + + /* Main children list */ + + ok = 1; + prev_child = NULL; + for (n = 0, child = parent->first_child; + child; + n++, prev_child = child, child = child->next_sibling) { + child->sibling_rank = n; + child->prev_sibling = prev_child; + /* already OK in the array? */ + if (n >= oldn || parent->children[n] != child) + ok = 0; + /* recurse */ + hwloc_connect_children(child); + } + parent->last_child = prev_child; + parent->arity = n; + if (!n) { + /* no need for an array anymore */ + free(parent->children); + parent->children = NULL; + goto memory; + } + if (ok) + /* array is already OK (even if too large) */ + goto memory; + + /* alloc a larger array if needed */ + if (oldn < n) { + free(parent->children); + parent->children = malloc(n * sizeof(*parent->children)); + } + /* refill */ + for (n = 0, child = parent->first_child; + child; + n++, child = child->next_sibling) { + parent->children[n] = child; + } + + + + memory: + /* Memory children list */ + + prev_child = NULL; + for (n = 0, child = parent->memory_first_child; + child; + n++, prev_child = child, child = child->next_sibling) { + child->parent = parent; + child->sibling_rank = n; + child->prev_sibling = prev_child; + hwloc_connect_children(child); + } + parent->memory_arity = n; + + /* I/O children list */ + + prev_child = NULL; + for (n = 0, child = parent->io_first_child; + child; + n++, prev_child = child, child = child->next_sibling) { + child->parent = parent; + child->sibling_rank = n; + child->prev_sibling = prev_child; + hwloc_connect_children(child); + } + parent->io_arity = n; + + /* Misc children list */ + + prev_child = NULL; + for (n = 0, child = parent->misc_first_child; + child; + n++, prev_child = child, child = child->next_sibling) { + child->parent = parent; + child->sibling_rank = n; + child->prev_sibling = prev_child; + 
hwloc_connect_children(child); + } + parent->misc_arity = n; +} + +/* + * Check whether there is an object below ROOT that has the same type as OBJ + */ +static int +find_same_type(hwloc_obj_t root, hwloc_obj_t obj) +{ + hwloc_obj_t child; + + if (hwloc_type_cmp(root, obj) == HWLOC_OBJ_EQUAL) + return 1; + + for_each_child (child, root) + if (find_same_type(child, obj)) + return 1; + + return 0; +} + +/* traverse the array of current object and compare them with top_obj. + * if equal, take the object and put its children into the remaining objs. + * if not equal, put the object into the remaining objs. + */ +static unsigned +hwloc_level_take_objects(hwloc_obj_t top_obj, + hwloc_obj_t *current_objs, unsigned n_current_objs, + hwloc_obj_t *taken_objs, unsigned n_taken_objs __hwloc_attribute_unused, + hwloc_obj_t *remaining_objs, unsigned n_remaining_objs __hwloc_attribute_unused) +{ + unsigned taken_i = 0; + unsigned new_i = 0; + unsigned i, j; + + for (i = 0; i < n_current_objs; i++) + if (hwloc_type_cmp(top_obj, current_objs[i]) == HWLOC_OBJ_EQUAL) { + /* Take it, add main children. */ + taken_objs[taken_i++] = current_objs[i]; + for (j = 0; j < current_objs[i]->arity; j++) + remaining_objs[new_i++] = current_objs[i]->children[j]; + } else { + /* Leave it. */ + remaining_objs[new_i++] = current_objs[i]; + } + +#ifdef HWLOC_DEBUG + /* Make sure we didn't mess up. 
*/ + assert(taken_i == n_taken_objs); + assert(new_i == n_current_objs - n_taken_objs + n_remaining_objs); +#endif + + return new_i; +} + +static int +hwloc_build_level_from_list(struct hwloc_special_level_s *slevel) +{ + unsigned i, nb; + struct hwloc_obj * obj; + + /* count */ + obj = slevel->first; + i = 0; + while (obj) { + i++; + obj = obj->next_cousin; + } + nb = i; + + if (nb) { + /* allocate and fill level */ + slevel->objs = malloc(nb * sizeof(struct hwloc_obj *)); + obj = slevel->first; + i = 0; + while (obj) { + obj->logical_index = i; + slevel->objs[i] = obj; + i++; + obj = obj->next_cousin; + } + } + + slevel->nbobjs = nb; + return 0; +} + +static void +hwloc_append_special_object(struct hwloc_special_level_s *level, hwloc_obj_t obj) +{ + if (level->first) { + obj->prev_cousin = level->last; + obj->prev_cousin->next_cousin = obj; + level->last = obj; + } else { + obj->prev_cousin = NULL; + level->first = level->last = obj; + } +} + +/* Append special objects to their lists */ +static void +hwloc_list_special_objects(hwloc_topology_t topology, hwloc_obj_t obj) +{ + hwloc_obj_t child; + + if (obj->type == HWLOC_OBJ_NUMANODE) { + obj->next_cousin = NULL; + obj->depth = HWLOC_TYPE_DEPTH_NUMANODE; + /* Insert the main NUMA node list */ + hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_NUMANODE], obj); + + /* Recurse */ + for_each_memory_child(child, obj) + hwloc_list_special_objects(topology, child); + for_each_misc_child(child, obj) + hwloc_list_special_objects(topology, child); + + } else if (obj->type == HWLOC_OBJ_MISC) { + obj->next_cousin = NULL; + obj->depth = HWLOC_TYPE_DEPTH_MISC; + /* Insert the main Misc list */ + hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_MISC], obj); + /* Recurse, Misc only have Misc children */ + for_each_misc_child(child, obj) + hwloc_list_special_objects(topology, child); + + } else if (hwloc__obj_type_is_io(obj->type)) { + obj->next_cousin = NULL; + + if (obj->type == HWLOC_OBJ_BRIDGE) { + 
obj->depth = HWLOC_TYPE_DEPTH_BRIDGE; + /* Insert in the main bridge list */ + hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_BRIDGE], obj); + + } else if (obj->type == HWLOC_OBJ_PCI_DEVICE) { + obj->depth = HWLOC_TYPE_DEPTH_PCI_DEVICE; + /* Insert in the main pcidev list */ + hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_PCIDEV], obj); + + } else if (obj->type == HWLOC_OBJ_OS_DEVICE) { + obj->depth = HWLOC_TYPE_DEPTH_OS_DEVICE; + /* Insert in the main osdev list */ + hwloc_append_special_object(&topology->slevels[HWLOC_SLEVEL_OSDEV], obj); + } + /* Recurse, I/O only have I/O and Misc children */ + for_each_io_child(child, obj) + hwloc_list_special_objects(topology, child); + for_each_misc_child(child, obj) + hwloc_list_special_objects(topology, child); + + } else { + /* Recurse */ + for_each_child(child, obj) + hwloc_list_special_objects(topology, child); + for_each_memory_child(child, obj) + hwloc_list_special_objects(topology, child); + for_each_io_child(child, obj) + hwloc_list_special_objects(topology, child); + for_each_misc_child(child, obj) + hwloc_list_special_objects(topology, child); + } +} + +/* Build I/O levels */ +static void +hwloc_connect_io_misc_levels(hwloc_topology_t topology) +{ + unsigned i; + + for(i=0; islevels[i].objs); + memset(&topology->slevels, 0, sizeof(topology->slevels)); + + hwloc_list_special_objects(topology, topology->levels[0][0]); + + for(i=0; islevels[i]); +} + +/* + * Do the remaining work that hwloc_connect_children() did not do earlier. + * Requires object arity and children list to be properly initialized (by hwloc_connect_children()). 
+ */ +static int +hwloc_connect_levels(hwloc_topology_t topology) +{ + unsigned l, i=0; + hwloc_obj_t *objs, *taken_objs, *new_objs, top_obj, root; + unsigned n_objs, n_taken_objs, n_new_objs; + + /* reset non-root levels (root was initialized during init and will not change here) */ + for(l=1; lnb_levels; l++) + free(topology->levels[l]); + memset(topology->levels+1, 0, (topology->nb_levels-1)*sizeof(*topology->levels)); + memset(topology->level_nbobjects+1, 0, (topology->nb_levels-1)*sizeof(*topology->level_nbobjects)); + topology->nb_levels = 1; + + /* initialize all non-IO/non-Misc depths to unknown */ + hwloc_reset_normal_type_depths(topology); + + /* initialize root type depth */ + root = topology->levels[0][0]; + root->depth = 0; + topology->type_depth[root->type] = 0; + /* root level */ + root->logical_index = 0; + root->prev_cousin = NULL; + root->next_cousin = NULL; + /* root as a child of nothing */ + root->parent = NULL; + root->sibling_rank = 0; + root->prev_sibling = NULL; + root->next_sibling = NULL; + + /* Start with children of the whole system. */ + n_objs = topology->levels[0][0]->arity; + objs = malloc(n_objs * sizeof(objs[0])); + if (!objs) { + errno = ENOMEM; + return -1; + } + memcpy(objs, topology->levels[0][0]->children, n_objs*sizeof(objs[0])); + + /* Keep building levels while there are objects left in OBJS. */ + while (n_objs) { + /* At this point, the objs array contains only objects that may go into levels */ + + /* First find which type of object is the topmost. + * Don't use PU if there are other types since we want to keep PU at the bottom. + */ + + /* Look for the first non-PU object, and use the first PU if we really find nothing else */ + for (i = 0; i < n_objs; i++) + if (objs[i]->type != HWLOC_OBJ_PU) + break; + top_obj = i == n_objs ? 
objs[0] : objs[i]; + + /* See if this is actually the topmost object */ + for (i = 0; i < n_objs; i++) { + if (hwloc_type_cmp(top_obj, objs[i]) != HWLOC_OBJ_EQUAL) { + if (find_same_type(objs[i], top_obj)) { + /* OBJS[i] is strictly above an object of the same type as TOP_OBJ, so it + * is above TOP_OBJ. */ + top_obj = objs[i]; + } + } + } + + /* Now peek all objects of the same type, build a level with that and + * replace them with their children. */ + + /* First count them. */ + n_taken_objs = 0; + n_new_objs = 0; + for (i = 0; i < n_objs; i++) + if (hwloc_type_cmp(top_obj, objs[i]) == HWLOC_OBJ_EQUAL) { + n_taken_objs++; + n_new_objs += objs[i]->arity; + } + + /* New level. */ + taken_objs = malloc((n_taken_objs + 1) * sizeof(taken_objs[0])); + /* New list of pending objects. */ + if (n_objs - n_taken_objs + n_new_objs) { + new_objs = malloc((n_objs - n_taken_objs + n_new_objs) * sizeof(new_objs[0])); + } else { +#ifdef HWLOC_DEBUG + assert(!n_new_objs); + assert(n_objs == n_taken_objs); +#endif + new_objs = NULL; + } + + n_new_objs = hwloc_level_take_objects(top_obj, + objs, n_objs, + taken_objs, n_taken_objs, + new_objs, n_new_objs); + + /* Ok, put numbers in the level and link cousins. */ + for (i = 0; i < n_taken_objs; i++) { + taken_objs[i]->depth = (int) topology->nb_levels; + taken_objs[i]->logical_index = i; + if (i) { + taken_objs[i]->prev_cousin = taken_objs[i-1]; + taken_objs[i-1]->next_cousin = taken_objs[i]; + } + } + taken_objs[0]->prev_cousin = NULL; + taken_objs[n_taken_objs-1]->next_cousin = NULL; + + /* One more level! 
*/ + hwloc_debug("--- %s level", hwloc_obj_type_string(top_obj->type)); + hwloc_debug(" has number %u\n\n", topology->nb_levels); + + if (topology->type_depth[top_obj->type] == HWLOC_TYPE_DEPTH_UNKNOWN) + topology->type_depth[top_obj->type] = (int) topology->nb_levels; + else + topology->type_depth[top_obj->type] = HWLOC_TYPE_DEPTH_MULTIPLE; /* mark as unknown */ + + taken_objs[n_taken_objs] = NULL; + + if (topology->nb_levels == topology->nb_levels_allocated) { + /* extend the arrays of levels */ + void *tmplevels, *tmpnbobjs; + tmplevels = realloc(topology->levels, + 2 * topology->nb_levels_allocated * sizeof(*topology->levels)); + tmpnbobjs = realloc(topology->level_nbobjects, + 2 * topology->nb_levels_allocated * sizeof(*topology->level_nbobjects)); + if (!tmplevels || !tmpnbobjs) { + fprintf(stderr, "hwloc failed to realloc level arrays to %u\n", topology->nb_levels_allocated * 2); + + /* if one realloc succeeded, make sure the caller will free the new buffer */ + if (tmplevels) + topology->levels = tmplevels; + if (tmpnbobjs) + topology->level_nbobjects = tmpnbobjs; + /* the realloc that failed left topology->level_foo untouched, will be freed by the caller */ + + free(objs); + free(taken_objs); + free(new_objs); + errno = ENOMEM; + return -1; + } + topology->levels = tmplevels; + topology->level_nbobjects = tmpnbobjs; + memset(topology->levels + topology->nb_levels_allocated, + 0, topology->nb_levels_allocated * sizeof(*topology->levels)); + memset(topology->level_nbobjects + topology->nb_levels_allocated, + 0, topology->nb_levels_allocated * sizeof(*topology->level_nbobjects)); + topology->nb_levels_allocated *= 2; + } + /* add the new level */ + topology->level_nbobjects[topology->nb_levels] = n_taken_objs; + topology->levels[topology->nb_levels] = taken_objs; + + topology->nb_levels++; + + free(objs); + + /* Switch to new_objs */ + objs = new_objs; + n_objs = n_new_objs; + } + + /* It's empty now. 
*/ + free(objs); + + return 0; +} + +int +hwloc_topology_reconnect(struct hwloc_topology *topology, unsigned long flags) +{ + if (flags) { + errno = EINVAL; + return -1; + } + if (!topology->modified) + return 0; + + hwloc_connect_children(topology->levels[0][0]); + + if (hwloc_connect_levels(topology) < 0) + return -1; + + hwloc_connect_io_misc_levels(topology); + + topology->modified = 0; + + return 0; +} + +void hwloc_alloc_root_sets(hwloc_obj_t root) +{ + /* + * All sets are initially NULL. + * + * At least one backend should call this function to initialize all sets at once. + * XML uses it lazily in case only some sets were given in the XML import. + * + * Other backends can check root->cpuset != NULL to see if somebody + * discovered things before them. + */ + if (!root->cpuset) + root->cpuset = hwloc_bitmap_alloc(); + if (!root->complete_cpuset) + root->complete_cpuset = hwloc_bitmap_alloc(); + if (!root->nodeset) + root->nodeset = hwloc_bitmap_alloc(); + if (!root->complete_nodeset) + root->complete_nodeset = hwloc_bitmap_alloc(); +} + +/* Main discovery loop */ +static int +hwloc_discover(struct hwloc_topology *topology) +{ + struct hwloc_backend *backend; + + topology->modified = 0; /* no need to reconnect yet */ + + topology->allowed_cpuset = hwloc_bitmap_alloc_full(); + topology->allowed_nodeset = hwloc_bitmap_alloc_full(); + + /* discover() callbacks should use hwloc_insert to add objects initialized + * through hwloc_alloc_setup_object. + * For node levels, nodeset and memory must be initialized. + * For cache levels, memory and type/depth must be initialized. + * For group levels, depth must be initialized. + */ + + /* There must be at least a PU object for each logical processor, at worse + * produced by hwloc_setup_pu_level() + */ + + /* To be able to just use hwloc_insert_object_by_cpuset to insert the object + * in the topology according to the cpuset, the cpuset field must be + * initialized. 
+ */ + + /* A priori, All processors are visible in the topology, and allowed + * for the application. + * + * - If some processors exist but topology information is unknown for them + * (and thus the backend couldn't create objects for them), they should be + * added to the complete_cpuset field of the lowest object where the object + * could reside. + * + * - If some processors are not allowed for the application (e.g. for + * administration reasons), they should be dropped from the allowed_cpuset + * field. + * + * The same applies to the node sets complete_nodeset and allowed_cpuset. + * + * If such field doesn't exist yet, it can be allocated, and initialized to + * zero (for complete), or to full (for allowed). The values are + * automatically propagated to the whole tree after detection. + */ + + /* + * Discover CPUs first + */ + backend = topology->backends; + while (NULL != backend) { + if (backend->component->type != HWLOC_DISC_COMPONENT_TYPE_CPU + && backend->component->type != HWLOC_DISC_COMPONENT_TYPE_GLOBAL) + /* not yet */ + goto next_cpubackend; + if (!backend->discover) + goto next_cpubackend; + backend->discover(backend); + hwloc_debug_print_objects(0, topology->levels[0][0]); + +next_cpubackend: + backend = backend->next; + } + + /* One backend should have called hwloc_alloc_root_sets() + * and set bits during PU and NUMA insert. + */ + if (!topology->levels[0][0]->cpuset || hwloc_bitmap_iszero(topology->levels[0][0]->cpuset)) { + hwloc_debug("%s", "No PU added by any CPU and global backend\n"); + errno = EINVAL; + return -1; + } + + if (topology->binding_hooks.get_allowed_resources && topology->is_thissystem) { + const char *env = getenv("HWLOC_THISSYSTEM_ALLOWED_RESOURCES"); + if ((env && atoi(env)) + || (topology->flags & HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES)) + topology->binding_hooks.get_allowed_resources(topology); + } + + /* If there's no NUMA node, add one with all the memory. 
+ * root->complete_nodeset wouldn't be empty if any NUMA was ever added: + * - insert_by_cpuset() adds bits whe PU/NUMA are added. + * - XML takes care of sanitizing nodesets. + */ + if (hwloc_bitmap_iszero(topology->levels[0][0]->complete_nodeset)) { + hwloc_obj_t node; + hwloc_debug("%s", "\nAdd missing single NUMA node\n"); + node = hwloc_alloc_setup_object(topology, HWLOC_OBJ_NUMANODE, 0); + node->cpuset = hwloc_bitmap_dup(topology->levels[0][0]->cpuset); + node->nodeset = hwloc_bitmap_alloc(); + /* other nodesets will be filled below */ + hwloc_bitmap_set(node->nodeset, 0); + memcpy(&node->attr->numanode, &topology->machine_memory, sizeof(topology->machine_memory)); + memset(&topology->machine_memory, 0, sizeof(topology->machine_memory)); + hwloc_insert_object_by_cpuset(topology, node); + } else { + /* if we're sure we found all NUMA nodes without their sizes (x86 backend?), + * we could split topology->total_memory in all of them. + */ + free(topology->machine_memory.page_types); + memset(&topology->machine_memory, 0, sizeof(topology->machine_memory)); + } + + hwloc_debug("%s", "\nFixup root sets\n"); + hwloc_bitmap_and(topology->levels[0][0]->cpuset, topology->levels[0][0]->cpuset, topology->levels[0][0]->complete_cpuset); + hwloc_bitmap_and(topology->levels[0][0]->nodeset, topology->levels[0][0]->nodeset, topology->levels[0][0]->complete_nodeset); + + hwloc_bitmap_and(topology->allowed_cpuset, topology->allowed_cpuset, topology->levels[0][0]->cpuset); + hwloc_bitmap_and(topology->allowed_nodeset, topology->allowed_nodeset, topology->levels[0][0]->nodeset); + + hwloc_debug("%s", "\nPropagate sets\n"); + /* cpuset are already there thanks to the _by_cpuset insertion, + * but nodeset have to be propagated below and above NUMA nodes + */ + propagate_nodeset(topology->levels[0][0]); + /* now fixup parent/children sets */ + fixup_sets(topology->levels[0][0]); + + hwloc_debug_print_objects(0, topology->levels[0][0]); + + if (!(topology->flags & 
HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM)) { + hwloc_debug("%s", "\nRemoving unauthorized sets from all sets\n"); + remove_unused_sets(topology, topology->levels[0][0]); + hwloc_debug_print_objects(0, topology->levels[0][0]); + } + + /* see if we should ignore the root now that we know how many children it has */ + if (!hwloc_filter_check_keep_object(topology, topology->levels[0][0]) + && topology->levels[0][0]->first_child && !topology->levels[0][0]->first_child->next_sibling) { + hwloc_obj_t oldroot = topology->levels[0][0]; + hwloc_obj_t newroot = oldroot->first_child; + /* switch to the new root */ + newroot->parent = NULL; + topology->levels[0][0] = newroot; + /* move oldroot memory/io/misc children before newroot children */ + if (oldroot->memory_first_child) + prepend_siblings_list(&newroot->memory_first_child, oldroot->memory_first_child, newroot); + if (oldroot->io_first_child) + prepend_siblings_list(&newroot->io_first_child, oldroot->io_first_child, newroot); + if (oldroot->misc_first_child) + prepend_siblings_list(&newroot->misc_first_child, oldroot->misc_first_child, newroot); + /* destroy oldroot and use the new one */ + hwloc_free_unlinked_object(oldroot); + } + + /* + * All object cpusets and nodesets are properly set now. + */ + + /* Now connect handy pointers to make remaining discovery easier. 
*/ + hwloc_debug("%s", "\nOk, finished tweaking, now connect\n"); + if (hwloc_topology_reconnect(topology, 0) < 0) + return -1; + hwloc_debug_print_objects(0, topology->levels[0][0]); + + /* + * Additional discovery with other backends + */ + + backend = topology->backends; + while (NULL != backend) { + if (backend->component->type == HWLOC_DISC_COMPONENT_TYPE_CPU + || backend->component->type == HWLOC_DISC_COMPONENT_TYPE_GLOBAL) + /* already done above */ + goto next_noncpubackend; + if (!backend->discover) + goto next_noncpubackend; + backend->discover(backend); + hwloc_debug_print_objects(0, topology->levels[0][0]); + +next_noncpubackend: + backend = backend->next; + } + + hwloc_pci_belowroot_apply_locality(topology); + + hwloc_debug("%s", "\nNow reconnecting\n"); + hwloc_debug_print_objects(0, topology->levels[0][0]); + + /* Remove some stuff */ + + hwloc_debug("%s", "\nRemoving bridge objects if needed\n"); + hwloc_filter_bridges(topology, topology->levels[0][0]); + hwloc_debug_print_objects(0, topology->levels[0][0]); + + hwloc_debug("%s", "\nRemoving empty objects\n"); + remove_empty(topology, &topology->levels[0][0]); + if (!topology->levels[0][0]) { + fprintf(stderr, "Topology became empty, aborting!\n"); + return -1; + } + if (hwloc_bitmap_iszero(topology->levels[0][0]->cpuset)) { + fprintf(stderr, "Topology does not contain any PU, aborting!\n"); + return -1; + } + if (hwloc_bitmap_iszero(topology->levels[0][0]->nodeset)) { + fprintf(stderr, "Topology does not contain any NUMA node, aborting!\n"); + return -1; + } + hwloc_debug_print_objects(0, topology->levels[0][0]); + + /* Reconnect things after all these changes. + * Often needed because of Groups inserted for I/Os. + * And required for KEEP_STRUCTURE below. 
+ */ + if (hwloc_topology_reconnect(topology, 0) < 0) + return -1; + + hwloc_debug("%s", "\nRemoving levels with HWLOC_TYPE_FILTER_KEEP_STRUCTURE\n"); + hwloc_filter_levels_keep_structure(topology); + hwloc_debug_print_objects(0, topology->levels[0][0]); + + /* accumulate children memory in total_memory fields (only once parent is set) */ + hwloc_debug("%s", "\nPropagate total memory up\n"); + propagate_total_memory(topology->levels[0][0]); + + /* setup the symmetric_subtree attribute */ + hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]); + + /* apply group depths */ + hwloc_set_group_depth(topology); + + /* add some identification attributes if not loading from XML */ + if (topology->backends + && strcmp(topology->backends->component->name, "xml")) { + char *value; + /* add a hwlocVersion */ + hwloc_obj_add_info(topology->levels[0][0], "hwlocVersion", HWLOC_VERSION); + /* add a ProcessName */ + value = hwloc_progname(topology); + if (value) { + hwloc_obj_add_info(topology->levels[0][0], "ProcessName", value); + free(value); + } + } + + return 0; +} + +/* To be called before discovery is actually launched, + * Resets everything in case a previous load initialized some stuff. 
+ */ +void +hwloc_topology_setup_defaults(struct hwloc_topology *topology) +{ + struct hwloc_obj *root_obj; + + /* reset support */ + memset(&topology->binding_hooks, 0, sizeof(topology->binding_hooks)); + memset(topology->support.discovery, 0, sizeof(*topology->support.discovery)); + memset(topology->support.cpubind, 0, sizeof(*topology->support.cpubind)); + memset(topology->support.membind, 0, sizeof(*topology->support.membind)); + + /* Only the System object on top by default */ + topology->next_gp_index = 1; /* keep 0 as an invalid value */ + topology->nb_levels = 1; /* there's at least SYSTEM */ + topology->levels[0] = hwloc_tma_malloc (topology->tma, sizeof (hwloc_obj_t)); + topology->level_nbobjects[0] = 1; + + /* Machine-wide memory */ + topology->machine_memory.local_memory = 0; + topology->machine_memory.page_types_len = 0; + topology->machine_memory.page_types = NULL; + + /* Allowed stuff */ + topology->allowed_cpuset = NULL; + topology->allowed_nodeset = NULL; + + /* NULLify other special levels */ + memset(&topology->slevels, 0, sizeof(topology->slevels)); + /* assert the indexes of special levels */ + HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_NUMANODE == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_NUMANODE)); + HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_MISC == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_MISC)); + HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_BRIDGE == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_BRIDGE)); + HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_PCIDEV == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_PCI_DEVICE)); + HWLOC_BUILD_ASSERT(HWLOC_SLEVEL_OSDEV == HWLOC_SLEVEL_FROM_DEPTH(HWLOC_TYPE_DEPTH_OS_DEVICE)); + + /* sane values to type_depth */ + hwloc_reset_normal_type_depths(topology); + topology->type_depth[HWLOC_OBJ_NUMANODE] = HWLOC_TYPE_DEPTH_NUMANODE; + topology->type_depth[HWLOC_OBJ_MISC] = HWLOC_TYPE_DEPTH_MISC; + topology->type_depth[HWLOC_OBJ_BRIDGE] = HWLOC_TYPE_DEPTH_BRIDGE; + topology->type_depth[HWLOC_OBJ_PCI_DEVICE] = HWLOC_TYPE_DEPTH_PCI_DEVICE; + 
topology->type_depth[HWLOC_OBJ_OS_DEVICE] = HWLOC_TYPE_DEPTH_OS_DEVICE; + + /* Create the actual machine object, but don't touch its attributes yet + * since the OS backend may still change the object into something else + * (for instance System) + */ + root_obj = hwloc_alloc_setup_object(topology, HWLOC_OBJ_MACHINE, 0); + topology->levels[0][0] = root_obj; +} + +static void hwloc__topology_filter_init(struct hwloc_topology *topology); + +/* This function may use a tma, it cannot free() or realloc() */ +static int +hwloc__topology_init (struct hwloc_topology **topologyp, + unsigned nblevels, + struct hwloc_tma *tma) +{ + struct hwloc_topology *topology; + + topology = hwloc_tma_malloc (tma, sizeof (struct hwloc_topology)); + if(!topology) + return -1; + + topology->tma = tma; + + hwloc_components_init(); /* uses malloc without tma, but won't need it since dup() caller already took a reference */ + hwloc_backends_init(topology); + hwloc_pci_discovery_init(topology); /* make sure both dup() and load() get sane variables */ + + /* Setup topology context */ + topology->is_loaded = 0; + topology->flags = 0; + topology->is_thissystem = 1; + topology->pid = 0; + topology->userdata = NULL; + topology->topology_abi = HWLOC_TOPOLOGY_ABI; + topology->adopted_shmem_addr = NULL; + topology->adopted_shmem_length = 0; + + topology->support.discovery = hwloc_tma_malloc(tma, sizeof(*topology->support.discovery)); + topology->support.cpubind = hwloc_tma_malloc(tma, sizeof(*topology->support.cpubind)); + topology->support.membind = hwloc_tma_malloc(tma, sizeof(*topology->support.membind)); + + topology->nb_levels_allocated = nblevels; /* enough for default 9 levels = Mach+Pack+NUMA+L3+L2+L1d+L1i+Co+PU */ + topology->levels = hwloc_tma_calloc(tma, topology->nb_levels_allocated * sizeof(*topology->levels)); + topology->level_nbobjects = hwloc_tma_calloc(tma, topology->nb_levels_allocated * sizeof(*topology->level_nbobjects)); + + hwloc__topology_filter_init(topology); + + 
hwloc_internal_distances_init(topology); + + topology->userdata_export_cb = NULL; + topology->userdata_import_cb = NULL; + topology->userdata_not_decoded = 0; + + /* Make the topology look like something coherent but empty */ + hwloc_topology_setup_defaults(topology); + + *topologyp = topology; + return 0; +} + +int +hwloc_topology_init (struct hwloc_topology **topologyp) +{ + return hwloc__topology_init(topologyp, + 16, /* 16 is enough for default 9 levels = Mach+Pack+NUMA+L3+L2+L1d+L1i+Co+PU */ + NULL); /* no TMA for normal topologies, too many allocations to fix */ +} + +int +hwloc_topology_set_pid(struct hwloc_topology *topology __hwloc_attribute_unused, + hwloc_pid_t pid __hwloc_attribute_unused) +{ + if (topology->is_loaded) { + errno = EBUSY; + return -1; + } + + /* this does *not* change the backend */ +#ifdef HWLOC_LINUX_SYS + topology->pid = pid; + return 0; +#else /* HWLOC_LINUX_SYS */ + errno = ENOSYS; + return -1; +#endif /* HWLOC_LINUX_SYS */ +} + +int +hwloc_topology_set_synthetic(struct hwloc_topology *topology, const char *description) +{ + if (topology->is_loaded) { + errno = EBUSY; + return -1; + } + + return hwloc_disc_component_force_enable(topology, + 0 /* api */, + -1, "synthetic", + description, NULL, NULL); +} + +int +hwloc_topology_set_xml(struct hwloc_topology *topology, + const char *xmlpath) +{ + if (topology->is_loaded) { + errno = EBUSY; + return -1; + } + + return hwloc_disc_component_force_enable(topology, + 0 /* api */, + -1, "xml", + xmlpath, NULL, NULL); +} + +int +hwloc_topology_set_xmlbuffer(struct hwloc_topology *topology, + const char *xmlbuffer, + int size) +{ + if (topology->is_loaded) { + errno = EBUSY; + return -1; + } + + return hwloc_disc_component_force_enable(topology, + 0 /* api */, + -1, "xml", NULL, + xmlbuffer, (void*) (uintptr_t) size); +} + +int +hwloc_topology_set_flags (struct hwloc_topology *topology, unsigned long flags) +{ + if (topology->is_loaded) { + /* actually harmless */ + errno = EBUSY; + return -1; 
+ } + + if (flags & ~(HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM|HWLOC_TOPOLOGY_FLAG_IS_THISSYSTEM|HWLOC_TOPOLOGY_FLAG_THISSYSTEM_ALLOWED_RESOURCES)) { + errno = EINVAL; + return -1; + } + + topology->flags = flags; + return 0; +} + +unsigned long +hwloc_topology_get_flags (struct hwloc_topology *topology) +{ + return topology->flags; +} + +static void +hwloc__topology_filter_init(struct hwloc_topology *topology) +{ + hwloc_obj_type_t type; + /* Only ignore useless cruft by default */ + for(type = HWLOC_OBJ_TYPE_MIN; type < HWLOC_OBJ_TYPE_MAX; type++) + topology->type_filter[type] = HWLOC_TYPE_FILTER_KEEP_ALL; + topology->type_filter[HWLOC_OBJ_L1ICACHE] = HWLOC_TYPE_FILTER_KEEP_NONE; + topology->type_filter[HWLOC_OBJ_L2ICACHE] = HWLOC_TYPE_FILTER_KEEP_NONE; + topology->type_filter[HWLOC_OBJ_L3ICACHE] = HWLOC_TYPE_FILTER_KEEP_NONE; + topology->type_filter[HWLOC_OBJ_GROUP] = HWLOC_TYPE_FILTER_KEEP_STRUCTURE; + topology->type_filter[HWLOC_OBJ_MISC] = HWLOC_TYPE_FILTER_KEEP_NONE; + topology->type_filter[HWLOC_OBJ_BRIDGE] = HWLOC_TYPE_FILTER_KEEP_NONE; + topology->type_filter[HWLOC_OBJ_PCI_DEVICE] = HWLOC_TYPE_FILTER_KEEP_NONE; + topology->type_filter[HWLOC_OBJ_OS_DEVICE] = HWLOC_TYPE_FILTER_KEEP_NONE; +} + +static int +hwloc__topology_set_type_filter(struct hwloc_topology *topology, hwloc_obj_type_t type, enum hwloc_type_filter_e filter) +{ + if (type == HWLOC_OBJ_PU || type == HWLOC_OBJ_NUMANODE || type == HWLOC_OBJ_MACHINE) { + if (filter != HWLOC_TYPE_FILTER_KEEP_ALL) { + /* we need the Machine, PU and NUMA levels */ + errno = EINVAL; + return -1; + } + } else if (hwloc__obj_type_is_special(type)) { + if (filter == HWLOC_TYPE_FILTER_KEEP_STRUCTURE) { + /* I/O and Misc are outside of the main topology structure, makes no sense. 
*/ + errno = EINVAL; + return -1; + } + } else if (type == HWLOC_OBJ_GROUP) { + if (filter == HWLOC_TYPE_FILTER_KEEP_ALL) { + /* Groups are always ignored, at least keep_structure */ + errno = EINVAL; + return -1; + } + } + + /* "important" just means "all" for non-I/O non-Misc */ + if (!hwloc__obj_type_is_special(type) && filter == HWLOC_TYPE_FILTER_KEEP_IMPORTANT) + filter = HWLOC_TYPE_FILTER_KEEP_ALL; + + topology->type_filter[type] = filter; + return 0; +} + +int +hwloc_topology_set_type_filter(struct hwloc_topology *topology, hwloc_obj_type_t type, enum hwloc_type_filter_e filter) +{ + HWLOC_BUILD_ASSERT(HWLOC_OBJ_TYPE_MIN == 0); + if ((unsigned) type >= HWLOC_OBJ_TYPE_MAX) { + errno = EINVAL; + return -1; + } + if (topology->is_loaded) { + errno = EBUSY; + return -1; + } + return hwloc__topology_set_type_filter(topology, type, filter); +} + +int +hwloc_topology_set_all_types_filter(struct hwloc_topology *topology, enum hwloc_type_filter_e filter) +{ + hwloc_obj_type_t type; + if (topology->is_loaded) { + errno = EBUSY; + return -1; + } + for(type = HWLOC_OBJ_TYPE_MIN; type < HWLOC_OBJ_TYPE_MAX; type++) + hwloc__topology_set_type_filter(topology, type, filter); + return 0; +} + +int +hwloc_topology_set_cache_types_filter(hwloc_topology_t topology, enum hwloc_type_filter_e filter) +{ + unsigned i; + for(i=HWLOC_OBJ_L1CACHE; i= HWLOC_OBJ_TYPE_MAX) { + errno = EINVAL; + return -1; + } + *filterp = topology->type_filter[type]; + return 0; +} + +void +hwloc_topology_clear (struct hwloc_topology *topology) +{ + /* no need to set to NULL after free() since callers will call setup_defaults() or just destroy the rest of the topology */ + unsigned l; + hwloc_internal_distances_destroy(topology); + hwloc_free_object_and_children(topology->levels[0][0]); + hwloc_bitmap_free(topology->allowed_cpuset); + hwloc_bitmap_free(topology->allowed_nodeset); + for (l=0; lnb_levels; l++) + free(topology->levels[l]); + for(l=0; lslevels[l].objs); + 
free(topology->machine_memory.page_types); +} + +void +hwloc_topology_destroy (struct hwloc_topology *topology) +{ + if (topology->adopted_shmem_addr) { + hwloc__topology_disadopt(topology); + return; + } + + hwloc_backends_disable_all(topology); + hwloc_components_fini(); + + hwloc_topology_clear(topology); + + free(topology->levels); + free(topology->level_nbobjects); + + free(topology->support.discovery); + free(topology->support.cpubind); + free(topology->support.membind); + free(topology); +} + +int +hwloc_topology_load (struct hwloc_topology *topology) +{ + int err; + + if (topology->is_loaded) { + errno = EBUSY; + return -1; + } + + hwloc_internal_distances_prepare(topology); + + if (getenv("HWLOC_XML_USERDATA_NOT_DECODED")) + topology->userdata_not_decoded = 1; + + /* Ignore variables if HWLOC_COMPONENTS is set. It will be processed later */ + if (!getenv("HWLOC_COMPONENTS")) { + /* Only apply variables if we have not changed the backend yet. + * Only the first one will be kept. + * Check for FSROOT first since it's for debugging so likely needs to override everything else. + * Check for XML last (that's the one that may be set system-wide by administrators) + * so that it's only used if other variables are not set, + * to allow users to override easily. 
+ */ + if (!topology->backends) { + const char *fsroot_path_env = getenv("HWLOC_FSROOT"); + if (fsroot_path_env) + hwloc_disc_component_force_enable(topology, + 1 /* env force */, + HWLOC_DISC_COMPONENT_TYPE_CPU, "linux", + NULL /* backend will getenv again */, NULL, NULL); + } + if (!topology->backends) { + const char *cpuid_path_env = getenv("HWLOC_CPUID_PATH"); + if (cpuid_path_env) + hwloc_disc_component_force_enable(topology, + 1 /* env force */, + HWLOC_DISC_COMPONENT_TYPE_CPU, "x86", + NULL /* backend will getenv again */, NULL, NULL); + } + if (!topology->backends) { + const char *synthetic_env = getenv("HWLOC_SYNTHETIC"); + if (synthetic_env) + hwloc_disc_component_force_enable(topology, + 1 /* env force */, + -1, "synthetic", + synthetic_env, NULL, NULL); + } + if (!topology->backends) { + const char *xmlpath_env = getenv("HWLOC_XMLFILE"); + if (xmlpath_env) + hwloc_disc_component_force_enable(topology, + 1 /* env force */, + -1, "xml", + xmlpath_env, NULL, NULL); + } + } + + /* instantiate all possible other backends now */ + hwloc_disc_components_enable_others(topology); + /* now that backends are enabled, update the thissystem flag and some callbacks */ + hwloc_backends_is_thissystem(topology); + hwloc_backends_find_callbacks(topology); + /* + * Now set binding hooks according to topology->is_thissystem + * and what the native OS backend offers. + */ + hwloc_set_binding_hooks(topology); + + hwloc_pci_discovery_prepare(topology); + + /* actual topology discovery */ + err = hwloc_discover(topology); + if (err < 0) + goto out; + + hwloc_pci_discovery_exit(topology); + +#ifndef HWLOC_DEBUG + if (getenv("HWLOC_DEBUG_CHECK")) +#endif + hwloc_topology_check(topology); + + /* Mark distances objs arrays as invalid since we may have removed objects + * from the topology after adding the distances (remove_empty, etc). + * It would be hard to actually verify whether it's needed. 
+ */ + hwloc_internal_distances_invalidate_cached_objs(topology); + /* And refresh distances so that multithreaded concurrent distances_get() + * don't refresh() concurrently (disallowed). + */ + hwloc_internal_distances_refresh(topology); + + topology->is_loaded = 1; + return 0; + + out: + hwloc_pci_discovery_exit(topology); + hwloc_topology_clear(topology); + hwloc_topology_setup_defaults(topology); + hwloc_backends_disable_all(topology); + return -1; +} + +/* adjust object cpusets according the given droppedcpuset, + * drop object whose cpuset becomes empty and that have no children, + * and propagate NUMA node removal as nodeset changes in parents. + */ +static void +restrict_object_by_cpuset(hwloc_topology_t topology, unsigned long flags, hwloc_obj_t *pobj, + hwloc_bitmap_t droppedcpuset, hwloc_bitmap_t droppednodeset) +{ + hwloc_obj_t obj = *pobj, child, *pchild; + int modified = 0; + + if (hwloc_bitmap_intersects(obj->complete_cpuset, droppedcpuset)) { + hwloc_bitmap_andnot(obj->cpuset, obj->cpuset, droppedcpuset); + hwloc_bitmap_andnot(obj->complete_cpuset, obj->complete_cpuset, droppedcpuset); + modified = 1; + } else { + if ((flags & HWLOC_RESTRICT_FLAG_REMOVE_CPULESS) + && hwloc_bitmap_iszero(obj->complete_cpuset)) { + /* we're empty, there's a NUMAnode below us, it'll be removed this time */ + modified = 1; + } + /* nodeset cannot intersect unless cpuset intersects or is empty */ + if (droppednodeset) + assert(!hwloc_bitmap_intersects(obj->complete_nodeset, droppednodeset) + || hwloc_bitmap_iszero(obj->complete_cpuset)); + } + if (droppednodeset) { + hwloc_bitmap_andnot(obj->nodeset, obj->nodeset, droppednodeset); + hwloc_bitmap_andnot(obj->complete_nodeset, obj->complete_nodeset, droppednodeset); + } + + if (modified) { + for_each_child_safe(child, obj, pchild) + restrict_object_by_cpuset(topology, flags, pchild, droppedcpuset, droppednodeset); + /* if some hwloc_bitmap_first(child->complete_cpuset) changed, children might need to be reordered */ + 
hwloc__reorder_children(obj); + + for_each_memory_child_safe(child, obj, pchild) + restrict_object_by_cpuset(topology, flags, pchild, droppedcpuset, droppednodeset); + /* local NUMA nodes have the same cpusets, no need to reorder them */ + + /* Nothing to restrict under I/O or Misc */ + } + + if (!obj->first_child && !obj->memory_first_child /* arity not updated before connect_children() */ + && hwloc_bitmap_iszero(obj->cpuset) + && (obj->type != HWLOC_OBJ_NUMANODE || (flags & HWLOC_RESTRICT_FLAG_REMOVE_CPULESS))) { + /* remove object */ + hwloc_debug("%s", "\nRemoving object during restrict"); + hwloc_debug_print_object(0, obj); + + if (!(flags & HWLOC_RESTRICT_FLAG_ADAPT_IO)) { + hwloc_free_object_siblings_and_children(obj->io_first_child); + obj->io_first_child = NULL; + } + if (!(flags & HWLOC_RESTRICT_FLAG_ADAPT_MISC)) { + hwloc_free_object_siblings_and_children(obj->misc_first_child); + obj->misc_first_child = NULL; + } + assert(!obj->first_child); + assert(!obj->memory_first_child); + unlink_and_free_single_object(pobj); + topology->modified = 1; + } +} + +int +hwloc_topology_restrict(struct hwloc_topology *topology, hwloc_const_cpuset_t cpuset, unsigned long flags) +{ + hwloc_bitmap_t droppedcpuset, droppednodeset; + + if (!topology->is_loaded) { + errno = EINVAL; + return -1; + } + + if (flags & ~(HWLOC_RESTRICT_FLAG_REMOVE_CPULESS + |HWLOC_RESTRICT_FLAG_ADAPT_MISC|HWLOC_RESTRICT_FLAG_ADAPT_IO)) { + errno = EINVAL; + return -1; + } + + /* make sure we'll keep something in the topology */ + if (!hwloc_bitmap_intersects(cpuset, topology->allowed_cpuset)) { + errno = EINVAL; /* easy failure, just don't touch the topology */ + return -1; + } + + droppedcpuset = hwloc_bitmap_alloc(); + droppednodeset = hwloc_bitmap_alloc(); + if (!droppedcpuset || !droppednodeset) { + hwloc_bitmap_free(droppedcpuset); + hwloc_bitmap_free(droppednodeset); + return -1; + } + + /* cpuset to clear */ + hwloc_bitmap_not(droppedcpuset, cpuset); + /* nodeset to clear */ + if (flags & 
HWLOC_RESTRICT_FLAG_REMOVE_CPULESS) { + hwloc_obj_t node = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 0); + do { + /* node will be removed if nodeset gets or was empty */ + if (hwloc_bitmap_iszero(node->cpuset) + || hwloc_bitmap_isincluded(node->cpuset, droppedcpuset)) + hwloc_bitmap_set(droppednodeset, node->os_index); + node = node->next_cousin; + } while (node); + + /* check we're not removing all NUMA nodes */ + if (hwloc_bitmap_isincluded(topology->allowed_nodeset, droppednodeset)) { + errno = EINVAL; /* easy failure, just don't touch the topology */ + hwloc_bitmap_free(droppedcpuset); + hwloc_bitmap_free(droppednodeset); + return -1; + } + } + /* remove nodeset if empty */ + if (!(flags & HWLOC_RESTRICT_FLAG_REMOVE_CPULESS) + || hwloc_bitmap_iszero(droppednodeset)) { + hwloc_bitmap_free(droppednodeset); + droppednodeset = NULL; + } + + /* now recurse to filter sets and drop things */ + restrict_object_by_cpuset(topology, flags, &topology->levels[0][0], droppedcpuset, droppednodeset); + hwloc_bitmap_andnot(topology->allowed_cpuset, topology->allowed_cpuset, droppedcpuset); + if (droppednodeset) + hwloc_bitmap_andnot(topology->allowed_nodeset, topology->allowed_nodeset, droppednodeset); + + hwloc_bitmap_free(droppedcpuset); + hwloc_bitmap_free(droppednodeset); + + if (hwloc_topology_reconnect(topology, 0) < 0) + goto out; + + /* some objects may have disappeared, we need to update distances objs arrays */ + hwloc_internal_distances_invalidate_cached_objs(topology); + + hwloc_filter_levels_keep_structure(topology); + hwloc_propagate_symmetric_subtree(topology, topology->levels[0][0]); + propagate_total_memory(topology->levels[0][0]); + +#ifndef HWLOC_DEBUG + if (getenv("HWLOC_DEBUG_CHECK")) +#endif + hwloc_topology_check(topology); + + return 0; + + out: + /* unrecoverable failure, re-init the topology */ + hwloc_topology_clear(topology); + hwloc_topology_setup_defaults(topology); + return -1; +} + +int +hwloc_topology_is_thissystem(struct 
hwloc_topology *topology) +{ + return topology->is_thissystem; +} + +int +hwloc_topology_get_depth(struct hwloc_topology *topology) +{ + return (int) topology->nb_levels; +} + +const struct hwloc_topology_support * +hwloc_topology_get_support(struct hwloc_topology * topology) +{ + return &topology->support; +} + +void hwloc_topology_set_userdata(struct hwloc_topology * topology, const void *userdata) +{ + topology->userdata = (void *) userdata; +} + +void * hwloc_topology_get_userdata(struct hwloc_topology * topology) +{ + return topology->userdata; +} + +hwloc_const_cpuset_t +hwloc_topology_get_complete_cpuset(hwloc_topology_t topology) +{ + return hwloc_get_root_obj(topology)->complete_cpuset; +} + +hwloc_const_cpuset_t +hwloc_topology_get_topology_cpuset(hwloc_topology_t topology) +{ + return hwloc_get_root_obj(topology)->cpuset; +} + +hwloc_const_cpuset_t +hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology) +{ + return topology->allowed_cpuset; +} + +hwloc_const_nodeset_t +hwloc_topology_get_complete_nodeset(hwloc_topology_t topology) +{ + return hwloc_get_root_obj(topology)->complete_nodeset; +} + +hwloc_const_nodeset_t +hwloc_topology_get_topology_nodeset(hwloc_topology_t topology) +{ + return hwloc_get_root_obj(topology)->nodeset; +} + +hwloc_const_nodeset_t +hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology) +{ + return topology->allowed_nodeset; +} + + +/**************** + * Debug Checks * + ****************/ + +#ifndef NDEBUG /* assert only enabled if !NDEBUG */ + +static void +hwloc__check_child_siblings(hwloc_obj_t parent, hwloc_obj_t *array, + unsigned arity, unsigned i, + hwloc_obj_t child, hwloc_obj_t prev) +{ + assert(child->parent == parent); + + assert(child->sibling_rank == i); + if (array) + assert(child == array[i]); + + if (prev) + assert(prev->next_sibling == child); + assert(child->prev_sibling == prev); + + if (!i) + assert(child->prev_sibling == NULL); + else + assert(child->prev_sibling != NULL); + + if (i == arity-1) 
+ assert(child->next_sibling == NULL); + else + assert(child->next_sibling != NULL); +} + +static void +hwloc__check_object(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t obj); + +/* check children between a parent object */ +static void +hwloc__check_normal_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t parent) +{ + hwloc_obj_t child, prev; + unsigned j; + + if (!parent->arity) { + /* check whether that parent has no children for real */ + assert(!parent->children); + assert(!parent->first_child); + assert(!parent->last_child); + return; + } + /* check whether that parent has children for real */ + assert(parent->children); + assert(parent->first_child); + assert(parent->last_child); + + /* sibling checks */ + for(prev = NULL, child = parent->first_child, j = 0; + child; + prev = child, child = child->next_sibling, j++) { + /* normal child */ + assert(hwloc__obj_type_is_normal(child->type)); + /* check depth */ + assert(child->depth > parent->depth); + /* check siblings */ + hwloc__check_child_siblings(parent, parent->children, parent->arity, j, child, prev); + /* recurse */ + hwloc__check_object(topology, gp_indexes, child); + } + /* check arity */ + assert(j == parent->arity); + + assert(parent->first_child == parent->children[0]); + assert(parent->last_child == parent->children[parent->arity-1]); + + /* no normal children below a PU */ + if (parent->type == HWLOC_OBJ_PU) + assert(!parent->arity); +} + +static void +hwloc__check_children_cpusets(hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj) +{ + /* we already checked in the caller that objects have either all sets or none */ + hwloc_obj_t child; + int prev_first, prev_empty; + + if (obj->type == HWLOC_OBJ_PU) { + /* PU cpuset is just itself, with no normal children */ + assert(hwloc_bitmap_weight(obj->cpuset) == 1); + assert(hwloc_bitmap_first(obj->cpuset) == (int) obj->os_index); + assert(hwloc_bitmap_weight(obj->complete_cpuset) == 1); + 
assert(hwloc_bitmap_first(obj->complete_cpuset) == (int) obj->os_index); + if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM)) { + assert(hwloc_bitmap_isset(topology->allowed_cpuset, (int) obj->os_index)); + } + assert(!obj->arity); + } else if (hwloc__obj_type_is_memory(obj->type)) { + /* memory object cpuset is equal to its parent */ + assert(hwloc_bitmap_isequal(obj->parent->cpuset, obj->cpuset)); + assert(!obj->arity); + } else if (!hwloc__obj_type_is_special(obj->type)) { + hwloc_bitmap_t set; + /* other obj cpuset is an exclusive OR of normal children, except for PUs */ + set = hwloc_bitmap_alloc(); + for_each_child(child, obj) { + assert(!hwloc_bitmap_intersects(set, child->cpuset)); + hwloc_bitmap_or(set, set, child->cpuset); + } + assert(hwloc_bitmap_isequal(set, obj->cpuset)); + hwloc_bitmap_free(set); + } + + /* check that memory children have same cpuset */ + for_each_memory_child(child, obj) + assert(hwloc_bitmap_isequal(obj->cpuset, child->cpuset)); + + /* check that children complete_cpusets are properly ordered, empty ones may be anywhere + * (can be wrong for main cpuset since removed PUs can break the ordering). 
+ */ + prev_first = -1; /* -1 works fine with first comparisons below */ + prev_empty = 0; /* no empty cpuset in previous children */ + for_each_child(child, obj) { + int first = hwloc_bitmap_first(child->complete_cpuset); + if (first >= 0) { + assert(!prev_empty); /* no objects with CPU after objects without CPU */ + assert(prev_first < first); + } else { + prev_empty = 1; + } + prev_first = first; + } +} + +static void +hwloc__check_memory_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t parent) +{ + unsigned j; + hwloc_obj_t child, prev; + + if (!parent->memory_arity) { + /* check whether that parent has no children for real */ + assert(!parent->memory_first_child); + return; + } + /* check whether that parent has children for real */ + assert(parent->memory_first_child); + + for(prev = NULL, child = parent->memory_first_child, j = 0; + child; + prev = child, child = child->next_sibling, j++) { + assert(hwloc__obj_type_is_memory(child->type)); + /* check siblings */ + hwloc__check_child_siblings(parent, NULL, parent->memory_arity, j, child, prev); + /* only Memory and Misc children, recurse */ + assert(!child->first_child); + assert(!child->io_first_child); + hwloc__check_object(topology, gp_indexes, child); + } + /* check arity */ + assert(j == parent->memory_arity); + + /* no memory children below a NUMA node */ + if (parent->type == HWLOC_OBJ_NUMANODE) + assert(!parent->memory_arity); +} + +static void +hwloc__check_io_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t parent) +{ + unsigned j; + hwloc_obj_t child, prev; + + if (!parent->io_arity) { + /* check whether that parent has no children for real */ + assert(!parent->io_first_child); + return; + } + /* check whether that parent has children for real */ + assert(parent->io_first_child); + + for(prev = NULL, child = parent->io_first_child, j = 0; + child; + prev = child, child = child->next_sibling, j++) { + /* all children must be I/O */ + 
assert(hwloc__obj_type_is_io(child->type)); + /* check siblings */ + hwloc__check_child_siblings(parent, NULL, parent->io_arity, j, child, prev); + /* only I/O and Misc children, recurse */ + assert(!child->first_child); + assert(!child->memory_first_child); + hwloc__check_object(topology, gp_indexes, child); + } + /* check arity */ + assert(j == parent->io_arity); +} + +static void +hwloc__check_misc_children(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t parent) +{ + unsigned j; + hwloc_obj_t child, prev; + + if (!parent->misc_arity) { + /* check whether that parent has no children for real */ + assert(!parent->misc_first_child); + return; + } + /* check whether that parent has children for real */ + assert(parent->misc_first_child); + + for(prev = NULL, child = parent->misc_first_child, j = 0; + child; + prev = child, child = child->next_sibling, j++) { + /* all children must be Misc */ + assert(child->type == HWLOC_OBJ_MISC); + /* check siblings */ + hwloc__check_child_siblings(parent, NULL, parent->misc_arity, j, child, prev); + /* only Misc children, recurse */ + assert(!child->first_child); + assert(!child->memory_first_child); + assert(!child->io_first_child); + hwloc__check_object(topology, gp_indexes, child); + } + /* check arity */ + assert(j == parent->misc_arity); +} + +static void +hwloc__check_object(hwloc_topology_t topology, hwloc_bitmap_t gp_indexes, hwloc_obj_t obj) +{ + assert(!hwloc_bitmap_isset(gp_indexes, obj->gp_index)); + hwloc_bitmap_set(gp_indexes, obj->gp_index); + + HWLOC_BUILD_ASSERT(HWLOC_OBJ_TYPE_MIN == 0); + assert((unsigned) obj->type < HWLOC_OBJ_TYPE_MAX); + + assert(hwloc_filter_check_keep_object(topology, obj)); + + /* check that sets and depth */ + if (hwloc__obj_type_is_special(obj->type)) { + assert(!obj->cpuset); + if (obj->type == HWLOC_OBJ_BRIDGE) + assert(obj->depth == HWLOC_TYPE_DEPTH_BRIDGE); + else if (obj->type == HWLOC_OBJ_PCI_DEVICE) + assert(obj->depth == HWLOC_TYPE_DEPTH_PCI_DEVICE); + else if 
(obj->type == HWLOC_OBJ_OS_DEVICE) + assert(obj->depth == HWLOC_TYPE_DEPTH_OS_DEVICE); + else if (obj->type == HWLOC_OBJ_MISC) + assert(obj->depth == HWLOC_TYPE_DEPTH_MISC); + } else { + assert(obj->cpuset); + if (obj->type == HWLOC_OBJ_NUMANODE) + assert(obj->depth == HWLOC_TYPE_DEPTH_NUMANODE); + else + assert(obj->depth >= 0); + } + + /* group depth cannot be -1 anymore in v2.0+ */ + if (obj->type == HWLOC_OBJ_GROUP) { + assert(obj->attr->group.depth != (unsigned) -1); + } + + /* there's other cpusets and nodesets if and only if there's a main cpuset */ + assert(!!obj->cpuset == !!obj->complete_cpuset); + assert(!!obj->cpuset == !!obj->nodeset); + assert(!!obj->nodeset == !!obj->complete_nodeset); + + /* check that complete/inline sets are larger than the main sets */ + if (obj->cpuset) { + assert(hwloc_bitmap_isincluded(obj->cpuset, obj->complete_cpuset)); + assert(hwloc_bitmap_isincluded(obj->nodeset, obj->complete_nodeset)); + } + + /* check cache type/depth vs type */ + if (hwloc__obj_type_is_cache(obj->type)) { + if (hwloc__obj_type_is_icache(obj->type)) + assert(obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION); + else if (hwloc__obj_type_is_dcache(obj->type)) + assert(obj->attr->cache.type == HWLOC_OBJ_CACHE_DATA + || obj->attr->cache.type == HWLOC_OBJ_CACHE_UNIFIED); + else + assert(0); + assert(hwloc_cache_type_by_depth_type(obj->attr->cache.depth, obj->attr->cache.type) == obj->type); + } + + /* check children */ + hwloc__check_normal_children(topology, gp_indexes, obj); + hwloc__check_memory_children(topology, gp_indexes, obj); + hwloc__check_io_children(topology, gp_indexes, obj); + hwloc__check_misc_children(topology, gp_indexes, obj); + hwloc__check_children_cpusets(topology, obj); + /* nodesets are checked during another recursion with state below */ +} + +static void +hwloc__check_nodesets(hwloc_topology_t topology, hwloc_obj_t obj, hwloc_bitmap_t parentset) +{ + hwloc_obj_t child; + int prev_first; + + if (obj->type == HWLOC_OBJ_NUMANODE) { 
+ /* NUMANODE nodeset is just itself, with no memory/normal children */ + assert(hwloc_bitmap_weight(obj->nodeset) == 1); + assert(hwloc_bitmap_first(obj->nodeset) == (int) obj->os_index); + assert(hwloc_bitmap_weight(obj->complete_nodeset) == 1); + assert(hwloc_bitmap_first(obj->complete_nodeset) == (int) obj->os_index); + if (!(topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM)) { + assert(hwloc_bitmap_isset(topology->allowed_nodeset, (int) obj->os_index)); + } + assert(!obj->arity); + assert(!obj->memory_arity); + assert(hwloc_bitmap_isincluded(obj->nodeset, parentset)); + } else { + hwloc_bitmap_t myset; + hwloc_bitmap_t childset; + + /* the local nodeset is an exclusive OR of memory children */ + myset = hwloc_bitmap_alloc(); + for_each_memory_child(child, obj) { + assert(!hwloc_bitmap_intersects(myset, child->nodeset)); + hwloc_bitmap_or(myset, myset, child->nodeset); + } + /* the local nodeset cannot intersect with parents' local nodeset */ + assert(!hwloc_bitmap_intersects(myset, parentset)); + hwloc_bitmap_or(parentset, parentset, myset); + hwloc_bitmap_free(myset); + /* parentset now contains parent+local contribution */ + + /* for each children, recurse to check/get its contribution */ + childset = hwloc_bitmap_alloc(); + for_each_child(child, obj) { + hwloc_bitmap_t set = hwloc_bitmap_dup(parentset); /* don't touch parentset, we don't want to propagate the first child contribution to other children */ + hwloc__check_nodesets(topology, child, set); + /* extract this child contribution */ + hwloc_bitmap_andnot(set, set, parentset); + /* save it */ + assert(!hwloc_bitmap_intersects(childset, set)); + hwloc_bitmap_or(childset, childset, set); + hwloc_bitmap_free(set); + } + /* combine child contribution into parentset */ + assert(!hwloc_bitmap_intersects(parentset, childset)); + hwloc_bitmap_or(parentset, parentset, childset); + hwloc_bitmap_free(childset); + /* now check that our nodeset is combination of parent, local and children */ + 
assert(hwloc_bitmap_isequal(obj->nodeset, parentset)); + } + + /* check that children complete_nodesets are properly ordered, empty ones may be anywhere + * (can be wrong for main nodeset since removed PUs can break the ordering). + */ + prev_first = -1; /* -1 works fine with first comparisons below */ + for_each_memory_child(child, obj) { + int first = hwloc_bitmap_first(child->complete_nodeset); + assert(prev_first < first); + prev_first = first; + } +} + +static void +hwloc__check_level(struct hwloc_topology *topology, int depth, + hwloc_obj_t first, hwloc_obj_t last) +{ + unsigned width = hwloc_get_nbobjs_by_depth(topology, depth); + struct hwloc_obj *prev = NULL; + hwloc_obj_t obj; + unsigned j; + + /* check each object of the level */ + for(j=0; jdepth == depth); + assert(obj->logical_index == j); + /* check that all objects in the level have the same type */ + if (prev) { + assert(hwloc_type_cmp(obj, prev) == HWLOC_OBJ_EQUAL); + assert(prev->next_cousin == obj); + } + assert(obj->prev_cousin == prev); + + /* check that PUs and NUMA nodes have correct cpuset/nodeset */ + if (obj->type == HWLOC_OBJ_NUMANODE) { + assert(hwloc_bitmap_weight(obj->complete_nodeset) == 1); + assert(hwloc_bitmap_first(obj->complete_nodeset) == (int) obj->os_index); + } + prev = obj; + } + if (prev) + assert(prev->next_cousin == NULL); + + if (width) { + /* check first object of the level */ + obj = hwloc_get_obj_by_depth(topology, depth, 0); + assert(obj); + assert(!obj->prev_cousin); + /* check type */ + assert(hwloc_get_depth_type(topology, depth) == obj->type); + assert(depth == hwloc_get_type_depth(topology, obj->type) + || HWLOC_TYPE_DEPTH_MULTIPLE == hwloc_get_type_depth(topology, obj->type)); + /* check last object of the level */ + obj = hwloc_get_obj_by_depth(topology, depth, width-1); + assert(obj); + assert(!obj->next_cousin); + } + + if (depth < 0) { + assert(first == hwloc_get_obj_by_depth(topology, depth, 0)); + assert(last == hwloc_get_obj_by_depth(topology, depth, 
width-1)); + } else { + assert(!first); + assert(!last); + } + + /* check last+1 object of the level */ + obj = hwloc_get_obj_by_depth(topology, depth, width); + assert(!obj); +} + +/* check a whole topology structure */ +void +hwloc_topology_check(struct hwloc_topology *topology) +{ + struct hwloc_obj *obj; + hwloc_bitmap_t gp_indexes, set; + hwloc_obj_type_t type; + unsigned i; + int j, depth; + + /* make sure we can use ranges to check types */ + + /* hwloc__obj_type_is_{,d,i}cache() want cache types to be ordered like this */ + HWLOC_BUILD_ASSERT(HWLOC_OBJ_L2CACHE == HWLOC_OBJ_L1CACHE + 1); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_L3CACHE == HWLOC_OBJ_L2CACHE + 1); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_L4CACHE == HWLOC_OBJ_L3CACHE + 1); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_L5CACHE == HWLOC_OBJ_L4CACHE + 1); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_L1ICACHE == HWLOC_OBJ_L5CACHE + 1); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_L2ICACHE == HWLOC_OBJ_L1ICACHE + 1); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_L3ICACHE == HWLOC_OBJ_L2ICACHE + 1); + + /* hwloc__obj_type_is_normal(), hwloc__obj_type_is_memory(), hwloc__obj_type_is_io(), hwloc__obj_type_is_special() + * and hwloc_reset_normal_type_depths() + * want special types to be ordered like this, after all normal types. 
+ */ + HWLOC_BUILD_ASSERT(HWLOC_OBJ_NUMANODE + 1 == HWLOC_OBJ_BRIDGE); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_BRIDGE + 1 == HWLOC_OBJ_PCI_DEVICE); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_PCI_DEVICE + 1 == HWLOC_OBJ_OS_DEVICE); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_OS_DEVICE + 1 == HWLOC_OBJ_MISC); + HWLOC_BUILD_ASSERT(HWLOC_OBJ_MISC + 1 == HWLOC_OBJ_TYPE_MAX); + + /* make sure order and priority arrays have the right size */ + HWLOC_BUILD_ASSERT(sizeof(obj_type_order)/sizeof(*obj_type_order) == HWLOC_OBJ_TYPE_MAX); + HWLOC_BUILD_ASSERT(sizeof(obj_order_type)/sizeof(*obj_order_type) == HWLOC_OBJ_TYPE_MAX); + HWLOC_BUILD_ASSERT(sizeof(obj_type_priority)/sizeof(*obj_type_priority) == HWLOC_OBJ_TYPE_MAX); + + /* make sure group are not entirely ignored */ + assert(topology->type_filter[HWLOC_OBJ_GROUP] != HWLOC_TYPE_FILTER_KEEP_ALL); + + /* make sure order arrays are coherent */ + for(type=HWLOC_OBJ_TYPE_MIN; typemodified); + + /* check that first level is Machine. + * Root object cannot be ignored. And Machine can only be merged into PU, + * but there must be a NUMA node below Machine, and it cannot be below PU. 
+ */ + assert(hwloc_get_depth_type(topology, 0) == HWLOC_OBJ_MACHINE); + + /* check that last level is PU and that it doesn't have memory */ + assert(hwloc_get_depth_type(topology, depth-1) == HWLOC_OBJ_PU); + assert(hwloc_get_nbobjs_by_depth(topology, depth-1) > 0); + for(i=0; itype == HWLOC_OBJ_PU); + assert(!obj->memory_first_child); + } + /* check that other levels are not PU or Machine */ + for(j=1; j=0 || d == HWLOC_TYPE_DEPTH_UNKNOWN || d == HWLOC_TYPE_DEPTH_MULTIPLE); + } + } + + /* top-level specific checks */ + assert(hwloc_get_nbobjs_by_depth(topology, 0) == 1); + obj = hwloc_get_root_obj(topology); + assert(obj); + assert(!obj->parent); + assert(obj->cpuset); + assert(!obj->depth); + + /* check that allowed sets are larger than the main sets */ + if (topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM) { + assert(hwloc_bitmap_isincluded(topology->allowed_cpuset, obj->cpuset)); + assert(hwloc_bitmap_isincluded(topology->allowed_nodeset, obj->nodeset)); + } else { + assert(hwloc_bitmap_isequal(topology->allowed_cpuset, obj->cpuset)); + assert(hwloc_bitmap_isequal(topology->allowed_nodeset, obj->nodeset)); + } + + /* check each level */ + for(j=0; jslevels[j].first, topology->slevels[j].last); + + /* recurse and check the tree of children, and type-specific checks */ + gp_indexes = hwloc_bitmap_alloc(); /* TODO prealloc to topology->next_gp_index */ + hwloc__check_object(topology, gp_indexes, obj); + hwloc_bitmap_free(gp_indexes); + + /* recurse and check the nodesets of children */ + set = hwloc_bitmap_alloc(); + hwloc__check_nodesets(topology, obj, set); + hwloc_bitmap_free(set); +} + +#else /* NDEBUG */ + +void +hwloc_topology_check(struct hwloc_topology *topology __hwloc_attribute_unused) +{ +} + +#endif /* NDEBUG */ diff --git a/src/3rdparty/hwloc/src/traversal.c b/src/3rdparty/hwloc/src/traversal.c new file mode 100644 index 00000000..9c5e6268 --- /dev/null +++ b/src/3rdparty/hwloc/src/traversal.c @@ -0,0 +1,616 @@ +/* + * Copyright © 2009 CNRS + * 
Copyright © 2009-2018 Inria. All rights reserved. + * Copyright © 2009-2010 Université Bordeaux + * Copyright © 2009-2011 Cisco Systems, Inc. All rights reserved. + * See COPYING in top-level directory. + */ + +#include +#include +#include +#include +#include +#ifdef HAVE_STRINGS_H +#include +#endif /* HAVE_STRINGS_H */ + +int +hwloc_get_type_depth (struct hwloc_topology *topology, hwloc_obj_type_t type) +{ + HWLOC_BUILD_ASSERT(HWLOC_OBJ_TYPE_MIN == 0); + if ((unsigned) type >= HWLOC_OBJ_TYPE_MAX) + return HWLOC_TYPE_DEPTH_UNKNOWN; + else + return topology->type_depth[type]; +} + +hwloc_obj_type_t +hwloc_get_depth_type (hwloc_topology_t topology, int depth) +{ + if ((unsigned)depth >= topology->nb_levels) + switch (depth) { + case HWLOC_TYPE_DEPTH_NUMANODE: + return HWLOC_OBJ_NUMANODE; + case HWLOC_TYPE_DEPTH_BRIDGE: + return HWLOC_OBJ_BRIDGE; + case HWLOC_TYPE_DEPTH_PCI_DEVICE: + return HWLOC_OBJ_PCI_DEVICE; + case HWLOC_TYPE_DEPTH_OS_DEVICE: + return HWLOC_OBJ_OS_DEVICE; + case HWLOC_TYPE_DEPTH_MISC: + return HWLOC_OBJ_MISC; + default: + return HWLOC_OBJ_TYPE_NONE; + } + return topology->levels[depth][0]->type; +} + +int +hwloc_get_memory_parents_depth (hwloc_topology_t topology) +{ + int depth = HWLOC_TYPE_DEPTH_UNKNOWN; + /* memory leaves are always NUMA nodes for now, no need to check parents of other memory types */ + hwloc_obj_t numa = hwloc_get_obj_by_depth(topology, HWLOC_TYPE_DEPTH_NUMANODE, 0); + assert(numa); + while (numa) { + hwloc_obj_t parent = numa->parent; + /* walk-up the memory hierarchy */ + while (hwloc__obj_type_is_memory(parent->type)) + parent = parent->parent; + + if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) + depth = parent->depth; + else if (depth != parent->depth) + return HWLOC_TYPE_DEPTH_MULTIPLE; + + numa = numa->next_cousin; + } + + assert(depth >= 0); + return depth; +} + +unsigned +hwloc_get_nbobjs_by_depth (struct hwloc_topology *topology, int depth) +{ + if ((unsigned)depth >= topology->nb_levels) { + unsigned l = 
HWLOC_SLEVEL_FROM_DEPTH(depth); + if (l < HWLOC_NR_SLEVELS) + return topology->slevels[l].nbobjs; + else + return 0; + } + return topology->level_nbobjects[depth]; +} + +struct hwloc_obj * +hwloc_get_obj_by_depth (struct hwloc_topology *topology, int depth, unsigned idx) +{ + if ((unsigned)depth >= topology->nb_levels) { + unsigned l = HWLOC_SLEVEL_FROM_DEPTH(depth); + if (l < HWLOC_NR_SLEVELS) + return idx < topology->slevels[l].nbobjs ? topology->slevels[l].objs[idx] : NULL; + else + return NULL; + } + if (idx >= topology->level_nbobjects[depth]) + return NULL; + return topology->levels[depth][idx]; +} + +int +hwloc_obj_type_is_normal(hwloc_obj_type_t type) +{ + return hwloc__obj_type_is_normal(type); +} + +int +hwloc_obj_type_is_memory(hwloc_obj_type_t type) +{ + return hwloc__obj_type_is_memory(type); +} + +int +hwloc_obj_type_is_io(hwloc_obj_type_t type) +{ + return hwloc__obj_type_is_io(type); +} + +int +hwloc_obj_type_is_cache(hwloc_obj_type_t type) +{ + return hwloc__obj_type_is_cache(type); +} + +int +hwloc_obj_type_is_dcache(hwloc_obj_type_t type) +{ + return hwloc__obj_type_is_dcache(type); +} + +int +hwloc_obj_type_is_icache(hwloc_obj_type_t type) +{ + return hwloc__obj_type_is_icache(type); +} + +unsigned hwloc_get_closest_objs (struct hwloc_topology *topology, struct hwloc_obj *src, struct hwloc_obj **objs, unsigned max) +{ + struct hwloc_obj *parent, *nextparent, **src_objs; + unsigned i,src_nbobjects; + unsigned stored = 0; + + if (!src->cpuset) + return 0; + + src_nbobjects = topology->level_nbobjects[src->depth]; + src_objs = topology->levels[src->depth]; + + parent = src; + while (stored < max) { + while (1) { + nextparent = parent->parent; + if (!nextparent) + goto out; + if (!hwloc_bitmap_isequal(parent->cpuset, nextparent->cpuset)) + break; + parent = nextparent; + } + + /* traverse src's objects and find those that are in nextparent and were not in parent */ + for(i=0; icpuset, nextparent->cpuset) + && 
!hwloc_bitmap_isincluded(src_objs[i]->cpuset, parent->cpuset)) { + objs[stored++] = src_objs[i]; + if (stored == max) + goto out; + } + } + parent = nextparent; + } + + out: + return stored; +} + +static int +hwloc__get_largest_objs_inside_cpuset (struct hwloc_obj *current, hwloc_const_bitmap_t set, + struct hwloc_obj ***res, int *max) +{ + int gotten = 0; + unsigned i; + + /* the caller must ensure this */ + if (*max <= 0) + return 0; + + if (hwloc_bitmap_isequal(current->cpuset, set)) { + **res = current; + (*res)++; + (*max)--; + return 1; + } + + for (i=0; iarity; i++) { + hwloc_bitmap_t subset; + int ret; + + /* split out the cpuset part corresponding to this child and see if there's anything to do */ + if (!hwloc_bitmap_intersects(set,current->children[i]->cpuset)) + continue; + + subset = hwloc_bitmap_dup(set); + hwloc_bitmap_and(subset, subset, current->children[i]->cpuset); + ret = hwloc__get_largest_objs_inside_cpuset (current->children[i], subset, res, max); + gotten += ret; + hwloc_bitmap_free(subset); + + /* if no more room to store remaining objects, return what we got so far */ + if (!*max) + break; + } + + return gotten; +} + +int +hwloc_get_largest_objs_inside_cpuset (struct hwloc_topology *topology, hwloc_const_bitmap_t set, + struct hwloc_obj **objs, int max) +{ + struct hwloc_obj *current = topology->levels[0][0]; + + if (!hwloc_bitmap_isincluded(set, current->cpuset)) + return -1; + + if (max <= 0) + return 0; + + return hwloc__get_largest_objs_inside_cpuset (current, set, &objs, &max); +} + +const char * +hwloc_obj_type_string (hwloc_obj_type_t obj) +{ + switch (obj) + { + case HWLOC_OBJ_MACHINE: return "Machine"; + case HWLOC_OBJ_MISC: return "Misc"; + case HWLOC_OBJ_GROUP: return "Group"; + case HWLOC_OBJ_NUMANODE: return "NUMANode"; + case HWLOC_OBJ_PACKAGE: return "Package"; + case HWLOC_OBJ_L1CACHE: return "L1Cache"; + case HWLOC_OBJ_L2CACHE: return "L2Cache"; + case HWLOC_OBJ_L3CACHE: return "L3Cache"; + case HWLOC_OBJ_L4CACHE: return 
"L4Cache"; + case HWLOC_OBJ_L5CACHE: return "L5Cache"; + case HWLOC_OBJ_L1ICACHE: return "L1iCache"; + case HWLOC_OBJ_L2ICACHE: return "L2iCache"; + case HWLOC_OBJ_L3ICACHE: return "L3iCache"; + case HWLOC_OBJ_CORE: return "Core"; + case HWLOC_OBJ_BRIDGE: return "Bridge"; + case HWLOC_OBJ_PCI_DEVICE: return "PCIDev"; + case HWLOC_OBJ_OS_DEVICE: return "OSDev"; + case HWLOC_OBJ_PU: return "PU"; + default: return "Unknown"; + } +} + +int +hwloc_type_sscanf(const char *string, hwloc_obj_type_t *typep, + union hwloc_obj_attr_u *attrp, size_t attrsize) +{ + hwloc_obj_type_t type = (hwloc_obj_type_t) -1; + unsigned depthattr = (unsigned) -1; + hwloc_obj_cache_type_t cachetypeattr = (hwloc_obj_cache_type_t) -1; /* unspecified */ + hwloc_obj_bridge_type_t ubtype = (hwloc_obj_bridge_type_t) -1; + hwloc_obj_osdev_type_t ostype = (hwloc_obj_osdev_type_t) -1; + char *end; + + /* never match the ending \0 since we want to match things like core:2 too. + * just use hwloc_strncasecmp() everywhere. + */ + + /* types without a custom depth */ + + /* osdev subtype first to avoid conflicts coproc/core etc */ + if (!hwloc_strncasecmp(string, "os", 2)) { + type = HWLOC_OBJ_OS_DEVICE; + } else if (!hwloc_strncasecmp(string, "bloc", 4)) { + type = HWLOC_OBJ_OS_DEVICE; + ostype = HWLOC_OBJ_OSDEV_BLOCK; + } else if (!hwloc_strncasecmp(string, "net", 3)) { + type = HWLOC_OBJ_OS_DEVICE; + ostype = HWLOC_OBJ_OSDEV_NETWORK; + } else if (!hwloc_strncasecmp(string, "openfab", 7)) { + type = HWLOC_OBJ_OS_DEVICE; + ostype = HWLOC_OBJ_OSDEV_OPENFABRICS; + } else if (!hwloc_strncasecmp(string, "dma", 3)) { + type = HWLOC_OBJ_OS_DEVICE; + ostype = HWLOC_OBJ_OSDEV_DMA; + } else if (!hwloc_strncasecmp(string, "gpu", 3)) { + type = HWLOC_OBJ_OS_DEVICE; + ostype = HWLOC_OBJ_OSDEV_GPU; + } else if (!hwloc_strncasecmp(string, "copro", 5) + || !hwloc_strncasecmp(string, "co-pro", 6)) { + type = HWLOC_OBJ_OS_DEVICE; + ostype = HWLOC_OBJ_OSDEV_COPROC; + + } else if (!hwloc_strncasecmp(string, "machine", 2)) { 
+ type = HWLOC_OBJ_MACHINE; + } else if (!hwloc_strncasecmp(string, "node", 2) + || !hwloc_strncasecmp(string, "numa", 2)) { /* matches node and numanode */ + type = HWLOC_OBJ_NUMANODE; + } else if (!hwloc_strncasecmp(string, "package", 2) + || !hwloc_strncasecmp(string, "socket", 2)) { /* backward compat with v1.10 */ + type = HWLOC_OBJ_PACKAGE; + } else if (!hwloc_strncasecmp(string, "core", 2)) { + type = HWLOC_OBJ_CORE; + } else if (!hwloc_strncasecmp(string, "pu", 2)) { + type = HWLOC_OBJ_PU; + } else if (!hwloc_strncasecmp(string, "misc", 4)) { + type = HWLOC_OBJ_MISC; + + } else if (!hwloc_strncasecmp(string, "bridge", 4)) { + type = HWLOC_OBJ_BRIDGE; + } else if (!hwloc_strncasecmp(string, "hostbridge", 6)) { + type = HWLOC_OBJ_BRIDGE; + ubtype = HWLOC_OBJ_BRIDGE_HOST; + } else if (!hwloc_strncasecmp(string, "pcibridge", 5)) { + type = HWLOC_OBJ_BRIDGE; + ubtype = HWLOC_OBJ_BRIDGE_PCI; + + } else if (!hwloc_strncasecmp(string, "pci", 3)) { + type = HWLOC_OBJ_PCI_DEVICE; + + /* types with depthattr */ + } else if ((string[0] == 'l' || string[0] == 'L') && string[1] >= '0' && string[1] <= '9') { + depthattr = strtol(string+1, &end, 10); + if (*end == 'i') { + if (depthattr >= 1 && depthattr <= 3) { + type = HWLOC_OBJ_L1ICACHE + depthattr-1; + cachetypeattr = HWLOC_OBJ_CACHE_INSTRUCTION; + } else + return -1; + } else { + if (depthattr >= 1 && depthattr <= 5) { + type = HWLOC_OBJ_L1CACHE + depthattr-1; + cachetypeattr = *end == 'd' ? 
HWLOC_OBJ_CACHE_DATA : HWLOC_OBJ_CACHE_UNIFIED; + } else + return -1; + } + + } else if (!hwloc_strncasecmp(string, "group", 2)) { + size_t length; + type = HWLOC_OBJ_GROUP; + length = strcspn(string, "0123456789"); + if (length <= 5 && !hwloc_strncasecmp(string, "group", length) + && string[length] >= '0' && string[length] <= '9') { + depthattr = strtol(string+length, &end, 10); + } + + } else + return -1; + + *typep = type; + if (attrp) { + if (hwloc__obj_type_is_cache(type) && attrsize >= sizeof(attrp->cache)) { + attrp->cache.depth = depthattr; + attrp->cache.type = cachetypeattr; + } else if (type == HWLOC_OBJ_GROUP && attrsize >= sizeof(attrp->group)) { + attrp->group.depth = depthattr; + } else if (type == HWLOC_OBJ_BRIDGE && attrsize >= sizeof(attrp->bridge)) { + attrp->bridge.upstream_type = ubtype; + attrp->bridge.downstream_type = HWLOC_OBJ_BRIDGE_PCI; /* nothing else so far */ + } else if (type == HWLOC_OBJ_OS_DEVICE && attrsize >= sizeof(attrp->osdev)) { + attrp->osdev.type = ostype; + } + } + return 0; +} + +int +hwloc_type_sscanf_as_depth(const char *string, hwloc_obj_type_t *typep, + hwloc_topology_t topology, int *depthp) +{ + union hwloc_obj_attr_u attr; + hwloc_obj_type_t type; + int depth; + int err; + + err = hwloc_type_sscanf(string, &type, &attr, sizeof(attr)); + if (err < 0) + return err; + + depth = hwloc_get_type_depth(topology, type); + if (type == HWLOC_OBJ_GROUP + && depth == HWLOC_TYPE_DEPTH_MULTIPLE + && attr.group.depth != (unsigned)-1) { + unsigned l; + depth = HWLOC_TYPE_DEPTH_UNKNOWN; + for(l=0; lnb_levels; l++) { + if (topology->levels[l][0]->type == HWLOC_OBJ_GROUP + && topology->levels[l][0]->attr->group.depth == attr.group.depth) { + depth = (int)l; + break; + } + } + } + + if (typep) + *typep = type; + *depthp = depth; + return 0; +} + +static const char* hwloc_obj_cache_type_letter(hwloc_obj_cache_type_t type) +{ + switch (type) { + case HWLOC_OBJ_CACHE_UNIFIED: return ""; + case HWLOC_OBJ_CACHE_DATA: return "d"; + case 
HWLOC_OBJ_CACHE_INSTRUCTION: return "i"; + default: return "unknown"; + } +} + +int +hwloc_obj_type_snprintf(char * __hwloc_restrict string, size_t size, hwloc_obj_t obj, int verbose) +{ + hwloc_obj_type_t type = obj->type; + switch (type) { + case HWLOC_OBJ_MISC: + case HWLOC_OBJ_MACHINE: + case HWLOC_OBJ_NUMANODE: + case HWLOC_OBJ_PACKAGE: + case HWLOC_OBJ_CORE: + case HWLOC_OBJ_PU: + return hwloc_snprintf(string, size, "%s", hwloc_obj_type_string(type)); + case HWLOC_OBJ_L1CACHE: + case HWLOC_OBJ_L2CACHE: + case HWLOC_OBJ_L3CACHE: + case HWLOC_OBJ_L4CACHE: + case HWLOC_OBJ_L5CACHE: + case HWLOC_OBJ_L1ICACHE: + case HWLOC_OBJ_L2ICACHE: + case HWLOC_OBJ_L3ICACHE: + return hwloc_snprintf(string, size, "L%u%s%s", obj->attr->cache.depth, + hwloc_obj_cache_type_letter(obj->attr->cache.type), + verbose ? "Cache" : ""); + case HWLOC_OBJ_GROUP: + if (obj->attr->group.depth != (unsigned) -1) + return hwloc_snprintf(string, size, "%s%u", hwloc_obj_type_string(type), obj->attr->group.depth); + else + return hwloc_snprintf(string, size, "%s", hwloc_obj_type_string(type)); + case HWLOC_OBJ_BRIDGE: + return hwloc_snprintf(string, size, obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI ? "PCIBridge" : "HostBridge"); + case HWLOC_OBJ_PCI_DEVICE: + return hwloc_snprintf(string, size, "PCI"); + case HWLOC_OBJ_OS_DEVICE: + switch (obj->attr->osdev.type) { + case HWLOC_OBJ_OSDEV_BLOCK: return hwloc_snprintf(string, size, "Block"); + case HWLOC_OBJ_OSDEV_NETWORK: return hwloc_snprintf(string, size, verbose ? "Network" : "Net"); + case HWLOC_OBJ_OSDEV_OPENFABRICS: return hwloc_snprintf(string, size, "OpenFabrics"); + case HWLOC_OBJ_OSDEV_DMA: return hwloc_snprintf(string, size, "DMA"); + case HWLOC_OBJ_OSDEV_GPU: return hwloc_snprintf(string, size, "GPU"); + case HWLOC_OBJ_OSDEV_COPROC: return hwloc_snprintf(string, size, verbose ? 
"Co-Processor" : "CoProc"); + default: + if (size > 0) + *string = '\0'; + return 0; + } + break; + default: + if (size > 0) + *string = '\0'; + return 0; + } +} + +int +hwloc_obj_attr_snprintf(char * __hwloc_restrict string, size_t size, hwloc_obj_t obj, const char * separator, int verbose) +{ + const char *prefix = ""; + char *tmp = string; + ssize_t tmplen = size; + int ret = 0; + int res; + + /* make sure we output at least an empty string */ + if (size) + *string = '\0'; + + /* print memory attributes */ + res = 0; + if (verbose) { + if (obj->type == HWLOC_OBJ_NUMANODE && obj->attr->numanode.local_memory) + res = hwloc_snprintf(tmp, tmplen, "%slocal=%lu%s%stotal=%lu%s", + prefix, + (unsigned long) hwloc_memory_size_printf_value(obj->attr->numanode.local_memory, verbose), + hwloc_memory_size_printf_unit(obj->attr->numanode.local_memory, verbose), + separator, + (unsigned long) hwloc_memory_size_printf_value(obj->total_memory, verbose), + hwloc_memory_size_printf_unit(obj->total_memory, verbose)); + else if (obj->total_memory) + res = hwloc_snprintf(tmp, tmplen, "%stotal=%lu%s", + prefix, + (unsigned long) hwloc_memory_size_printf_value(obj->total_memory, verbose), + hwloc_memory_size_printf_unit(obj->total_memory, verbose)); + } else { + if (obj->type == HWLOC_OBJ_NUMANODE && obj->attr->numanode.local_memory) + res = hwloc_snprintf(tmp, tmplen, "%s%lu%s", + prefix, + (unsigned long) hwloc_memory_size_printf_value(obj->attr->numanode.local_memory, verbose), + hwloc_memory_size_printf_unit(obj->attr->numanode.local_memory, verbose)); + } + if (res < 0) + return -1; + ret += res; + if (ret > 0) + prefix = separator; + if (res >= tmplen) + res = tmplen>0 ? 
(int)tmplen - 1 : 0; + tmp += res; + tmplen -= res; + + /* printf type-specific attributes */ + res = 0; + switch (obj->type) { + case HWLOC_OBJ_L1CACHE: + case HWLOC_OBJ_L2CACHE: + case HWLOC_OBJ_L3CACHE: + case HWLOC_OBJ_L4CACHE: + case HWLOC_OBJ_L5CACHE: + case HWLOC_OBJ_L1ICACHE: + case HWLOC_OBJ_L2ICACHE: + case HWLOC_OBJ_L3ICACHE: + if (verbose) { + char assoc[32]; + if (obj->attr->cache.associativity == -1) + snprintf(assoc, sizeof(assoc), "%sfully-associative", separator); + else if (obj->attr->cache.associativity == 0) + *assoc = '\0'; + else + snprintf(assoc, sizeof(assoc), "%sways=%d", separator, obj->attr->cache.associativity); + res = hwloc_snprintf(tmp, tmplen, "%ssize=%lu%s%slinesize=%u%s", + prefix, + (unsigned long) hwloc_memory_size_printf_value(obj->attr->cache.size, verbose), + hwloc_memory_size_printf_unit(obj->attr->cache.size, verbose), + separator, obj->attr->cache.linesize, + assoc); + } else + res = hwloc_snprintf(tmp, tmplen, "%s%lu%s", + prefix, + (unsigned long) hwloc_memory_size_printf_value(obj->attr->cache.size, verbose), + hwloc_memory_size_printf_unit(obj->attr->cache.size, verbose)); + break; + case HWLOC_OBJ_BRIDGE: + if (verbose) { + char up[128], down[64]; + /* upstream is PCI or HOST */ + if (obj->attr->bridge.upstream_type == HWLOC_OBJ_BRIDGE_PCI) { + char linkspeed[64]= ""; + if (obj->attr->pcidev.linkspeed) + snprintf(linkspeed, sizeof(linkspeed), "%slink=%.2fGB/s", separator, obj->attr->pcidev.linkspeed); + snprintf(up, sizeof(up), "busid=%04x:%02x:%02x.%01x%sid=%04x:%04x%sclass=%04x(%s)%s", + obj->attr->pcidev.domain, obj->attr->pcidev.bus, obj->attr->pcidev.dev, obj->attr->pcidev.func, separator, + obj->attr->pcidev.vendor_id, obj->attr->pcidev.device_id, separator, + obj->attr->pcidev.class_id, hwloc_pci_class_string(obj->attr->pcidev.class_id), linkspeed); + } else + *up = '\0'; + /* downstream is_PCI */ + snprintf(down, sizeof(down), "buses=%04x:[%02x-%02x]", + obj->attr->bridge.downstream.pci.domain, 
obj->attr->bridge.downstream.pci.secondary_bus, obj->attr->bridge.downstream.pci.subordinate_bus); + if (*up) + res = hwloc_snprintf(string, size, "%s%s%s", up, separator, down); + else + res = hwloc_snprintf(string, size, "%s", down); + } + break; + case HWLOC_OBJ_PCI_DEVICE: + if (verbose) { + char linkspeed[64]= ""; + if (obj->attr->pcidev.linkspeed) + snprintf(linkspeed, sizeof(linkspeed), "%slink=%.2fGB/s", separator, obj->attr->pcidev.linkspeed); + res = hwloc_snprintf(string, size, "busid=%04x:%02x:%02x.%01x%sid=%04x:%04x%sclass=%04x(%s)%s", + obj->attr->pcidev.domain, obj->attr->pcidev.bus, obj->attr->pcidev.dev, obj->attr->pcidev.func, separator, + obj->attr->pcidev.vendor_id, obj->attr->pcidev.device_id, separator, + obj->attr->pcidev.class_id, hwloc_pci_class_string(obj->attr->pcidev.class_id), linkspeed); + } + break; + default: + break; + } + if (res < 0) + return -1; + ret += res; + if (ret > 0) + prefix = separator; + if (res >= tmplen) + res = tmplen>0 ? (int)tmplen - 1 : 0; + tmp += res; + tmplen -= res; + + /* printf infos */ + if (verbose) { + unsigned i; + for(i=0; iinfos_count; i++) { + struct hwloc_info_s *info = &obj->infos[i]; + const char *quote = strchr(info->value, ' ') ? "\"" : ""; + res = hwloc_snprintf(tmp, tmplen, "%s%s=%s%s%s", + prefix, + info->name, + quote, info->value, quote); + if (res < 0) + return -1; + ret += res; + if (res >= tmplen) + res = tmplen>0 ? 
(int)tmplen - 1 : 0; + tmp += res; + tmplen -= res; + if (ret > 0) + prefix = separator; + } + } + + return ret; +} diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index b685d7e4..1072df08 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -19,12 +19,18 @@ set(SOURCES_BACKEND_CPU if (WITH_HWLOC) - find_package(HWLOC REQUIRED) + if (CMAKE_CXX_COMPILER_ID MATCHES MSVC) + add_subdirectory(src/3rdparty/hwloc) + include_directories(src/3rdparty/hwloc/include) + set(CPUID_LIB hwloc) + else() + find_package(HWLOC REQUIRED) + include_directories(${HWLOC_INCLUDE_DIR}) + set(CPUID_LIB ${HWLOC_LIBRARY}) + endif() set(WITH_LIBCPUID OFF) - include_directories(${HWLOC_INCLUDE_DIR}) - remove_definitions(/DXMRIG_FEATURE_LIBCPUID) add_definitions(/DXMRIG_FEATURE_HWLOC) @@ -32,7 +38,6 @@ if (WITH_HWLOC) add_definitions(/DXMRIG_HWLOC_DEBUG) endif() - set(CPUID_LIB "") set(SOURCES_CPUID src/backend/cpu/platform/BasicCpuInfo.cpp src/backend/cpu/platform/BasicCpuInfo.h From 2fb88f10b86c3884046f3318a89500116378ad8c Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 10:15:20 +0700 Subject: [PATCH 090/172] Fix hwloc deprecation warnings. 
--- src/crypto/common/VirtualMemory.cpp | 5 +++++ src/crypto/rx/Rx.cpp | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/src/crypto/common/VirtualMemory.cpp b/src/crypto/common/VirtualMemory.cpp index edacf0df..e3c70dfb 100644 --- a/src/crypto/common/VirtualMemory.cpp +++ b/src/crypto/common/VirtualMemory.cpp @@ -49,7 +49,12 @@ uint32_t xmrig::VirtualMemory::bindToNUMANode(int64_t affinity) const unsigned puId = static_cast(affinity); hwloc_obj_t pu = hwloc_get_pu_obj_by_os_index(topology, puId); + +# if HWLOC_API_VERSION >= 0x20000 + if (pu == nullptr || hwloc_set_membind(topology, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD | HWLOC_MEMBIND_BYNODESET) < 0) { +# else if (pu == nullptr || hwloc_set_membind_nodeset(topology, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD) < 0) { +# endif LOG_WARN("CPU #%02u warning: \"can't bind memory\"", puId); } diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index cc89fe34..5fc95006 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -91,7 +91,11 @@ public: hwloc_obj_t node = hwloc_get_numanode_obj_by_os_index(topology, nodeId); if (node) { if (HwlocCpuInfo::has(HwlocCpuInfo::SET_THISTHREAD_MEMBIND)) { +# if HWLOC_API_VERSION >= 0x20000 + hwloc_set_membind(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD | HWLOC_MEMBIND_BYNODESET); +# else hwloc_set_membind_nodeset(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD); +# endif } Platform::setThreadAffinity(static_cast(hwloc_bitmap_first(node->cpuset))); From bbcacbc1d463dbcfdf32be6c0b740898276206af Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 11:01:30 +0700 Subject: [PATCH 091/172] Added command line options "--randomx-init" and "--randomx-no-numa". 
--- src/base/kernel/interfaces/IConfig.h | 3 ++- src/core/config/Config.cpp | 3 +++ src/core/config/ConfigTransform.cpp | 9 +++++++++ src/core/config/Config_platform.h | 2 ++ src/core/config/usage.h | 5 +++++ 5 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/base/kernel/interfaces/IConfig.h b/src/base/kernel/interfaces/IConfig.h index 2697bf01..86b8067c 100644 --- a/src/base/kernel/interfaces/IConfig.h +++ b/src/base/kernel/interfaces/IConfig.h @@ -83,8 +83,9 @@ public: DryRunKey = 5000, HugePagesKey = 1009, ThreadsKey = 't', -// HardwareAESKey = 1011, AssemblyKey = 1015, + RandomXInitKey = 1022, + RandomXNumaKey = 1023, // xmrig amd OclPlatformKey = 1400, diff --git a/src/core/config/Config.cpp b/src/core/config/Config.cpp index 34221e35..44aa6029 100644 --- a/src/core/config/Config.cpp +++ b/src/core/config/Config.cpp @@ -41,7 +41,10 @@ namespace xmrig { static const char *kCPU = "cpu"; + +#ifdef XMRIG_ALGO_RANDOMX static const char *kRandomX = "randomx"; +#endif } diff --git a/src/core/config/ConfigTransform.cpp b/src/core/config/ConfigTransform.cpp index 38bb42a1..6d420739 100644 --- a/src/core/config/ConfigTransform.cpp +++ b/src/core/config/ConfigTransform.cpp @@ -36,6 +36,7 @@ static const char *kAffinity = "affinity"; static const char *kAsterisk = "*"; static const char *kCpu = "cpu"; static const char *kIntensity = "intensity"; +static const char *kRandomX = "randomx"; static inline uint64_t intensity(uint64_t av) @@ -165,6 +166,14 @@ void xmrig::ConfigTransform::transform(rapidjson::Document &doc, int key, const return set(doc, kCpu, "asm", arg); # endif +# ifdef XMRIG_ALGO_RANDOMX + case IConfig::RandomXInitKey: /* --randomx-init */ + return set(doc, kRandomX, "init", static_cast(strtol(arg, nullptr, 10))); + + case IConfig::RandomXNumaKey: /* --randomx-no-numa */ + return set(doc, kRandomX, "numa", false); +# endif + default: break; } diff --git a/src/core/config/Config_platform.h b/src/core/config/Config_platform.h index 
fdd15c96..b7415f4d 100644 --- a/src/core/config/Config_platform.h +++ b/src/core/config/Config_platform.h @@ -81,6 +81,8 @@ static const option options[] = { { "asm", 1, nullptr, IConfig::AssemblyKey }, { "daemon", 0, nullptr, IConfig::DaemonKey }, { "daemon-poll-interval", 1, nullptr, IConfig::DaemonPollKey }, + { "randomx-init", 1, nullptr, IConfig::RandomXInitKey }, + { "randomx-no-numa", 0, nullptr, IConfig::RandomXNumaKey }, { nullptr, 0, nullptr, 0 } }; diff --git a/src/core/config/usage.h b/src/core/config/usage.h index 2d0d5623..275a58f9 100644 --- a/src/core/config/usage.h +++ b/src/core/config/usage.h @@ -108,6 +108,11 @@ Options:\n\ --http-access-token=T access token for HTTP API\n\ --http-no-restricted enable full remote access to HTTP API (only if access token set)\n" #endif +#ifdef XMRIG_ALGO_RANDOMX +"\ + --randomx-init=N threads count to initialize RandomX dataset\n\ + --randomx-no-numa disable NUMA support for RandomX\n" +#endif "\ --dry-run test configuration and exit\n\ -h, --help display this help and exit\n\ From b915fa97f26b2836f4bdb8cdd1b45f92c7694cf6 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 11:42:36 +0700 Subject: [PATCH 092/172] Updated default config. 
--- src/config.json | 18 +++++++++++------- src/core/config/Config_default.h | 18 +++++++++++------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/src/config.json b/src/config.json index 57b2984b..af3bd32a 100644 --- a/src/config.json +++ b/src/config.json @@ -3,9 +3,20 @@ "id": null, "worker-id": null }, + "http": { + "enabled": false, + "host": "127.0.0.1", + "port": 0, + "access-token": null, + "restricted": true + }, "autosave": true, "background": false, "colors": true, + "randomx": { + "init": -1, + "numa": true + }, "cpu": { "enabled": true, "huge-pages": true, @@ -17,13 +28,6 @@ }, "donate-level": 5, "donate-over-proxy": 1, - "http": { - "enabled": false, - "host": "127.0.0.1", - "port": 0, - "access-token": null, - "restricted": true - }, "log-file": null, "pools": [ { diff --git a/src/core/config/Config_default.h b/src/core/config/Config_default.h index 06c29566..2f853446 100644 --- a/src/core/config/Config_default.h +++ b/src/core/config/Config_default.h @@ -37,9 +37,20 @@ R"===( "id": null, "worker-id": null }, + "http": { + "enabled": false, + "host": "127.0.0.1", + "port": 0, + "access-token": null, + "restricted": true + }, "autosave": true, "background": false, "colors": true, + "randomx": { + "init": -1, + "numa": true + }, "cpu": { "enabled": true, "huge-pages": true, @@ -51,13 +62,6 @@ R"===( }, "donate-level": 5, "donate-over-proxy": 1, - "http": { - "enabled": false, - "host": "127.0.0.1", - "port": 0, - "access-token": null, - "restricted": true - }, "log-file": null, "pools": [ { From 25f051abaabc0da325f1ae31c36764ec25a41eb9 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 12:35:55 +0700 Subject: [PATCH 093/172] Enable hwloc by default. 
--- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a1779f53..7600d556 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 2.8) project(xmrig) option(WITH_LIBCPUID "Use Libcpuid" ON) -option(WITH_HWLOC "Use hwloc" OFF) +option(WITH_HWLOC "Use hwloc" ON) option(WITH_CN_LITE "CryptoNight-Lite support" ON) option(WITH_CN_HEAVY "CryptoNight-Heavy support" ON) option(WITH_CN_PICO "CryptoNight-Pico support" ON) From 30ed5b33c049a664bca6ec7f0127575d7b946c54 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 13:00:36 +0700 Subject: [PATCH 094/172] Fixed build without ASM. --- src/backend/cpu/Cpu.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/backend/cpu/Cpu.cpp b/src/backend/cpu/Cpu.cpp index 886ad15c..4d9effce 100644 --- a/src/backend/cpu/Cpu.cpp +++ b/src/backend/cpu/Cpu.cpp @@ -63,7 +63,6 @@ rapidjson::Value xmrig::Cpu::toJSON(rapidjson::Document &doc) cpu.AddMember("aes", i->hasAES(), allocator); cpu.AddMember("avx2", i->hasAVX2(), allocator); cpu.AddMember("x64", i->isX64(), allocator); - cpu.AddMember("assembly", StringRef(assembly.toString()), allocator); cpu.AddMember("l2", static_cast(i->L2()), allocator); cpu.AddMember("l3", static_cast(i->L3()), allocator); cpu.AddMember("cores", static_cast(i->cores()), allocator); @@ -72,6 +71,12 @@ rapidjson::Value xmrig::Cpu::toJSON(rapidjson::Document &doc) cpu.AddMember("nodes", static_cast(i->nodes()), allocator); cpu.AddMember("backend", StringRef(i->backend()), allocator); +# ifdef XMRIG_FEATURE_ASM + cpu.AddMember("assembly", StringRef(assembly.toString()), allocator); +# else + cpu.AddMember("assembly", "none", allocator); +# endif + return cpu; } From ef2454b0251515d562e602c2b8b05ab5ee49817a Mon Sep 17 00:00:00 2001 From: xmrig Date: Sun, 28 Jul 2019 13:30:17 +0700 Subject: [PATCH 095/172] Update CPU.md --- doc/CPU.md | 16 ++++++++++++++++ 1 file changed, 16 
insertions(+) diff --git a/doc/CPU.md b/doc/CPU.md index 82c0a752..b93651f4 100644 --- a/doc/CPU.md +++ b/doc/CPU.md @@ -58,3 +58,19 @@ Example below demonstrate all primary ideas of flexible profiles configuration: ### Intensity This option was known as `low_power_mode`, possible values is range from 1 to 5, for convinient if value 1 used, possible omit this option and specify CPU thread config by only one number: CPU affinity, instead of object. +## Shared options + +#### `enabled` +Enable (`true`) or disable (`false`) CPU backend, by default `true`. + +#### `huge-pages` +Enable (`true`) or disable (`false`) huge pages support, by default `true`. + +#### `hw-aes` +Force enable (`true`) or disable (`false`) hardware AES support. Default value `null` means miner autodetect this feature. Usually don't need change this option, this option useful for some rare cases when miner can't detect hardware AES, but it available. If you force enable this option, but your hardware not support it, miner will crash. + +#### `priority` +Mining threads priority, value from `1` (lowest priority) to `5` (highest possible priority). Default value `null` means miner don't change threads priority at all. + +#### `asm` +Enable/configure or disable ASM optimizations. Possible values: `true`, `false`, `"intel"`, `"ryzen"`, `"bulldozer"`. From ab02bd9847021e0b03146edfaf9d495b633942cc Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 14:42:29 +0700 Subject: [PATCH 096/172] Fixed miner freeze when switch between RandomX variants. 
--- src/backend/common/interfaces/IBackend.h | 1 + src/backend/cpu/CpuBackend.cpp | 9 +++++++++ src/backend/cpu/CpuBackend.h | 1 + src/core/Miner.cpp | 4 ++++ 4 files changed, 15 insertions(+) diff --git a/src/backend/common/interfaces/IBackend.h b/src/backend/common/interfaces/IBackend.h index e19e00ba..2ec8bf04 100644 --- a/src/backend/common/interfaces/IBackend.h +++ b/src/backend/common/interfaces/IBackend.h @@ -52,6 +52,7 @@ public: virtual const Hashrate *hashrate() const = 0; virtual const String &profileName() const = 0; virtual const String &type() const = 0; + virtual void prepare(const Job &nextJob) = 0; virtual void printHashrate(bool details) = 0; virtual void setJob(const Job &job) = 0; virtual void start(IWorker *worker) = 0; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 3ab06b46..005ff1de 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -170,6 +170,15 @@ const xmrig::String &xmrig::CpuBackend::type() const } +void xmrig::CpuBackend::prepare(const Job &nextJob) +{ + if (nextJob.algorithm().family() == Algorithm::RANDOM_X && nextJob.algorithm() != d_ptr->algo) { + d_ptr->workers.stop(); + d_ptr->threads.clear(); + } +} + + void xmrig::CpuBackend::printHashrate(bool details) { if (!details || !hashrate()) { diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h index 613e7cb6..9e71c247 100644 --- a/src/backend/cpu/CpuBackend.h +++ b/src/backend/cpu/CpuBackend.h @@ -49,6 +49,7 @@ protected: const Hashrate *hashrate() const override; const String &profileName() const override; const String &type() const override; + void prepare(const Job &nextJob) override; void printHashrate(bool details) override; void setJob(const Job &job) override; void start(IWorker *worker) override; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 9d977d12..e93036c1 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -352,6 +352,10 @@ void xmrig::Miner::setEnabled(bool 
enabled) void xmrig::Miner::setJob(const Job &job, bool donate) { + for (IBackend *backend : d_ptr->backends) { + backend->prepare(job); + } + uv_rwlock_wrlock(&d_ptr->rwlock); const uint8_t index = donate ? 1 : 0; From 28cb6f67e1a1e97e7062d95a1e2ad118a299dba9 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 28 Jul 2019 16:15:11 +0700 Subject: [PATCH 097/172] Store highest hashrate separately for each algorithm. --- src/core/Miner.cpp | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index e93036c1..64bd54c8 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -193,7 +193,7 @@ public: total.PushBack(Hashrate::normalize(t[2]), allocator); hashrate.AddMember("total", total, allocator); - hashrate.AddMember("highest", Hashrate::normalize(maxHashrate), allocator); + hashrate.AddMember("highest", Hashrate::normalize(maxHashrate[algorithm]), allocator); if (version == 1) { hashrate.AddMember("threads", threads, allocator); @@ -217,12 +217,13 @@ public: # endif + Algorithm algorithm; Algorithms algorithms; bool active = false; bool enabled = true; Controller *controller; - double maxHashrate = 0.0; Job job; + mutable std::map maxHashrate; std::vector backends; String userJobId; Timer *timer = nullptr; @@ -318,10 +319,10 @@ void xmrig::Miner::printHashrate(bool details) } LOG_INFO(WHITE_BOLD("speed") " 10s/60s/15m " CYAN_BOLD("%s") CYAN(" %s %s ") CYAN_BOLD("H/s") " max " CYAN_BOLD("%s H/s"), - Hashrate::format(speed[0], num, sizeof(num) / 4), - Hashrate::format(speed[1], num + 8, sizeof(num) / 4), - Hashrate::format(speed[2], num + 8 * 2, sizeof(num) / 4 ), - Hashrate::format(d_ptr->maxHashrate, num + 8 * 3, sizeof(num) / 4) + Hashrate::format(speed[0], num, sizeof(num) / 4), + Hashrate::format(speed[1], num + 8, sizeof(num) / 4), + Hashrate::format(speed[2], num + 8 * 2, sizeof(num) / 4 ), + Hashrate::format(d_ptr->maxHashrate[d_ptr->algorithm], num + 8 * 3, sizeof(num) / 4) ); } @@ -352,6 
+353,8 @@ void xmrig::Miner::setEnabled(bool enabled) void xmrig::Miner::setJob(const Job &job, bool donate) { + d_ptr->algorithm = job.algorithm(); + for (IBackend *backend : d_ptr->backends) { backend->prepare(job); } @@ -420,7 +423,7 @@ void xmrig::Miner::onTimer(const Timer *) } } - d_ptr->maxHashrate = std::max(d_ptr->maxHashrate, maxHashrate); + d_ptr->maxHashrate[d_ptr->algorithm] = std::max(d_ptr->maxHashrate[d_ptr->algorithm], maxHashrate); if ((d_ptr->ticks % (d_ptr->controller->config()->printTime() * 2)) == 0) { printHashrate(false); From bde89abc157314e58b523e9a20ece161e696e9d6 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sun, 28 Jul 2019 17:46:39 +0700 Subject: [PATCH 098/172] Update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11cc23d8..8a480f8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# v2.99.2-beta +- [#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via **hwloc**. +- Fixed miner freeze when switch between RandomX variants. +- Fixed dataset initialization speed on Linux if thread affinity was used. + # v2.99.1-beta - [#1072](https://github.com/xmrig/xmrig/issues/1072) Fixed RandomX `seed_hash` re-initialization. 
From 4f1dee14ba8e9da57492a35ae4dd7ba51a34cc64 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 29 Jul 2019 08:55:03 +0700 Subject: [PATCH 099/172] v2.99.2-beta --- src/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.h b/src/version.h index d1dfc781..61621ad7 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.2-evo" +#define APP_VERSION "2.99.2-beta" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" From c13c62bf48e94b7f1a41a6268394e0fe20a070e3 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 29 Jul 2019 12:51:19 +0700 Subject: [PATCH 100/172] v2.99.3-evo --- src/version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/version.h b/src/version.h index 61621ad7..1d6f2591 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.2-beta" +#define APP_VERSION "2.99.3-evo" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" @@ -36,7 +36,7 @@ #define APP_VER_MAJOR 2 #define APP_VER_MINOR 99 -#define APP_VER_PATCH 2 +#define APP_VER_PATCH 3 #ifdef _MSC_VER # if (_MSC_VER >= 1920) From 0ae1e5f1d41e71bd4d277b95b1284539cf06603e Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 29 Jul 2019 15:06:46 +0700 Subject: [PATCH 101/172] Sync changes. 
--- src/api/Api.cpp | 14 ++++++++------ src/api/Api.h | 2 +- src/crypto/common/Algorithm.cpp | 15 +++++++++++++-- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/src/api/Api.cpp b/src/api/Api.cpp index 9151aa7e..4c8e7323 100644 --- a/src/api/Api.cpp +++ b/src/api/Api.cpp @@ -40,7 +40,6 @@ #include "base/tools/Chrono.h" #include "core/config/Config.h" #include "core/Controller.h" -#include "crypto/common/Algorithm.h" #include "crypto/common/keccak.h" #include "version.h" @@ -54,8 +53,8 @@ xmrig::Api::Api(Base *base) : m_base(base), m_id(), m_workerId(), - m_httpd(nullptr), - m_timestamp(Chrono::steadyMSecs()) + m_timestamp(Chrono::currentMSecsSinceEpoch()), + m_httpd(nullptr) { base->addListener(this); @@ -120,7 +119,7 @@ void xmrig::Api::exec(IApiRequest &request) request.accept(); request.reply().AddMember("id", StringRef(m_id), allocator); request.reply().AddMember("worker_id", StringRef(m_workerId), allocator); - request.reply().AddMember("uptime", (Chrono::steadyMSecs() - m_timestamp) / 1000, allocator); + request.reply().AddMember("uptime", (Chrono::currentMSecsSinceEpoch() - m_timestamp) / 1000, allocator); Value features(kArrayType); # ifdef XMRIG_FEATURE_API @@ -135,6 +134,9 @@ void xmrig::Api::exec(IApiRequest &request) # ifdef XMRIG_FEATURE_LIBCPUID features.PushBack("cpuid", allocator); # endif +# ifdef XMRIG_FEATURE_HWLOC + features.PushBack("hwloc", allocator); +# endif # ifdef XMRIG_FEATURE_TLS features.PushBack("tls", allocator); # endif @@ -181,8 +183,8 @@ void xmrig::Api::genId(const String &id) memcpy(input + sizeof(uint16_t), interfaces[i].phys_addr, addrSize); memcpy(input + sizeof(uint16_t) + addrSize, APP_KIND, strlen(APP_KIND)); - xmrig::keccak(input, inSize, hash); - xmrig::Buffer::toHex(hash, 8, m_id); + keccak(input, inSize, hash); + Buffer::toHex(hash, 8, m_id); delete [] input; break; diff --git a/src/api/Api.h b/src/api/Api.h index f2ed3926..334609c9 100644 --- a/src/api/Api.h +++ b/src/api/Api.h @@ -69,9 +69,9 @@ 
private: Base *m_base; char m_id[32]; char m_workerId[128]; + const uint64_t m_timestamp; Httpd *m_httpd; std::vector m_listeners; - uint64_t m_timestamp; }; diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index 2c259d32..c7990052 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -32,7 +32,6 @@ #include "crypto/cn/CnAlgo.h" #include "crypto/common/Algorithm.h" -#include "crypto/rx/RxAlgo.h" #include "rapidjson/document.h" @@ -140,7 +139,19 @@ size_t xmrig::Algorithm::memory() const # ifdef XMRIG_ALGO_RANDOMX if (f == RANDOM_X) { - return RxAlgo::l3(m_id); + constexpr size_t oneMiB = 0x100000; + + switch (m_id) { + case RX_0: + case RX_LOKI: + return oneMiB * 2; + + case RX_WOW: + return oneMiB; + + default: + break; + } } # endif From 6b3b1c3fc412981e51892bafd62b3dc4c1f87e7d Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 29 Jul 2019 19:24:53 +0700 Subject: [PATCH 102/172] #1082 Fixed hwloc autoconfig with AMD FX CPUs. --- src/backend/cpu/platform/HwlocCpuInfo.cpp | 28 +++++++++++++---------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index eee59a3a..48a17f23 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -84,6 +84,15 @@ static inline void findByType(hwloc_obj_t obj, hwloc_obj_type_t type, func lambd } +static inline std::vector findByType(hwloc_obj_t obj, hwloc_obj_type_t type) +{ + std::vector out; + findByType(obj, type, [&out](hwloc_obj_t found) { out.emplace_back(found); }); + + return out; +} + + static inline size_t countByType(hwloc_topology_t topology, hwloc_obj_type_t type) { const int count = hwloc_get_nbobjs_by_type(topology, type); @@ -132,8 +141,7 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), } # endif - std::vector packages; - findByType(hwloc_get_root_obj(m_topology), HWLOC_OBJ_PACKAGE, [&packages](hwloc_obj_t 
found) { packages.emplace_back(found); }); + const std::vector packages = findByType(hwloc_get_root_obj(m_topology), HWLOC_OBJ_PACKAGE); if (packages.size()) { const char *value = hwloc_obj_get_info_by_name(packages[0], "CPUModel"); if (value) { @@ -249,14 +257,9 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith if (cacheHashes >= PUs) { for (hwloc_obj_t core : cores) { - if (core->arity == 0) { - continue; - } - - for (unsigned i = 0; i < core->arity; ++i) { - if (core->children[i]->type == HWLOC_OBJ_PU) { - threads.push_back(CpuThread(1, core->children[i]->os_index)); - } + const std::vector units = findByType(core, HWLOC_OBJ_PU); + for (hwloc_obj_t pu : units) { + threads.push_back(CpuThread(1, pu->os_index)); } } @@ -268,7 +271,8 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith bool allocated_pu = false; for (hwloc_obj_t core : cores) { - if (core->arity <= pu_id || core->children[pu_id]->type != HWLOC_OBJ_PU) { + const std::vector units = findByType(core, HWLOC_OBJ_PU); + if (units.size() <= pu_id) { continue; } @@ -276,7 +280,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith PUs--; allocated_pu = true; - threads.push_back(CpuThread(1, core->children[pu_id]->os_index)); + threads.push_back(CpuThread(1, units[pu_id]->os_index)); if (cacheHashes == 0) { break; From bd739d217b9be4aacca1a4d5537ba6d60a57992c Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 30 Jul 2019 09:06:50 +0700 Subject: [PATCH 103/172] Added command line option --export-topology. 
--- src/base/kernel/Entry.cpp | 46 ++++++++++++++++++++++++++++++++++++++- src/base/kernel/Entry.h | 3 ++- src/core/config/usage.h | 4 ++++ 3 files changed, 51 insertions(+), 2 deletions(-) diff --git a/src/base/kernel/Entry.cpp b/src/base/kernel/Entry.cpp index da225e40..f9e97c2d 100644 --- a/src/base/kernel/Entry.cpp +++ b/src/base/kernel/Entry.cpp @@ -41,6 +41,9 @@ #include "version.h" +namespace xmrig { + + static int showVersion() { printf(APP_NAME " " APP_VERSION "\n built on " __DATE__ @@ -92,6 +95,36 @@ static int showVersion() } +#ifdef XMRIG_FEATURE_HWLOC +static int exportTopology(const Process &process) +{ + const String path = process.location(Process::ExeLocation, "topology.xml"); + + hwloc_topology_t topology; + hwloc_topology_init(&topology); + hwloc_topology_load(topology); + +# if HWLOC_API_VERSION >= 0x20000 + if (hwloc_topology_export_xml(topology, path, 0) == -1) { +# else + if (hwloc_topology_export_xml(topology, path) == -1) { +# endif + printf("failed to export hwloc topology.\n"); + } + else { + printf("hwloc topology successfully exported to \"%s\"\n", path.data()); + } + + hwloc_topology_destroy(topology); + + return 0; +} +#endif + + +} // namespace xmrig + + xmrig::Entry::Id xmrig::Entry::get(const Process &process) { const Arguments &args = process.arguments(); @@ -103,11 +136,17 @@ xmrig::Entry::Id xmrig::Entry::get(const Process &process) return Version; } +# ifdef XMRIG_FEATURE_HWLOC + if (args.hasArg("--export-topology")) { + return Topo; + } +# endif + return Default; } -int xmrig::Entry::exec(const Process &, Id id) +int xmrig::Entry::exec(const Process &process, Id id) { switch (id) { case Usage: @@ -117,6 +156,11 @@ int xmrig::Entry::exec(const Process &, Id id) case Version: return showVersion(); +# ifdef XMRIG_FEATURE_HWLOC + case Topo: + return exportTopology(process); +# endif + default: break; } diff --git a/src/base/kernel/Entry.h b/src/base/kernel/Entry.h index 0208ecdb..c0bde080 100644 --- a/src/base/kernel/Entry.h +++ 
b/src/base/kernel/Entry.h @@ -38,7 +38,8 @@ public: enum Id { Default, Usage, - Version + Version, + Topo }; static Id get(const Process &process); diff --git a/src/core/config/usage.h b/src/core/config/usage.h index 275a58f9..b41ec6db 100644 --- a/src/core/config/usage.h +++ b/src/core/config/usage.h @@ -113,6 +113,10 @@ Options:\n\ --randomx-init=N threads count to initialize RandomX dataset\n\ --randomx-no-numa disable NUMA support for RandomX\n" #endif +#ifdef XMRIG_FEATURE_HWLOC +"\ + --export-topology export hwloc topology to a XML file and exit\n" +#endif "\ --dry-run test configuration and exit\n\ -h, --help display this help and exit\n\ From aabf183462d4a19c1dbb93447f54afea31d2a2cc Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 30 Jul 2019 09:17:54 +0700 Subject: [PATCH 104/172] Added fallback to basic auto configuration. --- src/backend/cpu/platform/HwlocCpuInfo.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 48a17f23..98f55989 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -33,6 +33,7 @@ #include "backend/cpu/platform/HwlocCpuInfo.h" +#include "base/io/log/Log.h" namespace xmrig { @@ -201,6 +202,12 @@ xmrig::CpuThreads xmrig::HwlocCpuInfo::threads(const Algorithm &algorithm) const processTopLevelCache(cache, algorithm, threads); } + if (threads.empty()) { + LOG_WARN("hwloc auto configuration for algorithm \"%s\" failed.", algorithm.shortName()); + + return BasicCpuInfo::threads(algorithm); + } + return threads; } From 5a88ed7eadcf732eaf5b322b98be6a6e140550f5 Mon Sep 17 00:00:00 2001 From: xmrig Date: Tue, 30 Jul 2019 09:25:39 +0700 Subject: [PATCH 105/172] Update CHANGELOG.md --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a480f8d..c1cab4a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# v2.99.3-beta +- 
[#1082](https://github.com/xmrig/xmrig/issues/1082) Fixed hwloc auto configuration on AMD FX CPUs. +- Added command line option `--export-topology` for export hwloc topology to a XML file. + # v2.99.2-beta - [#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via **hwloc**. - Fixed miner freeze when switch between RandomX variants. From 5351288fbca44bbbbfe7f2654134ab7bdf93f9aa Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 30 Jul 2019 09:28:10 +0700 Subject: [PATCH 106/172] v2.99.3-beta --- src/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.h b/src/version.h index 1d6f2591..38c467df 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.3-evo" +#define APP_VERSION "2.99.3-beta" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" From 1c7ca3a0a77fb1f7d227e1c6eee1a0a30f127afe Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 30 Jul 2019 19:40:03 +0700 Subject: [PATCH 107/172] v2.99.4-evo --- src/version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/version.h b/src/version.h index 38c467df..350814be 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.3-beta" +#define APP_VERSION "2.99.4-evo" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" @@ -36,7 +36,7 @@ #define APP_VER_MAJOR 2 #define APP_VER_MINOR 99 -#define APP_VER_PATCH 3 +#define APP_VER_PATCH 4 #ifdef _MSC_VER # if (_MSC_VER >= 1920) From 962f0cdd8ee5fe8ba287b47d4b491fca55776828 Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 30 Jul 2019 21:25:27 +0700 Subject: [PATCH 108/172] Use std::thread and std:mutex instead of uv_thread_t and 
uv_mutex_t. --- src/backend/common/Thread.h | 8 ++-- src/core/Miner.cpp | 8 ++++ src/crypto/rx/Rx.cpp | 78 ++++++++++++++++--------------------- src/crypto/rx/Rx.h | 3 +- 4 files changed, 47 insertions(+), 50 deletions(-) diff --git a/src/backend/common/Thread.h b/src/backend/common/Thread.h index 36367ece..b7165915 100644 --- a/src/backend/common/Thread.h +++ b/src/backend/common/Thread.h @@ -26,7 +26,7 @@ #define XMRIG_THREAD_H -#include +#include #include "backend/common/interfaces/IWorker.h" @@ -43,21 +43,21 @@ class Thread { public: inline Thread(IBackend *backend, size_t index, const T &config) : m_index(index), m_config(config), m_backend(backend) {} - inline ~Thread() { uv_thread_join(&m_thread); delete m_worker; } + inline ~Thread() { m_thread.join(); delete m_worker; } inline const T &config() const { return m_config; } inline IBackend *backend() const { return m_backend; } inline IWorker *worker() const { return m_worker; } inline size_t index() const { return m_index; } inline void setWorker(IWorker *worker) { m_worker = worker; } - inline void start(void (*callback) (void *)) { uv_thread_create(&m_thread, callback, this); } + inline void start(void (*callback) (void *)) { m_thread = std::thread(callback, this); } private: const size_t m_index = 0; const T m_config; IBackend *m_backend; IWorker *m_worker = nullptr; - uv_thread_t m_thread; + std::thread m_thread; }; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 64bd54c8..1bca9fc8 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -59,6 +59,10 @@ public: inline MinerPrivate(Controller *controller) : controller(controller) { uv_rwlock_init(&rwlock); + +# ifdef XMRIG_ALGO_RANDOMX + Rx::init(); +# endif } @@ -71,6 +75,10 @@ public: for (IBackend *backend : backends) { delete backend; } + +# ifdef XMRIG_ALGO_RANDOMX + Rx::destroy(); +# endif } diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 5fc95006..676eca2c 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ 
-26,8 +26,8 @@ #include +#include #include -#include #ifdef XMRIG_FEATURE_HWLOC @@ -50,7 +50,11 @@ namespace xmrig { +class RxPrivate; + + static const char *tag = BLUE_BG(WHITE_BOLD_S " rx ") " "; +static RxPrivate *d_ptr = nullptr; class RxPrivate @@ -58,7 +62,6 @@ class RxPrivate public: inline RxPrivate() { - uv_mutex_init(&mutex); } @@ -69,21 +72,15 @@ public: } datasets.clear(); - - uv_mutex_destroy(&mutex); } - inline void lock() { uv_mutex_lock(&mutex); } - inline void unlock() { uv_mutex_unlock(&mutex); } - - - static void allocate(RxPrivate *self, uint32_t nodeId) + static void allocate(uint32_t nodeId) { const uint64_t ts = Chrono::steadyMSecs(); # ifdef XMRIG_FEATURE_HWLOC - if (self->numa) { + if (d_ptr->numa) { hwloc_topology_t topology; hwloc_topology_init(&topology); hwloc_topology_load(topology); @@ -113,8 +110,8 @@ public: RxCache::size() / 1024 / 1024 ); - RxDataset *dataset = new RxDataset(self->hugePages); - self->datasets[nodeId] = dataset; + RxDataset *dataset = new RxDataset(d_ptr->hugePages); + d_ptr->datasets[nodeId] = dataset; if (dataset->get() != nullptr) { const auto hugePages = dataset->hugePages(); @@ -140,40 +137,33 @@ public: bool hugePages = true; bool numa = true; std::map datasets; - uv_mutex_t mutex; + std::mutex mutex; }; -static RxPrivate *d_ptr = new RxPrivate(); - - } // namespace xmrig bool xmrig::Rx::isReady(const Job &job, uint32_t nodeId) { - d_ptr->lock(); - const bool rc = isReady(job.seedHash(), job.algorithm(), d_ptr->numa ? nodeId : 0); - d_ptr->unlock(); + std::lock_guard lock(d_ptr->mutex); - return rc; + return isReady(job.seedHash(), job.algorithm(), d_ptr->numa ? nodeId : 0); } xmrig::RxDataset *xmrig::Rx::dataset(uint32_t nodeId) { - d_ptr->lock(); - RxDataset *dataset = d_ptr->datasets[d_ptr->numa ? nodeId : 0]; - d_ptr->unlock(); + std::lock_guard lock(d_ptr->mutex); - return dataset; + return d_ptr->datasets[d_ptr->numa ? 
nodeId : 0]; } std::pair xmrig::Rx::hugePages() { std::pair pages(0, 0); - d_ptr->lock(); + std::lock_guard lock(d_ptr->mutex); for (auto const &item : d_ptr->datasets) { if (!item.second) { @@ -185,19 +175,31 @@ std::pair xmrig::Rx::hugePages() pages.second += p.second; } - d_ptr->unlock(); - return pages; } +void xmrig::Rx::destroy() +{ + delete d_ptr; + + d_ptr = nullptr; +} + + +void xmrig::Rx::init() +{ + d_ptr = new RxPrivate(); +} + + void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages, bool numa) { if (job.algorithm().family() != Algorithm::RANDOM_X) { return; } - d_ptr->lock(); + std::lock_guard lock(d_ptr->mutex); size_t ready = 0; @@ -208,8 +210,6 @@ void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages, bool numa) } if (!d_ptr->datasets.empty() && ready == d_ptr->datasets.size()) { - d_ptr->unlock(); - return; } @@ -231,16 +231,6 @@ void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages, bool numa) std::thread thread(initDataset, 0, job.seedHash(), job.algorithm(), threads); thread.detach(); } - - d_ptr->unlock(); -} - - -void xmrig::Rx::stop() -{ - delete d_ptr; - - d_ptr = nullptr; } @@ -252,19 +242,19 @@ bool xmrig::Rx::isReady(const uint8_t *seed, const Algorithm &algorithm, uint32_ void xmrig::Rx::initDataset(uint32_t nodeId, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads) { - d_ptr->lock(); + std::lock_guard lock(d_ptr->mutex); RxDataset *dataset = d_ptr->datasets[nodeId]; if (!dataset) { # ifdef XMRIG_FEATURE_HWLOC if (d_ptr->numa) { - std::thread thread(RxPrivate::allocate, d_ptr, nodeId); + std::thread thread(RxPrivate::allocate, nodeId); thread.join(); } else # endif { - RxPrivate::allocate(d_ptr, nodeId); + RxPrivate::allocate(nodeId); } dataset = d_ptr->datasets[nodeId]; @@ -295,6 +285,4 @@ void xmrig::Rx::initDataset(uint32_t nodeId, const uint8_t *seed, const Algorith LOG_INFO("%s" CYAN_BOLD("#%u") GREEN(" init done") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, nodeId, 
Chrono::steadyMSecs() - ts); } - - d_ptr->unlock(); } diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index 43e4be1c..c484c3b4 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -47,8 +47,9 @@ public: static bool isReady(const Job &job, uint32_t nodeId); static RxDataset *dataset(uint32_t nodeId); static std::pair hugePages(); + static void destroy(); + static void init(); static void init(const Job &job, int initThreads, bool hugePages, bool numa); - static void stop(); private: static bool isReady(const uint8_t *seed, const Algorithm &algorithm, uint32_t nodeId); From c138161ee23c40d2758683fe409ecf9980accea4 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 31 Jul 2019 09:29:09 +0700 Subject: [PATCH 109/172] Replacement for PR #1087 --- src/backend/cpu/platform/HwlocCpuInfo.cpp | 18 +++++++++++++++++- src/crypto/common/VirtualMemory.cpp | 4 ++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 98f55989..23ee554f 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -32,6 +32,12 @@ #include +#if HWLOC_API_VERSION < 0x00010b00 +# define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET +# define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE +#endif + + #include "backend/cpu/platform/HwlocCpuInfo.h" #include "base/io/log/Log.h" @@ -152,7 +158,17 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), # endif hwloc_obj_t root = hwloc_get_root_obj(m_topology); - snprintf(m_backend, sizeof m_backend, "hwloc/%s", hwloc_obj_get_info_by_name(root, "hwlocVersion")); + +# if HWLOC_API_VERSION >= 0x00010b00 + const char *version = hwloc_obj_get_info_by_name(root, "hwlocVersion"); + if (version) { + snprintf(m_backend, sizeof m_backend, "hwloc/%s", version); + } + else +# endif + { + snprintf(m_backend, sizeof m_backend, "hwloc"); + } findCache(root, 2, 3, [this](hwloc_obj_t found) { this->m_cache[found->attr->cache.depth] += 
found->attr->cache.size; }); diff --git a/src/crypto/common/VirtualMemory.cpp b/src/crypto/common/VirtualMemory.cpp index e3c70dfb..5f7c4551 100644 --- a/src/crypto/common/VirtualMemory.cpp +++ b/src/crypto/common/VirtualMemory.cpp @@ -28,6 +28,10 @@ #ifdef XMRIG_FEATURE_HWLOC # include # include "backend/cpu/platform/HwlocCpuInfo.h" +# +# if HWLOC_API_VERSION < 0x00010b00 +# define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE +# endif #endif From ab0d3b8919b8b07c61e25bd868c8bee950fce00f Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 31 Jul 2019 09:38:27 +0700 Subject: [PATCH 110/172] Fixed ARM build with hwloc. --- src/backend/cpu/cpu.cmake | 4 +++- src/backend/cpu/platform/BasicCpuInfo_arm.cpp | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index 1072df08..2ae73db7 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -39,7 +39,6 @@ if (WITH_HWLOC) endif() set(SOURCES_CPUID - src/backend/cpu/platform/BasicCpuInfo.cpp src/backend/cpu/platform/BasicCpuInfo.h src/backend/cpu/platform/HwlocCpuInfo.cpp src/backend/cpu/platform/HwlocCpuInfo.h @@ -66,7 +65,10 @@ else() set(SOURCES_CPUID src/backend/cpu/platform/BasicCpuInfo.h ) +endif() + +if (NOT WITH_LIBCPUID) if (XMRIG_ARM) set(SOURCES_CPUID ${SOURCES_CPUID} src/backend/cpu/platform/BasicCpuInfo_arm.cpp) else() diff --git a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp index 1e6c3cb7..b241e197 100644 --- a/src/backend/cpu/platform/BasicCpuInfo_arm.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo_arm.cpp @@ -36,10 +36,10 @@ xmrig::BasicCpuInfo::BasicCpuInfo() : - m_aes(false), m_brand(), - m_avx2(false), - m_threads(std::thread::hardware_concurrency()) + m_threads(std::thread::hardware_concurrency()), + m_aes(false), + m_avx2(false) { # ifdef XMRIG_ARMv8 memcpy(m_brand, "ARMv8", 5); From 97453d986f818e8da28a21416b7c3bd066b5acb7 Mon Sep 17 00:00:00 2001 From: SChernykh Date: Wed, 31 
Jul 2019 10:01:32 +0200 Subject: [PATCH 111/172] Fix Mac OS X compilation Thanks to @cjdelisle --- src/crypto/randomx/jit_compiler_x86_static.S | 24 ++++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/crypto/randomx/jit_compiler_x86_static.S b/src/crypto/randomx/jit_compiler_x86_static.S index 9cffaab5..67d2bdbc 100644 --- a/src/crypto/randomx/jit_compiler_x86_static.S +++ b/src/crypto/randomx/jit_compiler_x86_static.S @@ -68,9 +68,9 @@ DECL(randomx_program_prologue): #else #include "asm/program_prologue_linux.inc" #endif - movapd xmm13, xmmword ptr mantissaMask[rip] - movapd xmm14, xmmword ptr exp240[rip] - movapd xmm15, xmmword ptr scaleMask[rip] + movapd xmm13, xmmword ptr [mantissaMask+rip] + movapd xmm14, xmmword ptr [exp240+rip] + movapd xmm15, xmmword ptr [scaleMask+rip] jmp DECL(randomx_program_loop_begin) .balign 64 @@ -177,26 +177,26 @@ DECL(randomx_sshash_end): DECL(randomx_sshash_init): lea r8, [rbx+1] #include "asm/program_sshash_prefetch.inc" - imul r8, qword ptr r0_mul[rip] - mov r9, qword ptr r1_add[rip] + imul r8, qword ptr [r0_mul+rip] + mov r9, qword ptr [r1_add+rip] xor r9, r8 - mov r10, qword ptr r2_add[rip] + mov r10, qword ptr [r2_add+rip] xor r10, r8 - mov r11, qword ptr r3_add[rip] + mov r11, qword ptr [r3_add+rip] xor r11, r8 - mov r12, qword ptr r4_add[rip] + mov r12, qword ptr [r4_add+rip] xor r12, r8 - mov r13, qword ptr r5_add[rip] + mov r13, qword ptr [r5_add+rip] xor r13, r8 - mov r14, qword ptr r6_add[rip] + mov r14, qword ptr [r6_add+rip] xor r14, r8 - mov r15, qword ptr r7_add[rip] + mov r15, qword ptr [r7_add+rip] xor r15, r8 jmp DECL(randomx_program_end) .balign 64 #include "asm/program_sshash_constants.inc" - + .balign 64 DECL(randomx_program_end): nop From 84ff8af4bd4a6a12fc4af4664e94ba9617904bd4 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 1 Aug 2019 20:37:05 +0700 Subject: [PATCH 112/172] Optimized RandomX initialization and switching. 
--- src/backend/cpu/CpuWorker.cpp | 11 +- src/base/io/log/Log.cpp | 17 +-- src/base/tools/Buffer.h | 15 ++- src/core/Miner.cpp | 2 +- src/crypto/rx/Rx.cpp | 239 +++++++++++++++++++--------------- src/crypto/rx/Rx.h | 7 +- src/crypto/rx/RxCache.h | 3 +- src/crypto/rx/RxDataset.cpp | 16 +-- src/crypto/rx/RxDataset.h | 3 +- 9 files changed, 156 insertions(+), 157 deletions(-) diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index cd804199..000d7061 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -82,7 +82,9 @@ xmrig::CpuWorker::~CpuWorker() template void xmrig::CpuWorker::allocateRandomX_VM() { - while (!Rx::isReady(m_job.currentJob(), m_node)) { + RxDataset *dataset = Rx::dataset(m_job.currentJob(), m_node); + + while (dataset == nullptr) { std::this_thread::sleep_for(std::chrono::milliseconds(200)); if (Nonce::sequence(Nonce::CPU) == 0) { @@ -90,13 +92,6 @@ void xmrig::CpuWorker::allocateRandomX_VM() } } - RxDataset *dataset = Rx::dataset(m_node); - assert(dataset != nullptr); - - if (!dataset) { - return; - } - if (!m_vm) { m_vm = new RxVm(dataset, m_memory->scratchpad(), !m_hwAES); } diff --git a/src/base/io/log/Log.cpp b/src/base/io/log/Log.cpp index 22972a7e..4e3bd5a5 100644 --- a/src/base/io/log/Log.cpp +++ b/src/base/io/log/Log.cpp @@ -31,6 +31,7 @@ #include +#include #include #include #include @@ -69,14 +70,11 @@ public: inline LogPrivate() : m_buf() { - uv_mutex_init(&m_mutex); } inline ~LogPrivate() { - uv_mutex_destroy(&m_mutex); - for (ILogBackend *backend : m_backends) { delete backend; } @@ -91,13 +89,14 @@ public: size_t size = 0; size_t offset = 0; - lock(); + std::lock_guard lock(m_mutex); + timestamp(level, size, offset); color(level, size); const int rc = vsnprintf(m_buf + size, sizeof (m_buf) - offset - 32, fmt, args); if (rc < 0) { - return unlock(); + return; } size += std::min(static_cast(rc), sizeof (m_buf) - offset - 32); @@ -119,16 +118,10 @@ public: fputs(txt.c_str(), stdout); 
fflush(stdout); } - - unlock(); } private: - inline void lock() { uv_mutex_lock(&m_mutex); } - inline void unlock() { uv_mutex_unlock(&m_mutex); } - - inline void timestamp(Log::Level level, size_t &size, size_t &offset) { if (level == Log::NONE) { @@ -192,8 +185,8 @@ private: char m_buf[4096]; + std::mutex m_mutex; std::vector m_backends; - uv_mutex_t m_mutex; }; diff --git a/src/base/tools/Buffer.h b/src/base/tools/Buffer.h index 6b720357..28f92b9e 100644 --- a/src/base/tools/Buffer.h +++ b/src/base/tools/Buffer.h @@ -43,17 +43,20 @@ public: ~Buffer(); - inline char *data() { return m_data; } - inline const char *data() const { return m_data; } - inline size_t size() const { return m_size; } - inline void from(const Buffer &other) { from(other.data(), other.size()); } + inline bool isEqual(const Buffer &other) const { return m_size == other.m_size && (m_size == 0 || memcmp(m_data, other.m_data, m_size) == 0); } + inline char *data() { return m_data; } + inline const char *data() const { return m_data; } + inline size_t size() const { return m_size; } + inline void from(const Buffer &other) { from(other.data(), other.size()); } void from(const char *data, size_t size); - inline Buffer &operator=(const Buffer &other) { from(other); return *this; } - inline Buffer &operator=(Buffer &&other) { move(std::move(other)); return *this; } + inline bool operator!=(const Buffer &other) const { return !isEqual(other); } + inline bool operator==(const Buffer &other) const { return isEqual(other); } + inline Buffer &operator=(Buffer &&other) { move(std::move(other)); return *this; } + inline Buffer &operator=(const Buffer &other) { from(other); return *this; } static Buffer allocUnsafe(size_t size); diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 1bca9fc8..ab4a8ef6 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -380,7 +380,7 @@ void xmrig::Miner::setJob(const Job &job, bool donate) } # ifdef XMRIG_ALGO_RANDOMX - Rx::init(job, + Rx::init(d_ptr->job, 
d_ptr->controller->config()->rx().threads(), d_ptr->controller->config()->cpu().isHugePages(), d_ptr->controller->config()->rx().isNUMA() diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 676eca2c..5f5414c9 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -43,6 +43,7 @@ #include "base/tools/Buffer.h" #include "base/tools/Chrono.h" #include "crypto/rx/Rx.h" +#include "crypto/rx/RxAlgo.h" #include "crypto/rx/RxCache.h" #include "crypto/rx/RxDataset.h" @@ -57,11 +58,50 @@ static const char *tag = BLUE_BG(WHITE_BOLD_S " rx ") " "; static RxPrivate *d_ptr = nullptr; +#ifdef XMRIG_FEATURE_HWLOC +static void bindToNUMANode(uint32_t nodeId) +{ + hwloc_topology_t topology; + hwloc_topology_init(&topology); + hwloc_topology_load(topology); + + hwloc_obj_t node = hwloc_get_numanode_obj_by_os_index(topology, nodeId); + if (node) { + if (HwlocCpuInfo::has(HwlocCpuInfo::SET_THISTHREAD_MEMBIND)) { +# if HWLOC_API_VERSION >= 0x20000 + hwloc_set_membind(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD | HWLOC_MEMBIND_BYNODESET); +# else + hwloc_set_membind_nodeset(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD); +# endif + } + + Platform::setThreadAffinity(static_cast(hwloc_bitmap_first(node->cpuset))); + } + + hwloc_topology_destroy(topology); +} +#else +inline static void bindToNUMANode(uint32_t) {} +#endif + + class RxPrivate { public: - inline RxPrivate() + inline RxPrivate() : + m_seed() { +# ifdef XMRIG_FEATURE_HWLOC + if (Cpu::info()->nodes() > 1) { + for (uint32_t nodeId : HwlocCpuInfo::nodeIndexes()) { + datasets.insert({ nodeId, nullptr }); + } + } + else +# endif + { + datasets.insert({ 0, nullptr }); + } } @@ -75,32 +115,19 @@ public: } + inline bool isNUMA() const { return m_numa; } + inline const Algorithm &algorithm() const { return m_algorithm; } + inline const uint8_t *seed() const { return m_seed; } + inline size_t count() const { return isNUMA() ? 
datasets.size() : 1; } + + static void allocate(uint32_t nodeId) { const uint64_t ts = Chrono::steadyMSecs(); -# ifdef XMRIG_FEATURE_HWLOC - if (d_ptr->numa) { - hwloc_topology_t topology; - hwloc_topology_init(&topology); - hwloc_topology_load(topology); - - hwloc_obj_t node = hwloc_get_numanode_obj_by_os_index(topology, nodeId); - if (node) { - if (HwlocCpuInfo::has(HwlocCpuInfo::SET_THISTHREAD_MEMBIND)) { -# if HWLOC_API_VERSION >= 0x20000 - hwloc_set_membind(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD | HWLOC_MEMBIND_BYNODESET); -# else - hwloc_set_membind_nodeset(topology, node->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_THREAD); -# endif - } - - Platform::setThreadAffinity(static_cast(hwloc_bitmap_first(node->cpuset))); - } - - hwloc_topology_destroy(topology); + if (d_ptr->isNUMA()) { + bindToNUMANode(nodeId); } -# endif LOG_INFO("%s" CYAN_BOLD("#%u") MAGENTA_BOLD(" allocate") CYAN_BOLD(" %zu MB") BLACK_BOLD(" (%zu+%zu) for RandomX dataset & cache"), tag, @@ -110,7 +137,7 @@ public: RxCache::size() / 1024 / 1024 ); - RxDataset *dataset = new RxDataset(d_ptr->hugePages); + RxDataset *dataset = new RxDataset(d_ptr->m_hugePages); d_ptr->datasets[nodeId] = dataset; if (dataset->get() != nullptr) { @@ -134,29 +161,84 @@ public: } - bool hugePages = true; - bool numa = true; + static void initDataset(uint32_t nodeId, uint32_t threads) + { + std::lock_guard lock(d_ptr->mutex); + + const uint64_t ts = Chrono::steadyMSecs(); + + d_ptr->getOrAllocate(nodeId)->init(d_ptr->seed(), threads); + d_ptr->m_ready++; + + LOG_INFO("%s" CYAN_BOLD("#%u") GREEN(" init done") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, nodeId, Chrono::steadyMSecs() - ts); + } + + + inline RxDataset *getOrAllocate(uint32_t nodeId) + { + RxDataset *dataset = datasets.at(nodeId); + + if (dataset == nullptr) { + # ifdef XMRIG_FEATURE_HWLOC + if (d_ptr->isNUMA()) { + std::thread thread(allocate, nodeId); + thread.join(); + } else + # endif + { + allocate(nodeId); + } + + dataset = 
datasets.at(nodeId); + } + + return dataset; + } + + + inline void setState(const Job &job, bool hugePages, bool numa) + { + if (m_algorithm != job.algorithm()) { + m_algorithm = RxAlgo::apply(job.algorithm()); + } + + m_ready = 0; + m_numa = numa && Cpu::info()->nodes() > 1; + m_hugePages = hugePages; + + memcpy(m_seed, job.seedHash(), sizeof(m_seed)); + } + + + inline bool isReady(const Job &job) + { + return m_ready == count() && m_algorithm == job.algorithm() && memcmp(m_seed, job.seedHash(), sizeof(m_seed)) == 0; + } + + std::map datasets; std::mutex mutex; + +private: + bool m_hugePages = true; + bool m_numa = true; + Algorithm m_algorithm; + size_t m_ready = 0; + uint8_t m_seed[32]; }; } // namespace xmrig -bool xmrig::Rx::isReady(const Job &job, uint32_t nodeId) +xmrig::RxDataset *xmrig::Rx::dataset(const Job &job, uint32_t nodeId) { std::lock_guard lock(d_ptr->mutex); + if (!d_ptr->isReady(job)) { + return nullptr; + } - return isReady(job.seedHash(), job.algorithm(), d_ptr->numa ? nodeId : 0); -} - - -xmrig::RxDataset *xmrig::Rx::dataset(uint32_t nodeId) -{ - std::lock_guard lock(d_ptr->mutex); - - return d_ptr->datasets[d_ptr->numa ? nodeId : 0]; + return d_ptr->datasets.at(d_ptr->isNUMA() ? nodeId : 0); } @@ -201,88 +283,33 @@ void xmrig::Rx::init(const Job &job, int initThreads, bool hugePages, bool numa) std::lock_guard lock(d_ptr->mutex); - size_t ready = 0; - - for (auto const &item : d_ptr->datasets) { - if (isReady(job.seedHash(), job.algorithm(), item.first)) { - ready++; - } - } - - if (!d_ptr->datasets.empty() && ready == d_ptr->datasets.size()) { + if (d_ptr->isReady(job)) { return; } - d_ptr->hugePages = hugePages; - d_ptr->numa = numa && Cpu::info()->nodes() > 1; - const uint32_t threads = initThreads < 1 ? static_cast(Cpu::info()->threads()) - : static_cast(initThreads); + d_ptr->setState(job, hugePages, numa); + const uint32_t threads = initThreads < 1 ? 
static_cast(Cpu::info()->threads()) : static_cast(initThreads); + const String buf = Buffer::toHex(job.seedHash(), 8); + + LOG_INFO("%s" MAGENTA_BOLD("init dataset%s") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."), + tag, + d_ptr->count() > 1 ? "s" : "", + job.algorithm().shortName(), + threads, + buf.data() + ); # ifdef XMRIG_FEATURE_HWLOC - if (d_ptr->numa) { - for (uint32_t nodeId : HwlocCpuInfo::nodeIndexes()) { - std::thread thread(initDataset, nodeId, job.seedHash(), job.algorithm(), threads); + if (d_ptr->isNUMA()) { + for (auto const &item : d_ptr->datasets) { + std::thread thread(RxPrivate::initDataset, item.first, threads); thread.detach(); } } else # endif { - std::thread thread(initDataset, 0, job.seedHash(), job.algorithm(), threads); + std::thread thread(RxPrivate::initDataset, 0, threads); thread.detach(); } } - - -bool xmrig::Rx::isReady(const uint8_t *seed, const Algorithm &algorithm, uint32_t nodeId) -{ - return !d_ptr->datasets.empty() && d_ptr->datasets[nodeId] != nullptr && d_ptr->datasets[nodeId]->isReady(seed, algorithm); -} - - -void xmrig::Rx::initDataset(uint32_t nodeId, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads) -{ - std::lock_guard lock(d_ptr->mutex); - - RxDataset *dataset = d_ptr->datasets[nodeId]; - - if (!dataset) { -# ifdef XMRIG_FEATURE_HWLOC - if (d_ptr->numa) { - std::thread thread(RxPrivate::allocate, nodeId); - thread.join(); - } else -# endif - { - RxPrivate::allocate(nodeId); - } - - dataset = d_ptr->datasets[nodeId]; - } - - if (!dataset->isReady(seed, algorithm)) { - const uint64_t ts = Chrono::steadyMSecs(); - - if (dataset->get() != nullptr) { - LOG_INFO("%s" CYAN_BOLD("#%u") MAGENTA_BOLD(" init dataset") " algo " WHITE_BOLD("%s (") CYAN_BOLD("%u") WHITE_BOLD(" threads)") BLACK_BOLD(" seed %s..."), - tag, - nodeId, - algorithm.shortName(), - threads, - Buffer::toHex(seed, 8).data() - ); - } - else { - LOG_INFO("%s" CYAN_BOLD("#%u") MAGENTA_BOLD(" init 
cache") " algo " WHITE_BOLD("%s") BLACK_BOLD(" seed %s..."), - tag, - nodeId, - algorithm.shortName(), - Buffer::toHex(seed, 8).data() - ); - } - - dataset->init(seed, algorithm, threads); - - LOG_INFO("%s" CYAN_BOLD("#%u") GREEN(" init done") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, nodeId, Chrono::steadyMSecs() - ts); - } -} diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index c484c3b4..1ba6397e 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -44,16 +44,11 @@ class Job; class Rx { public: - static bool isReady(const Job &job, uint32_t nodeId); - static RxDataset *dataset(uint32_t nodeId); + static RxDataset *dataset(const Job &job, uint32_t nodeId); static std::pair hugePages(); static void destroy(); static void init(); static void init(const Job &job, int initThreads, bool hugePages, bool numa); - -private: - static bool isReady(const uint8_t *seed, const Algorithm &algorithm, uint32_t nodeId); - static void initDataset(uint32_t nodeId, const uint8_t *seed, const Algorithm &algorithm, uint32_t threads); }; diff --git a/src/crypto/rx/RxCache.h b/src/crypto/rx/RxCache.h index c48924a1..80c1faba 100644 --- a/src/crypto/rx/RxCache.h +++ b/src/crypto/rx/RxCache.h @@ -53,11 +53,12 @@ public: inline randomx_cache *get() const { return m_cache; } bool init(const void *seed); - bool isReady(const void *seed) const; static inline constexpr size_t size() { return RANDOMX_CACHE_MAX_SIZE; } private: + bool isReady(const void *seed) const; + int m_flags = 0; randomx_cache *m_cache = nullptr; uint8_t m_seed[32]; diff --git a/src/crypto/rx/RxDataset.cpp b/src/crypto/rx/RxDataset.cpp index 617b9200..7d498c4d 100644 --- a/src/crypto/rx/RxDataset.cpp +++ b/src/crypto/rx/RxDataset.cpp @@ -64,16 +64,8 @@ xmrig::RxDataset::~RxDataset() } -bool xmrig::RxDataset::init(const void *seed, const Algorithm &algorithm, uint32_t numThreads) +bool xmrig::RxDataset::init(const void *seed, uint32_t numThreads) { - if (isReady(seed, algorithm)) { - return false; - } - - if 
(m_algorithm != algorithm) { - m_algorithm = RxAlgo::apply(algorithm); - } - cache()->init(seed); if (!get()) { @@ -104,12 +96,6 @@ bool xmrig::RxDataset::init(const void *seed, const Algorithm &algorithm, uint32 } -bool xmrig::RxDataset::isReady(const void *seed, const Algorithm &algorithm) const -{ - return algorithm == m_algorithm && cache()->isReady(seed); -} - - std::pair xmrig::RxDataset::hugePages() const { constexpr size_t twoMiB = 2u * 1024u * 1024u; diff --git a/src/crypto/rx/RxDataset.h b/src/crypto/rx/RxDataset.h index 7944d52c..d3488668 100644 --- a/src/crypto/rx/RxDataset.h +++ b/src/crypto/rx/RxDataset.h @@ -52,8 +52,7 @@ public: inline randomx_dataset *get() const { return m_dataset; } inline RxCache *cache() const { return m_cache; } - bool init(const void *seed, const Algorithm &algorithm, uint32_t numThreads); - bool isReady(const void *seed, const Algorithm &algorithm) const; + bool init(const void *seed, uint32_t numThreads); std::pair hugePages() const; static inline constexpr size_t size() { return RANDOMX_DATASET_MAX_SIZE; } From 718be7e9aa89b163077d40cf0e6cd63dae7d2757 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 2 Aug 2019 10:54:00 +0700 Subject: [PATCH 113/172] Fixed 32-bit. 
--- cmake/flags.cmake | 6 +++++- src/base/net/stratum/Job.cpp | 17 +++++++++++++++++ src/base/net/stratum/Job.h | 5 ++++- 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 3f2bd0a0..bc441dd0 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -34,7 +34,11 @@ if (CMAKE_CXX_COMPILER_ID MATCHES GNU) endif() if (WIN32) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static") + if (CMAKE_SIZEOF_VOID_P EQUAL 8) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static") + else() + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static -Wl,--large-address-aware") + endif() else() set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -static-libstdc++") endif() diff --git a/src/base/net/stratum/Job.cpp b/src/base/net/stratum/Job.cpp index a383bbf7..512b686e 100644 --- a/src/base/net/stratum/Job.cpp +++ b/src/base/net/stratum/Job.cpp @@ -160,3 +160,20 @@ void xmrig::Job::setDiff(uint64_t diff) m_rawTarget[16] = '\0'; # endif } + + +void xmrig::Job::copy(const Job &other) +{ + m_algorithm = other.m_algorithm; + m_nicehash = other.m_nicehash; + m_size = other.m_size; + m_clientId = other.m_clientId; + m_id = other.m_id; + m_diff = other.m_diff; + m_height = other.m_height; + m_target = other.m_target; + m_index = other.m_index; + + memcpy(m_blob, other.m_blob, sizeof (m_blob)); + memcpy(m_seedHash, other.m_seedHash, sizeof(m_seedHash)); +} diff --git a/src/base/net/stratum/Job.h b/src/base/net/stratum/Job.h index 06d1be79..2b256a12 100644 --- a/src/base/net/stratum/Job.h +++ b/src/base/net/stratum/Job.h @@ -90,10 +90,13 @@ public: inline bool operator==(const Job &other) const { return isEqual(other); } inline bool operator!=(const Job &other) const { return !isEqual(other); } + inline Job &operator=(const Job &other) { copy(other); return *this; } private: + void copy(const Job &other); + Algorithm m_algorithm; - bool m_nicehash = false; + bool m_nicehash = false; size_t 
m_size = 0; String m_clientId; String m_id; From bdaf28adf814cda247db0592c403b8ee4c92a321 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 2 Aug 2019 14:44:38 +0700 Subject: [PATCH 114/172] Unified memory allocation functions. --- src/crypto/randomx/virtual_memory.cpp | 81 ++++++--------------------- 1 file changed, 17 insertions(+), 64 deletions(-) diff --git a/src/crypto/randomx/virtual_memory.cpp b/src/crypto/randomx/virtual_memory.cpp index 925e8e86..661740fc 100644 --- a/src/crypto/randomx/virtual_memory.cpp +++ b/src/crypto/randomx/virtual_memory.cpp @@ -26,80 +26,33 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "virtual_memory.hpp" - #include -#if defined(_WIN32) || defined(__CYGWIN__) -#include -#else -#ifdef __APPLE__ -#include -#endif -#include -#include -#ifndef MAP_ANONYMOUS -#define MAP_ANONYMOUS MAP_ANON -#endif -#endif -#if defined(_WIN32) || defined(__CYGWIN__) -std::string getErrorMessage(const char* function) { - LPSTR messageBuffer = nullptr; - size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&messageBuffer, 0, NULL); - std::string message(messageBuffer, size); - LocalFree(messageBuffer); - return std::string(function) + std::string(": ") + message; -} -#endif +#include "crypto/common/VirtualMemory.h" +#include "virtual_memory.hpp" + void* allocExecutableMemory(std::size_t bytes) { - void* mem; -#if defined(_WIN32) || defined(__CYGWIN__) - mem = VirtualAlloc(nullptr, bytes, MEM_COMMIT, PAGE_EXECUTE_READWRITE); - if (mem == nullptr) - throw std::runtime_error(getErrorMessage("allocExecutableMemory - VirtualAlloc")); -#else - mem = mmap(nullptr, bytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - if (mem == MAP_FAILED) - throw 
std::runtime_error("allocExecutableMemory - mmap failed"); -#endif - return mem; + void *mem = xmrig::VirtualMemory::allocateExecutableMemory(bytes); + if (mem == nullptr) { + throw std::runtime_error("Failed to allocate executable memory"); + } + + return mem; } -constexpr std::size_t align(std::size_t pos, std::size_t align) { - return ((pos - 1) / align + 1) * align; -} void* allocLargePagesMemory(std::size_t bytes) { - void* mem; -#if defined(_WIN32) || defined(__CYGWIN__) - auto pageMinimum = GetLargePageMinimum(); - if (pageMinimum > 0) - mem = VirtualAlloc(NULL, align(bytes, pageMinimum), MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES, PAGE_READWRITE); - else - throw std::runtime_error("allocLargePagesMemory - Large pages are not supported"); - if (mem == nullptr) - throw std::runtime_error(getErrorMessage("allocLargePagesMemory - VirtualAlloc")); -#else -#ifdef __APPLE__ - mem = mmap(nullptr, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, VM_FLAGS_SUPERPAGE_SIZE_2MB, 0); -#elif defined(__FreeBSD__) - mem = mmap(nullptr, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER, -1, 0); -#else - mem = mmap(nullptr, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1, 0); -#endif - if (mem == MAP_FAILED) - throw std::runtime_error("allocLargePagesMemory - mmap failed"); -#endif - return mem; + void *mem = xmrig::VirtualMemory::allocateLargePagesMemory(bytes); + if (mem == nullptr) { + throw std::runtime_error("Failed to allocate large pages memory"); + } + + return mem; } + void freePagedMemory(void* ptr, std::size_t bytes) { -#if defined(_WIN32) || defined(__CYGWIN__) - VirtualFree(ptr, 0, MEM_RELEASE); -#else - munmap(ptr, bytes); -#endif + xmrig::VirtualMemory::freeLargePagesMemory(ptr, bytes); } From e584b266df174567cb2597e92d457a3c315a76aa Mon Sep 17 00:00:00 2001 From: Tony Butler Date: Thu, 1 Aug 2019 13:45:03 -0600 Subject: [PATCH 115/172] Build hwloc version string based on 
HWLOC_API_VERSION, whenever `hwlocVersion` object does not exist (<1.11.x) --- src/backend/cpu/platform/HwlocCpuInfo.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 23ee554f..b2aa47d7 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -167,7 +167,11 @@ xmrig::HwlocCpuInfo::HwlocCpuInfo() : BasicCpuInfo(), else # endif { - snprintf(m_backend, sizeof m_backend, "hwloc"); + snprintf(m_backend, sizeof m_backend, "hwloc/%d.%d.%d", + (HWLOC_API_VERSION>>16)&0x000000ff, + (HWLOC_API_VERSION>>8 )&0x000000ff, + (HWLOC_API_VERSION )&0x000000ff + ); } findCache(root, 2, 3, [this](hwloc_obj_t found) { this->m_cache[found->attr->cache.depth] += found->attr->cache.size; }); From f7ea4b6dbd739fdad1d0d38aef0c8a8d3da3f08d Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 2 Aug 2019 16:44:55 +0700 Subject: [PATCH 116/172] Always stop mining threads in RandomX dataset change upcoming. 
--- src/backend/cpu/CpuBackend.cpp | 24 +++++++++++++----------- src/base/io/log/Log.h | 4 ++++ src/core/Miner.cpp | 10 ++++++++-- src/crypto/rx/Rx.cpp | 10 +++++++++- src/crypto/rx/Rx.h | 1 + 5 files changed, 35 insertions(+), 14 deletions(-) diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 005ff1de..60ca8cf3 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -49,7 +49,8 @@ namespace xmrig { extern template class Threads; -static const String kType = "cpu"; +static const char *tag = CYAN_BG_BOLD(" cpu "); +static const String kType = "cpu"; struct LaunchStatus @@ -94,7 +95,8 @@ public: inline void start() { - LOG_INFO(GREEN_BOLD("CPU") " use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"), + LOG_INFO("%s use profile " BLUE_BG(WHITE_BOLD_S " %s ") WHITE_BOLD_S " (" CYAN_BOLD("%zu") WHITE_BOLD(" threads)") " scratchpad " CYAN_BOLD("%zu KB"), + tag, profileName.data(), threads.size(), algo.memory() / 1024 @@ -170,12 +172,8 @@ const xmrig::String &xmrig::CpuBackend::type() const } -void xmrig::CpuBackend::prepare(const Job &nextJob) +void xmrig::CpuBackend::prepare(const Job &) { - if (nextJob.algorithm().family() == Algorithm::RANDOM_X && nextJob.algorithm() != d_ptr->algo) { - d_ptr->workers.stop(); - d_ptr->threads.clear(); - } } @@ -207,9 +205,7 @@ void xmrig::CpuBackend::printHashrate(bool details) void xmrig::CpuBackend::setJob(const Job &job) { if (!isEnabled()) { - d_ptr->workers.stop(); - d_ptr->threads.clear(); - return; + return stop(); } const CpuConfig &cpu = d_ptr->controller->config()->cpu(); @@ -249,7 +245,8 @@ void xmrig::CpuBackend::start(IWorker *worker) const double percent = d_ptr->status.hugePages == 0 ? 
0.0 : static_cast(d_ptr->status.hugePages) / d_ptr->status.pages * 100.0; const size_t memory = d_ptr->status.ways * d_ptr->status.memory / 1024; - LOG_INFO(GREEN_BOLD("CPU READY") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu %1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") BLACK_BOLD(" (%" PRIu64 " ms)"), + LOG_INFO("%s" GREEN_BOLD(" READY") " threads " CYAN_BOLD("%zu(%zu)") " huge pages %s%zu/%zu %1.0f%%\x1B[0m memory " CYAN_BOLD("%zu KB") BLACK_BOLD(" (%" PRIu64 " ms)"), + tag, d_ptr->status.threads, d_ptr->status.ways, (d_ptr->status.hugePages == d_ptr->status.pages ? GREEN_BOLD_S : (d_ptr->status.hugePages == 0 ? RED_BOLD_S : YELLOW_BOLD_S)), d_ptr->status.hugePages, d_ptr->status.pages, percent, memory, @@ -265,7 +262,12 @@ void xmrig::CpuBackend::start(IWorker *worker) void xmrig::CpuBackend::stop() { + const uint64_t ts = Chrono::steadyMSecs(); + d_ptr->workers.stop(); + d_ptr->threads.clear(); + + LOG_INFO("%s" YELLOW(" stopped") BLACK_BOLD(" (%" PRIu64 " ms)"), tag, Chrono::steadyMSecs() - ts); } diff --git a/src/base/io/log/Log.h b/src/base/io/log/Log.h index 078a8546..d8bcb44a 100644 --- a/src/base/io/log/Log.h +++ b/src/base/io/log/Log.h @@ -85,6 +85,8 @@ private: #define BLUE_BG_BOLD_S CSI "44;1m" #define MAGENTA_BG_S CSI "45m" #define MAGENTA_BG_BOLD_S CSI "45;1m" +#define CYAN_BG_S CSI "46m" +#define CYAN_BG_BOLD_S CSI "46;1m" //color wrappings #define BLACK(x) BLACK_S x CLEAR @@ -108,6 +110,8 @@ private: #define BLUE_BG_BOLD(x) BLUE_BG_BOLD_S x CLEAR #define MAGENTA_BG(x) MAGENTA_BG_S x CLEAR #define MAGENTA_BG_BOLD(x) MAGENTA_BG_BOLD_S x CLEAR +#define CYAN_BG(x) CYAN_BG_S x CLEAR +#define CYAN_BG_BOLD(x) CYAN_BG_BOLD_S x CLEAR #define LOG_EMERG(x, ...) 
xmrig::Log::print(xmrig::Log::EMERG, x, ##__VA_ARGS__) diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index ab4a8ef6..a1c65ed2 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -361,12 +361,18 @@ void xmrig::Miner::setEnabled(bool enabled) void xmrig::Miner::setJob(const Job &job, bool donate) { - d_ptr->algorithm = job.algorithm(); - for (IBackend *backend : d_ptr->backends) { backend->prepare(job); } +# ifdef XMRIG_ALGO_RANDOMX + if (d_ptr->algorithm.family() == Algorithm::RANDOM_X && job.algorithm().family() == Algorithm::RANDOM_X && !Rx::isReady(job)) { + stop(); + } +# endif + + d_ptr->algorithm = job.algorithm(); + uv_rwlock_wrlock(&d_ptr->rwlock); const uint8_t index = donate ? 1 : 0; diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 5f5414c9..6df708be 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -54,7 +54,7 @@ namespace xmrig { class RxPrivate; -static const char *tag = BLUE_BG(WHITE_BOLD_S " rx ") " "; +static const char *tag = BLUE_BG(WHITE_BOLD_S " rx ") " "; static RxPrivate *d_ptr = nullptr; @@ -231,6 +231,14 @@ private: } // namespace xmrig +bool xmrig::Rx::isReady(const Job &job) +{ + std::lock_guard lock(d_ptr->mutex); + + return d_ptr->isReady(job); +} + + xmrig::RxDataset *xmrig::Rx::dataset(const Job &job, uint32_t nodeId) { std::lock_guard lock(d_ptr->mutex); diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index 1ba6397e..7cb94b4e 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -44,6 +44,7 @@ class Job; class Rx { public: + static bool isReady(const Job &job); static RxDataset *dataset(const Job &job, uint32_t nodeId); static std::pair hugePages(); static void destroy(); From 3df080990c474c610ed6aab194ddde3463cba274 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 2 Aug 2019 17:57:41 +0700 Subject: [PATCH 117/172] Fixed warning. 
--- src/core/config/ConfigTransform.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/core/config/ConfigTransform.cpp b/src/core/config/ConfigTransform.cpp index 6d420739..ed315fb4 100644 --- a/src/core/config/ConfigTransform.cpp +++ b/src/core/config/ConfigTransform.cpp @@ -36,7 +36,10 @@ static const char *kAffinity = "affinity"; static const char *kAsterisk = "*"; static const char *kCpu = "cpu"; static const char *kIntensity = "intensity"; -static const char *kRandomX = "randomx"; + +#ifdef XMRIG_ALGO_RANDOMX +static const char *kRandomX = "randomx"; +#endif static inline uint64_t intensity(uint64_t av) From 7a93599d4e144fcfa94107cec4e81b5fd32974de Mon Sep 17 00:00:00 2001 From: xmrig Date: Fri, 2 Aug 2019 21:19:31 +0700 Subject: [PATCH 118/172] Update CHANGELOG.md --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c1cab4a9..646e53bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# v2.99.4-beta +- [#1088](https://github.com/xmrig/xmrig/pull/1088) Fixed macOS compilation. +- [#1095](https://github.com/xmrig/xmrig/pull/1095) Fixed compatibility with hwloc 1.10.x. +- Optimized RandomX initialization and switching, fixed rare crash when re-initialize dataset. +- Fixed ARM build with hwloc. + # v2.99.3-beta - [#1082](https://github.com/xmrig/xmrig/issues/1082) Fixed hwloc auto configuration on AMD FX CPUs. - Added command line option `--export-topology` for export hwloc topology to a XML file. From f1dfa26783279f2f1664f0798f913b9d52cb905d Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 3 Aug 2019 22:19:40 +0700 Subject: [PATCH 119/172] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 646e53bb..97216550 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ # v2.99.4-beta +- [#1062](https://github.com/xmrig/xmrig/issues/1062) Fixed 32 bit support. **32 bit is slow and deprecated**. 
- [#1088](https://github.com/xmrig/xmrig/pull/1088) Fixed macOS compilation. - [#1095](https://github.com/xmrig/xmrig/pull/1095) Fixed compatibility with hwloc 1.10.x. - Optimized RandomX initialization and switching, fixed rare crash when re-initialize dataset. From af50bdc79776ffc41aeabb983e951a110fc3ca8a Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 3 Aug 2019 22:25:58 +0700 Subject: [PATCH 120/172] v2.99.4-beta --- src/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.h b/src/version.h index 350814be..1e1ca8b6 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.4-evo" +#define APP_VERSION "2.99.4-beta" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" From df559b44d872837e119cfa8367f1ed2b519bb7a7 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 3 Aug 2019 23:08:57 +0700 Subject: [PATCH 121/172] v2.99.5-evo --- src/version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/version.h b/src/version.h index 1e1ca8b6..e2c62435 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.4-beta" +#define APP_VERSION "2.99.5-evo" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" @@ -36,7 +36,7 @@ #define APP_VER_MAJOR 2 #define APP_VER_MINOR 99 -#define APP_VER_PATCH 4 +#define APP_VER_PATCH 5 #ifdef _MSC_VER # if (_MSC_VER >= 1920) From 7eaf7764f73e68af5965a766a18b13fa2f1f5d1e Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 4 Aug 2019 19:47:23 +0700 Subject: [PATCH 122/172] rapidjson updated to recent git version. 
--- src/3rdparty/rapidjson/allocators.h | 17 +- src/3rdparty/rapidjson/cursorstreamwrapper.h | 78 ++ src/3rdparty/rapidjson/document.h | 265 ++++--- src/3rdparty/rapidjson/encodedstream.h | 2 +- src/3rdparty/rapidjson/encodings.h | 86 +-- src/3rdparty/rapidjson/error/error.h | 10 +- src/3rdparty/rapidjson/filereadstream.h | 4 +- src/3rdparty/rapidjson/filewritestream.h | 4 +- src/3rdparty/rapidjson/internal/biginteger.h | 4 +- src/3rdparty/rapidjson/internal/diyfp.h | 37 +- src/3rdparty/rapidjson/internal/dtoa.h | 8 +- src/3rdparty/rapidjson/internal/ieee754.h | 4 +- src/3rdparty/rapidjson/internal/itoa.h | 82 ++- src/3rdparty/rapidjson/internal/meta.h | 9 +- src/3rdparty/rapidjson/internal/regex.h | 319 +++++---- src/3rdparty/rapidjson/internal/stack.h | 10 +- src/3rdparty/rapidjson/internal/strfunc.h | 14 + src/3rdparty/rapidjson/internal/strtod.h | 111 +-- src/3rdparty/rapidjson/istreamwrapper.h | 87 ++- src/3rdparty/rapidjson/license.txt | 57 ++ src/3rdparty/rapidjson/pointer.h | 92 ++- src/3rdparty/rapidjson/prettywriter.h | 60 +- src/3rdparty/rapidjson/rapidjson.h | 112 ++- src/3rdparty/rapidjson/reader.h | 601 ++++++++++++---- src/3rdparty/rapidjson/schema.h | 717 ++++++++++++++++--- src/3rdparty/rapidjson/stream.h | 52 +- src/3rdparty/rapidjson/stringbuffer.h | 4 + src/3rdparty/rapidjson/writer.h | 159 +++- 28 files changed, 2228 insertions(+), 777 deletions(-) create mode 100644 src/3rdparty/rapidjson/cursorstreamwrapper.h create mode 100644 src/3rdparty/rapidjson/license.txt diff --git a/src/3rdparty/rapidjson/allocators.h b/src/3rdparty/rapidjson/allocators.h index 98affe03..cc67c897 100644 --- a/src/3rdparty/rapidjson/allocators.h +++ b/src/3rdparty/rapidjson/allocators.h @@ -52,6 +52,19 @@ concept Allocator { \endcode */ + +/*! \def RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY + \ingroup RAPIDJSON_CONFIG + \brief User-defined kDefaultChunkCapacity definition. + + User can define this as any \c size that is a power of 2. 
+*/ + +#ifndef RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY +#define RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY (64 * 1024) +#endif + + /////////////////////////////////////////////////////////////////////////////// // CrtAllocator @@ -236,7 +249,7 @@ private: */ bool AddChunk(size_t capacity) { if (!baseAllocator_) - ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator()); + ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)(); if (ChunkHeader* chunk = reinterpret_cast(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) { chunk->capacity = capacity; chunk->size = 0; @@ -248,7 +261,7 @@ private: return false; } - static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity. + static const int kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity. //! Chunk header for perpending to each chunk. /*! Chunks are stored as a singly linked list. diff --git a/src/3rdparty/rapidjson/cursorstreamwrapper.h b/src/3rdparty/rapidjson/cursorstreamwrapper.h new file mode 100644 index 00000000..52c11a7c --- /dev/null +++ b/src/3rdparty/rapidjson/cursorstreamwrapper.h @@ -0,0 +1,78 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
+ +#ifndef RAPIDJSON_CURSORSTREAMWRAPPER_H_ +#define RAPIDJSON_CURSORSTREAMWRAPPER_H_ + +#include "stream.h" + +#if defined(__GNUC__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#if defined(_MSC_VER) && _MSC_VER <= 1800 +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4702) // unreachable code +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +RAPIDJSON_NAMESPACE_BEGIN + + +//! Cursor stream wrapper for counting line and column number if error exists. +/*! + \tparam InputStream Any stream that implements Stream Concept +*/ +template > +class CursorStreamWrapper : public GenericStreamWrapper { +public: + typedef typename Encoding::Ch Ch; + + CursorStreamWrapper(InputStream& is): + GenericStreamWrapper(is), line_(1), col_(0) {} + + // counting line and column number + Ch Take() { + Ch ch = this->is_.Take(); + if(ch == '\n') { + line_ ++; + col_ = 0; + } else { + col_ ++; + } + return ch; + } + + //! Get the error line number, if error exists. + size_t GetLine() const { return line_; } + //! Get the error column number, if error exists. 
+ size_t GetColumn() const { return col_; } + +private: + size_t line_; //!< Current Line + size_t col_; //!< Current Column +}; + +#if defined(_MSC_VER) && _MSC_VER <= 1800 +RAPIDJSON_DIAG_POP +#endif + +#if defined(__GNUC__) +RAPIDJSON_DIAG_POP +#endif + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_CURSORSTREAMWRAPPER_H_ diff --git a/src/3rdparty/rapidjson/document.h b/src/3rdparty/rapidjson/document.h index e3e20dfb..9783fe4a 100644 --- a/src/3rdparty/rapidjson/document.h +++ b/src/3rdparty/rapidjson/document.h @@ -26,26 +26,21 @@ #include RAPIDJSON_DIAG_PUSH -#ifdef _MSC_VER -RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant -RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data -#endif - #ifdef __clang__ RAPIDJSON_DIAG_OFF(padded) RAPIDJSON_DIAG_OFF(switch-enum) RAPIDJSON_DIAG_OFF(c++98-compat) +#elif defined(_MSC_VER) +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data #endif #ifdef __GNUC__ RAPIDJSON_DIAG_OFF(effc++) -#if __GNUC__ >= 6 -RAPIDJSON_DIAG_OFF(terminate) // ignore throwing RAPIDJSON_ASSERT in RAPIDJSON_NOEXCEPT functions -#endif #endif // __GNUC__ #ifndef RAPIDJSON_NOMEMBERITERATORCLASS -#include // std::iterator, std::random_access_iterator_tag +#include // std::random_access_iterator_tag #endif #if RAPIDJSON_HAS_CXX11_RVALUE_REFS @@ -71,6 +66,12 @@ template struct GenericMember { GenericValue name; //!< name of member (must be a string) GenericValue value; //!< value of member. + + // swap() for std::sort() and other potential use in STL. 
+ friend inline void swap(GenericMember& a, GenericMember& b) RAPIDJSON_NOEXCEPT { + a.name.Swap(b.name); + a.value.Swap(b.value); + } }; /////////////////////////////////////////////////////////////////////////////// @@ -98,16 +99,13 @@ struct GenericMember { \see GenericMember, GenericValue::MemberIterator, GenericValue::ConstMemberIterator */ template -class GenericMemberIterator - : public std::iterator >::Type> { +class GenericMemberIterator { friend class GenericValue; template friend class GenericMemberIterator; typedef GenericMember PlainType; typedef typename internal::MaybeAddConst::Type ValueType; - typedef std::iterator BaseType; public: //! Iterator type itself @@ -117,12 +115,21 @@ public: //! Non-constant iterator type typedef GenericMemberIterator NonConstIterator; + /** \name std::iterator_traits support */ + //@{ + typedef ValueType value_type; + typedef ValueType * pointer; + typedef ValueType & reference; + typedef std::ptrdiff_t difference_type; + typedef std::random_access_iterator_tag iterator_category; + //@} + //! Pointer to (const) GenericMember - typedef typename BaseType::pointer Pointer; + typedef pointer Pointer; //! Reference to (const) GenericMember - typedef typename BaseType::reference Reference; + typedef reference Reference; //! Signed integer type (e.g. \c ptrdiff_t) - typedef typename BaseType::difference_type DifferenceType; + typedef difference_type DifferenceType; //! Default constructor (singular value) /*! Creates an iterator pointing to no element. @@ -198,17 +205,17 @@ private: // class-based member iterator implementation disabled, use plain pointers template -struct GenericMemberIterator; +class GenericMemberIterator; //! non-const GenericMemberIterator template -struct GenericMemberIterator { +class GenericMemberIterator { //! use plain pointer as iterator type typedef GenericMember* Iterator; }; //! const GenericMemberIterator template -struct GenericMemberIterator { +class GenericMemberIterator { //! 
use plain const pointer as iterator type typedef const GenericMember* Iterator; }; @@ -300,7 +307,7 @@ struct GenericStringRef { */ #endif explicit GenericStringRef(const CharType* str) - : s(str), length(internal::StrLen(str)){ RAPIDJSON_ASSERT(s != 0); } + : s(str), length(NotNullStrLen(str)) {} //! Create constant string reference from pointer and length #ifndef __clang__ // -Wdocumentation @@ -312,12 +319,10 @@ struct GenericStringRef { */ #endif GenericStringRef(const CharType* str, SizeType len) - : s(str), length(len) { RAPIDJSON_ASSERT(s != 0); } + : s(RAPIDJSON_LIKELY(str) ? str : emptyString), length(len) { RAPIDJSON_ASSERT(str != 0 || len == 0u); } GenericStringRef(const GenericStringRef& rhs) : s(rhs.s), length(rhs.length) {} - GenericStringRef& operator=(const GenericStringRef& rhs) { s = rhs.s; length = rhs.length; } - //! implicit conversion to plain CharType pointer operator const Ch *() const { return s; } @@ -325,11 +330,24 @@ struct GenericStringRef { const SizeType length; //!< length of the string (excluding the trailing NULL terminator) private: + SizeType NotNullStrLen(const CharType* str) { + RAPIDJSON_ASSERT(str != 0); + return internal::StrLen(str); + } + + /// Empty string - used when passing in a NULL pointer + static const Ch emptyString[]; + //! Disallow construction from non-const array template GenericStringRef(CharType (&str)[N]) /* = delete */; + //! Copy assignment operator not permitted - immutable type + GenericStringRef& operator=(const GenericStringRef& rhs) /* = delete */; }; +template +const CharType GenericStringRef::emptyString[] = { CharType() }; + //! Mark a character pointer as constant string /*! Mark a plain character pointer as a "string literal". This function can be used to avoid copying a character string to be referenced as a @@ -344,7 +362,7 @@ private: */ template inline GenericStringRef StringRef(const CharType* str) { - return GenericStringRef(str, internal::StrLen(str)); + return GenericStringRef(str); } //! 
Mark a character pointer as constant string @@ -434,6 +452,26 @@ struct TypeHelper { static ValueType& Set(ValueType& v, unsigned data, typename ValueType::AllocatorType&) { return v.SetUint(data); } }; +#ifdef _MSC_VER +RAPIDJSON_STATIC_ASSERT(sizeof(long) == sizeof(int)); +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsInt(); } + static long Get(const ValueType& v) { return v.GetInt(); } + static ValueType& Set(ValueType& v, long data) { return v.SetInt(data); } + static ValueType& Set(ValueType& v, long data, typename ValueType::AllocatorType&) { return v.SetInt(data); } +}; + +RAPIDJSON_STATIC_ASSERT(sizeof(unsigned long) == sizeof(unsigned)); +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsUint(); } + static unsigned long Get(const ValueType& v) { return v.GetUint(); } + static ValueType& Set(ValueType& v, unsigned long data) { return v.SetUint(data); } + static ValueType& Set(ValueType& v, unsigned long data, typename ValueType::AllocatorType&) { return v.SetUint(data); } +}; +#endif + template struct TypeHelper { static bool Is(const ValueType& v) { return v.IsInt64(); } @@ -507,7 +545,7 @@ struct TypeHelper { static bool Is(const ValueType& v) { return v.IsObject(); } static ObjectType Get(ValueType& v) { return v.GetObject(); } static ValueType& Set(ValueType& v, ObjectType data) { return v = data; } - static ValueType& Set(ValueType& v, ObjectType data, typename ValueType::AllocatorType&) { v = data; } + static ValueType& Set(ValueType& v, ObjectType data, typename ValueType::AllocatorType&) { return v = data; } }; template @@ -590,11 +628,11 @@ public: \note Default content for number is zero. 
*/ explicit GenericValue(Type type) RAPIDJSON_NOEXCEPT : data_() { - static const uint16_t defaultFlags[7] = { + static const uint16_t defaultFlags[] = { kNullFlag, kFalseFlag, kTrueFlag, kObjectFlag, kArrayFlag, kShortStringFlag, kNumberAnyFlag }; - RAPIDJSON_ASSERT(type <= kNumberType); + RAPIDJSON_NOEXCEPT_ASSERT(type >= kNullType && type <= kNumberType); data_.f.flags = defaultFlags[type]; // Use ShortString to store empty string. @@ -607,10 +645,50 @@ public: \tparam SourceAllocator allocator of \c rhs \param rhs Value to copy from (read-only) \param allocator Allocator for allocating copied elements and buffers. Commonly use GenericDocument::GetAllocator(). + \param copyConstStrings Force copying of constant strings (e.g. referencing an in-situ buffer) \see CopyFrom() */ - template< typename SourceAllocator > - GenericValue(const GenericValue& rhs, Allocator & allocator); + template + GenericValue(const GenericValue& rhs, Allocator& allocator, bool copyConstStrings = false) { + switch (rhs.GetType()) { + case kObjectType: { + SizeType count = rhs.data_.o.size; + Member* lm = reinterpret_cast(allocator.Malloc(count * sizeof(Member))); + const typename GenericValue::Member* rm = rhs.GetMembersPointer(); + for (SizeType i = 0; i < count; i++) { + new (&lm[i].name) GenericValue(rm[i].name, allocator, copyConstStrings); + new (&lm[i].value) GenericValue(rm[i].value, allocator, copyConstStrings); + } + data_.f.flags = kObjectFlag; + data_.o.size = data_.o.capacity = count; + SetMembersPointer(lm); + } + break; + case kArrayType: { + SizeType count = rhs.data_.a.size; + GenericValue* le = reinterpret_cast(allocator.Malloc(count * sizeof(GenericValue))); + const GenericValue* re = rhs.GetElementsPointer(); + for (SizeType i = 0; i < count; i++) + new (&le[i]) GenericValue(re[i], allocator, copyConstStrings); + data_.f.flags = kArrayFlag; + data_.a.size = data_.a.capacity = count; + SetElementsPointer(le); + } + break; + case kStringType: + if (rhs.data_.f.flags == 
kConstStringFlag && !copyConstStrings) { + data_.f.flags = rhs.data_.f.flags; + data_ = *reinterpret_cast(&rhs.data_); + } + else + SetStringRaw(StringRef(rhs.GetString(), rhs.GetStringLength()), allocator); + break; + default: + data_.f.flags = rhs.data_.f.flags; + data_ = *reinterpret_cast(&rhs.data_); + break; + } + } //! Constructor for boolean value. /*! \param b Boolean value @@ -672,6 +750,9 @@ public: //! Constructor for double value. explicit GenericValue(double d) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = d; data_.f.flags = kNumberDoubleFlag; } + //! Constructor for float value. + explicit GenericValue(float f) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = static_cast(f); data_.f.flags = kNumberDoubleFlag; } + //! Constructor for constant string (i.e. do not make a copy of string) GenericValue(const Ch* s, SizeType length) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(StringRef(s, length)); } @@ -753,9 +834,10 @@ public: /*! \param rhs Source of the assignment. It will become a null value after assignment. */ GenericValue& operator=(GenericValue& rhs) RAPIDJSON_NOEXCEPT { - RAPIDJSON_ASSERT(this != &rhs); - this->~GenericValue(); - RawAssign(rhs); + if (RAPIDJSON_LIKELY(this != &rhs)) { + this->~GenericValue(); + RawAssign(rhs); + } return *this; } @@ -800,12 +882,13 @@ public: \tparam SourceAllocator Allocator type of \c rhs \param rhs Value to copy from (read-only) \param allocator Allocator to use for copying + \param copyConstStrings Force copying of constant strings (e.g. referencing an in-situ buffer) */ template - GenericValue& CopyFrom(const GenericValue& rhs, Allocator& allocator) { + GenericValue& CopyFrom(const GenericValue& rhs, Allocator& allocator, bool copyConstStrings = false) { RAPIDJSON_ASSERT(static_cast(this) != static_cast(&rhs)); this->~GenericValue(); - new (this) GenericValue(rhs, allocator); + new (this) GenericValue(rhs, allocator, copyConstStrings); return *this; } @@ -846,7 +929,7 @@ public: //! Equal-to operator /*! 
\note If an object contains duplicated named member, comparing equality with any object is always \c false. - \note Linear time complexity (number of all values in the subtree and total lengths of all strings). + \note Complexity is quadratic in Object's member number and linear for the rest (number of all values in the subtree and total lengths of all strings). */ template bool operator==(const GenericValue& rhs) const { @@ -955,14 +1038,14 @@ public: uint64_t u = GetUint64(); volatile double d = static_cast(u); return (d >= 0.0) - && (d < static_cast(std::numeric_limits::max())) + && (d < static_cast((std::numeric_limits::max)())) && (u == static_cast(d)); } if (IsInt64()) { int64_t i = GetInt64(); volatile double d = static_cast(i); - return (d >= static_cast(std::numeric_limits::min())) - && (d < static_cast(std::numeric_limits::max())) + return (d >= static_cast((std::numeric_limits::min)())) + && (d < static_cast((std::numeric_limits::max)())) && (i == static_cast(d)); } return true; // double, int, uint are always lossless @@ -979,8 +1062,8 @@ public: bool IsLosslessFloat() const { if (!IsNumber()) return false; double a = GetDouble(); - if (a < static_cast(-std::numeric_limits::max()) - || a > static_cast(std::numeric_limits::max())) + if (a < static_cast(-(std::numeric_limits::max)()) + || a > static_cast((std::numeric_limits::max)())) return false; double b = static_cast(static_cast(a)); return a >= b && a <= b; // Prevent -Wfloat-equal @@ -1015,6 +1098,9 @@ public: //! Get the number of members in the object. SizeType MemberCount() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size; } + //! Get the capacity of object. + SizeType MemberCapacity() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.capacity; } + //! Check whether the object is empty. bool ObjectEmpty() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size == 0; } @@ -1083,6 +1169,21 @@ public: /*! 
\pre IsObject() == true */ MemberIterator MemberEnd() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer() + data_.o.size); } + //! Request the object to have enough capacity to store members. + /*! \param newCapacity The capacity that the object at least need to have. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note Linear time complexity. + */ + GenericValue& MemberReserve(SizeType newCapacity, Allocator &allocator) { + RAPIDJSON_ASSERT(IsObject()); + if (newCapacity > data_.o.capacity) { + SetMembersPointer(reinterpret_cast(allocator.Realloc(GetMembersPointer(), data_.o.capacity * sizeof(Member), newCapacity * sizeof(Member)))); + data_.o.capacity = newCapacity; + } + return *this; + } + //! Check whether a member exists in the object. /*! \param name Member name to be searched. @@ -1188,17 +1289,8 @@ public: RAPIDJSON_ASSERT(name.IsString()); ObjectData& o = data_.o; - if (o.size >= o.capacity) { - if (o.capacity == 0) { - o.capacity = kDefaultObjectCapacity; - SetMembersPointer(reinterpret_cast(allocator.Malloc(o.capacity * sizeof(Member)))); - } - else { - SizeType oldCapacity = o.capacity; - o.capacity += (oldCapacity + 1) / 2; // grow by factor 1.5 - SetMembersPointer(reinterpret_cast(allocator.Realloc(GetMembersPointer(), oldCapacity * sizeof(Member), o.capacity * sizeof(Member)))); - } - } + if (o.size >= o.capacity) + MemberReserve(o.capacity == 0 ? 
kDefaultObjectCapacity : (o.capacity + (o.capacity + 1) / 2), allocator); Member* members = GetMembersPointer(); members[o.size].name.RawAssign(name); members[o.size].value.RawAssign(value); @@ -1425,7 +1517,7 @@ public: MemberIterator pos = MemberBegin() + (first - MemberBegin()); for (MemberIterator itr = pos; itr != last; ++itr) itr->~Member(); - std::memmove(&*pos, &*last, static_cast(MemberEnd() - last) * sizeof(Member)); + std::memmove(static_cast(&*pos), &*last, static_cast(MemberEnd() - last) * sizeof(Member)); data_.o.size -= static_cast(last - first); return pos; } @@ -1628,8 +1720,8 @@ public: RAPIDJSON_ASSERT(last <= End()); ValueIterator pos = Begin() + (first - Begin()); for (ValueIterator itr = pos; itr != last; ++itr) - itr->~GenericValue(); - std::memmove(pos, last, static_cast(End() - last) * sizeof(GenericValue)); + itr->~GenericValue(); + std::memmove(static_cast(pos), last, static_cast(End() - last) * sizeof(GenericValue)); data_.a.size -= static_cast(last - first); return pos; } @@ -1671,7 +1763,7 @@ public: GenericValue& SetInt64(int64_t i64) { this->~GenericValue(); new (this) GenericValue(i64); return *this; } GenericValue& SetUint64(uint64_t u64) { this->~GenericValue(); new (this) GenericValue(u64); return *this; } GenericValue& SetDouble(double d) { this->~GenericValue(); new (this) GenericValue(d); return *this; } - GenericValue& SetFloat(float f) { this->~GenericValue(); new (this) GenericValue(f); return *this; } + GenericValue& SetFloat(float f) { this->~GenericValue(); new (this) GenericValue(static_cast(f)); return *this; } //@} @@ -1710,7 +1802,7 @@ public: \return The value itself for fluent API. 
\post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length */ - GenericValue& SetString(const Ch* s, SizeType length, Allocator& allocator) { this->~GenericValue(); SetStringRaw(StringRef(s, length), allocator); return *this; } + GenericValue& SetString(const Ch* s, SizeType length, Allocator& allocator) { return SetString(StringRef(s, length), allocator); } //! Set this value as a string by copying from source string. /*! \param s source string. @@ -1718,7 +1810,15 @@ public: \return The value itself for fluent API. \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length */ - GenericValue& SetString(const Ch* s, Allocator& allocator) { return SetString(s, internal::StrLen(s), allocator); } + GenericValue& SetString(const Ch* s, Allocator& allocator) { return SetString(StringRef(s), allocator); } + + //! Set this value as a string by copying from source string. + /*! \param s source string reference + \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \post IsString() == true && GetString() != s.s && strcmp(GetString(),s) == 0 && GetStringLength() == length + */ + GenericValue& SetString(StringRefType s, Allocator& allocator) { this->~GenericValue(); SetStringRaw(s, allocator); return *this; } #if RAPIDJSON_HAS_STDSTRING //! Set this value as a string by copying from source string. @@ -1728,7 +1828,7 @@ public: \post IsString() == true && GetString() != s.data() && strcmp(GetString(),s.data() == 0 && GetStringLength() == s.size() \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. 
*/ - GenericValue& SetString(const std::basic_string& s, Allocator& allocator) { return SetString(s.data(), SizeType(s.size()), allocator); } + GenericValue& SetString(const std::basic_string& s, Allocator& allocator) { return SetString(StringRef(s), allocator); } #endif //@} @@ -1936,7 +2036,7 @@ private: if (count) { GenericValue* e = static_cast(allocator.Malloc(count * sizeof(GenericValue))); SetElementsPointer(e); - std::memcpy(e, values, count * sizeof(GenericValue)); + std::memcpy(static_cast(e), values, count * sizeof(GenericValue)); } else SetElementsPointer(0); @@ -1949,7 +2049,7 @@ private: if (count) { Member* m = static_cast(allocator.Malloc(count * sizeof(Member))); SetMembersPointer(m); - std::memcpy(m, members, count * sizeof(Member)); + std::memcpy(static_cast(m), members, count * sizeof(Member)); } else SetMembersPointer(0); @@ -2038,7 +2138,7 @@ public: GenericValue(type), allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_() { if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); } //! Constructor @@ -2051,7 +2151,7 @@ public: allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_() { if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); } #if RAPIDJSON_HAS_CXX11_RVALUE_REFS @@ -2112,6 +2212,10 @@ public: return *this; } + // Allow Swap with ValueType. + // Refer to Effective C++ 3rd Edition/Item 33: Avoid hiding inherited names. + using ValueType::Swap; + //! free-standing swap function helper /*! 
Helper function to enable support for common swap implementation pattern based on \c std::swap: @@ -2243,7 +2347,7 @@ public: template GenericDocument& Parse(const typename SourceEncoding::Ch* str, size_t length) { RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); - MemoryStream ms(static_cast(str), length * sizeof(typename SourceEncoding::Ch)); + MemoryStream ms(reinterpret_cast(str), length * sizeof(typename SourceEncoding::Ch)); EncodedInputStream is(ms); ParseStream(is); return *this; @@ -2280,7 +2384,7 @@ public: //!@name Handling parse errors //!@{ - //! Whether a parse error has occured in the last parsing. + //! Whether a parse error has occurred in the last parsing. bool HasParseError() const { return parseResult_.IsError(); } //! Get the \ref ParseErrorCode of last parsing. @@ -2401,35 +2505,6 @@ private: //! GenericDocument with UTF8 encoding typedef GenericDocument > Document; -// defined here due to the dependency on GenericDocument -template -template -inline -GenericValue::GenericValue(const GenericValue& rhs, Allocator& allocator) -{ - switch (rhs.GetType()) { - case kObjectType: - case kArrayType: { // perform deep copy via SAX Handler - GenericDocument d(&allocator); - rhs.Accept(d); - RawAssign(*d.stack_.template Pop(1)); - } - break; - case kStringType: - if (rhs.data_.f.flags == kConstStringFlag) { - data_.f.flags = rhs.data_.f.flags; - data_ = *reinterpret_cast(&rhs.data_); - } else { - SetStringRaw(StringRef(rhs.GetString(), rhs.GetStringLength()), allocator); - } - break; - default: - data_.f.flags = rhs.data_.f.flags; - data_ = *reinterpret_cast(&rhs.data_); - break; - } -} - //! Helper class for accessing Value of array type. /*! Instance of this helper class is obtained by \c GenericValue::GetArray(). 
@@ -2510,6 +2585,7 @@ public: ~GenericObject() {} SizeType MemberCount() const { return value_.MemberCount(); } + SizeType MemberCapacity() const { return value_.MemberCapacity(); } bool ObjectEmpty() const { return value_.ObjectEmpty(); } template ValueType& operator[](T* name) const { return value_[name]; } template ValueType& operator[](const GenericValue& name) const { return value_[name]; } @@ -2518,6 +2594,7 @@ public: #endif MemberIterator MemberBegin() const { return value_.MemberBegin(); } MemberIterator MemberEnd() const { return value_.MemberEnd(); } + GenericObject MemberReserve(SizeType newCapacity, AllocatorType &allocator) const { value_.MemberReserve(newCapacity, allocator); return *this; } bool HasMember(const Ch* name) const { return value_.HasMember(name); } #if RAPIDJSON_HAS_STDSTRING bool HasMember(const std::basic_string& name) const { return value_.HasMember(name); } @@ -2543,7 +2620,7 @@ public: GenericObject AddMember(StringRefType name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } GenericObject AddMember(StringRefType name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericObject)) AddMember(StringRefType name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } - void RemoveAllMembers() { return value_.RemoveAllMembers(); } + void RemoveAllMembers() { value_.RemoveAllMembers(); } bool RemoveMember(const Ch* name) const { return value_.RemoveMember(name); } #if RAPIDJSON_HAS_STDSTRING bool RemoveMember(const std::basic_string& name) const { return value_.RemoveMember(name); } diff --git a/src/3rdparty/rapidjson/encodedstream.h b/src/3rdparty/rapidjson/encodedstream.h index 14506838..223601c0 100644 --- a/src/3rdparty/rapidjson/encodedstream.h +++ 
b/src/3rdparty/rapidjson/encodedstream.h @@ -200,7 +200,7 @@ private: // xx xx xx xx UTF-8 if (!hasBOM_) { - unsigned pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0); + int pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0); switch (pattern) { case 0x08: type_ = kUTF32BE; break; case 0x0A: type_ = kUTF16BE; break; diff --git a/src/3rdparty/rapidjson/encodings.h b/src/3rdparty/rapidjson/encodings.h index baa7c2b1..0b244679 100644 --- a/src/3rdparty/rapidjson/encodings.h +++ b/src/3rdparty/rapidjson/encodings.h @@ -17,7 +17,7 @@ #include "rapidjson.h" -#ifdef _MSC_VER +#if defined(_MSC_VER) && !defined(__clang__) RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data RAPIDJSON_DIAG_OFF(4702) // unreachable code @@ -144,9 +144,9 @@ struct UTF8 { template static bool Decode(InputStream& is, unsigned* codepoint) { -#define COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast(c) & 0x3Fu) -#define TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) -#define TAIL() COPY(); TRANS(0x70) +#define RAPIDJSON_COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast(c) & 0x3Fu) +#define RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) +#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70) typename InputStream::Ch c = is.Take(); if (!(c & 0x80)) { *codepoint = static_cast(c); @@ -157,48 +157,48 @@ struct UTF8 { if (type >= 32) { *codepoint = 0; } else { - *codepoint = (0xFF >> type) & static_cast(c); + *codepoint = (0xFFu >> type) & static_cast(c); } bool result = true; switch (type) { - case 2: TAIL(); return result; - case 3: TAIL(); TAIL(); return result; - case 4: COPY(); TRANS(0x50); TAIL(); return result; - case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result; - case 6: TAIL(); TAIL(); TAIL(); return result; - case 10: COPY(); TRANS(0x20); TAIL(); return result; - case 11: COPY(); TRANS(0x60); 
TAIL(); TAIL(); return result; + case 2: RAPIDJSON_TAIL(); return result; + case 3: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 4: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x50); RAPIDJSON_TAIL(); return result; + case 5: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x10); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 6: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 10: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x20); RAPIDJSON_TAIL(); return result; + case 11: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x60); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; default: return false; } -#undef COPY -#undef TRANS -#undef TAIL +#undef RAPIDJSON_COPY +#undef RAPIDJSON_TRANS +#undef RAPIDJSON_TAIL } template static bool Validate(InputStream& is, OutputStream& os) { -#define COPY() os.Put(c = is.Take()) -#define TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) -#define TAIL() COPY(); TRANS(0x70) +#define RAPIDJSON_COPY() os.Put(c = is.Take()) +#define RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) +#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70) Ch c; - COPY(); + RAPIDJSON_COPY(); if (!(c & 0x80)) return true; bool result = true; switch (GetRange(static_cast(c))) { - case 2: TAIL(); return result; - case 3: TAIL(); TAIL(); return result; - case 4: COPY(); TRANS(0x50); TAIL(); return result; - case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result; - case 6: TAIL(); TAIL(); TAIL(); return result; - case 10: COPY(); TRANS(0x20); TAIL(); return result; - case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result; + case 2: RAPIDJSON_TAIL(); return result; + case 3: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 4: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x50); RAPIDJSON_TAIL(); return result; + case 5: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x10); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 6: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 10: 
RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x20); RAPIDJSON_TAIL(); return result; + case 11: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x60); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; default: return false; } -#undef COPY -#undef TRANS -#undef TAIL +#undef RAPIDJSON_COPY +#undef RAPIDJSON_TRANS +#undef RAPIDJSON_TAIL } static unsigned char GetRange(unsigned char c) { @@ -283,7 +283,7 @@ struct UTF16 { RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); unsigned v = codepoint - 0x10000; os.Put(static_cast((v >> 10) | 0xD800)); - os.Put((v & 0x3FF) | 0xDC00); + os.Put(static_cast((v & 0x3FF) | 0xDC00)); } } @@ -299,7 +299,7 @@ struct UTF16 { RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); unsigned v = codepoint - 0x10000; PutUnsafe(os, static_cast((v >> 10) | 0xD800)); - PutUnsafe(os, (v & 0x3FF) | 0xDC00); + PutUnsafe(os, static_cast((v & 0x3FF) | 0xDC00)); } } @@ -384,7 +384,7 @@ struct UTF16BE : UTF16 { static CharType Take(InputByteStream& is) { RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); unsigned c = static_cast(static_cast(is.Take())) << 8; - c |= static_cast(is.Take()); + c |= static_cast(static_cast(is.Take())); return static_cast(c); } @@ -620,28 +620,28 @@ struct AutoUTF { #define RAPIDJSON_ENCODINGS_FUNC(x) UTF8::x, UTF16LE::x, UTF16BE::x, UTF32LE::x, UTF32BE::x template - RAPIDJSON_FORCEINLINE static void Encode(OutputStream& os, unsigned codepoint) { + static RAPIDJSON_FORCEINLINE void Encode(OutputStream& os, unsigned codepoint) { typedef void (*EncodeFunc)(OutputStream&, unsigned); static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) }; (*f[os.GetType()])(os, codepoint); } template - RAPIDJSON_FORCEINLINE static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + static RAPIDJSON_FORCEINLINE void EncodeUnsafe(OutputStream& os, unsigned codepoint) { typedef void (*EncodeFunc)(OutputStream&, unsigned); static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(EncodeUnsafe) }; (*f[os.GetType()])(os, codepoint); } template - 
RAPIDJSON_FORCEINLINE static bool Decode(InputStream& is, unsigned* codepoint) { + static RAPIDJSON_FORCEINLINE bool Decode(InputStream& is, unsigned* codepoint) { typedef bool (*DecodeFunc)(InputStream&, unsigned*); static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) }; return (*f[is.GetType()])(is, codepoint); } template - RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) { typedef bool (*ValidateFunc)(InputStream&, OutputStream&); static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) }; return (*f[is.GetType()])(is, os); @@ -658,7 +658,7 @@ template struct Transcoder { //! Take one Unicode codepoint from source encoding, convert it to target encoding and put it to the output stream. template - RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) { + static RAPIDJSON_FORCEINLINE bool Transcode(InputStream& is, OutputStream& os) { unsigned codepoint; if (!SourceEncoding::Decode(is, &codepoint)) return false; @@ -667,7 +667,7 @@ struct Transcoder { } template - RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) { + static RAPIDJSON_FORCEINLINE bool TranscodeUnsafe(InputStream& is, OutputStream& os) { unsigned codepoint; if (!SourceEncoding::Decode(is, &codepoint)) return false; @@ -677,7 +677,7 @@ struct Transcoder { //! Validate one Unicode codepoint from an encoded stream. template - RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) { return Transcode(is, os); // Since source/target encoding is different, must transcode. 
} }; @@ -690,26 +690,26 @@ inline void PutUnsafe(Stream& stream, typename Stream::Ch c); template struct Transcoder { template - RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) { + static RAPIDJSON_FORCEINLINE bool Transcode(InputStream& is, OutputStream& os) { os.Put(is.Take()); // Just copy one code unit. This semantic is different from primary template class. return true; } template - RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) { + static RAPIDJSON_FORCEINLINE bool TranscodeUnsafe(InputStream& is, OutputStream& os) { PutUnsafe(os, is.Take()); // Just copy one code unit. This semantic is different from primary template class. return true; } template - RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) { return Encoding::Validate(is, os); // source/target encoding are the same } }; RAPIDJSON_NAMESPACE_END -#if defined(__GNUC__) || defined(_MSC_VER) +#if defined(__GNUC__) || (defined(_MSC_VER) && !defined(__clang__)) RAPIDJSON_DIAG_POP #endif diff --git a/src/3rdparty/rapidjson/error/error.h b/src/3rdparty/rapidjson/error/error.h index 95cb31a7..9311d2f0 100644 --- a/src/3rdparty/rapidjson/error/error.h +++ b/src/3rdparty/rapidjson/error/error.h @@ -104,6 +104,8 @@ enum ParseErrorCode { \see GenericReader::Parse, GenericDocument::Parse */ struct ParseResult { + //!! Unspecified boolean type + typedef bool (ParseResult::*BooleanType)() const; public: //! Default constructor, no error. ParseResult() : code_(kParseErrorNone), offset_(0) {} @@ -115,8 +117,8 @@ public: //! Get the error offset, if \ref IsError(), 0 otherwise. size_t Offset() const { return offset_; } - //! Conversion to \c bool, returns \c true, iff !\ref IsError(). - operator bool() const { return !IsError(); } + //! Explicit conversion to \c bool, returns \c true, iff !\ref IsError(). 
+ operator BooleanType() const { return !IsError() ? &ParseResult::IsError : NULL; } //! Whether the result is an error. bool IsError() const { return code_ != kParseErrorNone; } @@ -124,6 +126,10 @@ public: bool operator==(ParseErrorCode code) const { return code_ == code; } friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; } + bool operator!=(const ParseResult& that) const { return !(*this == that); } + bool operator!=(ParseErrorCode code) const { return !(*this == code); } + friend bool operator!=(ParseErrorCode code, const ParseResult & err) { return err != code; } + //! Reset error code. void Clear() { Set(kParseErrorNone); } //! Update error code and offset. diff --git a/src/3rdparty/rapidjson/filereadstream.h b/src/3rdparty/rapidjson/filereadstream.h index b56ea13b..6b343707 100644 --- a/src/3rdparty/rapidjson/filereadstream.h +++ b/src/3rdparty/rapidjson/filereadstream.h @@ -59,7 +59,7 @@ public: // For encoding detection only. const Ch* Peek4() const { - return (current_ + 4 <= bufferLast_) ? current_ : 0; + return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0; } private: @@ -68,7 +68,7 @@ private: ++current_; else if (!eof_) { count_ += readCount_; - readCount_ = fread(buffer_, 1, bufferSize_, fp_); + readCount_ = std::fread(buffer_, 1, bufferSize_, fp_); bufferLast_ = buffer_ + readCount_ - 1; current_ = buffer_; diff --git a/src/3rdparty/rapidjson/filewritestream.h b/src/3rdparty/rapidjson/filewritestream.h index 6378dd60..8b48fee1 100644 --- a/src/3rdparty/rapidjson/filewritestream.h +++ b/src/3rdparty/rapidjson/filewritestream.h @@ -25,7 +25,7 @@ RAPIDJSON_DIAG_OFF(unreachable-code) RAPIDJSON_NAMESPACE_BEGIN -//! Wrapper of C file stream for input using fread(). +//! Wrapper of C file stream for output using fwrite(). /*! 
\note implements Stream concept */ @@ -62,7 +62,7 @@ public: void Flush() { if (current_ != buffer_) { - size_t result = fwrite(buffer_, 1, static_cast(current_ - buffer_), fp_); + size_t result = std::fwrite(buffer_, 1, static_cast(current_ - buffer_), fp_); if (result < static_cast(current_ - buffer_)) { // failure deliberately ignored at this time // added to avoid warn_unused_result build errors diff --git a/src/3rdparty/rapidjson/internal/biginteger.h b/src/3rdparty/rapidjson/internal/biginteger.h index 9d3e88c9..a31c8a88 100644 --- a/src/3rdparty/rapidjson/internal/biginteger.h +++ b/src/3rdparty/rapidjson/internal/biginteger.h @@ -17,7 +17,7 @@ #include "../rapidjson.h" -#if defined(_MSC_VER) && defined(_M_AMD64) +#if defined(_MSC_VER) && !__INTEL_COMPILER && defined(_M_AMD64) #include // for _umul128 #pragma intrinsic(_umul128) #endif @@ -133,7 +133,7 @@ public: RAPIDJSON_ASSERT(count_ + offset <= kCapacity); if (interShift == 0) { - std::memmove(&digits_[count_ - 1 + offset], &digits_[count_ - 1], count_ * sizeof(Type)); + std::memmove(digits_ + offset, digits_, count_ * sizeof(Type)); count_ += offset; } else { diff --git a/src/3rdparty/rapidjson/internal/diyfp.h b/src/3rdparty/rapidjson/internal/diyfp.h index c9fefdc6..b6c2cf56 100644 --- a/src/3rdparty/rapidjson/internal/diyfp.h +++ b/src/3rdparty/rapidjson/internal/diyfp.h @@ -1,5 +1,5 @@ // Tencent is pleased to support the open source community by making RapidJSON available. -// +// // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. // // Licensed under the MIT License (the "License"); you may not use this file except @@ -7,9 +7,9 @@ // // http://opensource.org/licenses/MIT // -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // This is a C++ header-only implementation of Grisu2 algorithm from the publication: @@ -20,8 +20,9 @@ #define RAPIDJSON_DIYFP_H_ #include "../rapidjson.h" +#include -#if defined(_MSC_VER) && defined(_M_AMD64) +#if defined(_MSC_VER) && defined(_M_AMD64) && !defined(__INTEL_COMPILER) #include #pragma intrinsic(_BitScanReverse64) #pragma intrinsic(_umul128) @@ -56,7 +57,7 @@ struct DiyFp { if (biased_e != 0) { f = significand + kDpHiddenBit; e = biased_e - kDpExponentBias; - } + } else { f = significand; e = kDpMinExponent + 1; @@ -99,6 +100,7 @@ struct DiyFp { } DiyFp Normalize() const { + RAPIDJSON_ASSERT(f != 0); // https://stackoverflow.com/a/26809183/291737 #if defined(_MSC_VER) && defined(_M_AMD64) unsigned long index; _BitScanReverse64(&index, f); @@ -141,7 +143,16 @@ struct DiyFp { double d; uint64_t u64; }u; - const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 : + RAPIDJSON_ASSERT(f <= kDpHiddenBit + kDpSignificandMask); + if (e < kDpDenormalExponent) { + // Underflow. + return 0.0; + } + if (e >= kDpMaxExponent) { + // Overflow. + return std::numeric_limits::infinity(); + } + const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 
0 : static_cast(e + kDpExponentBias); u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize); return u.d; @@ -220,9 +231,10 @@ inline DiyFp GetCachedPowerByIndex(size_t index) { 641, 667, 694, 720, 747, 774, 800, 827, 853, 880, 907, 933, 960, 986, 1013, 1039, 1066 }; + RAPIDJSON_ASSERT(index < 87); return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]); } - + inline DiyFp GetCachedPower(int e, int* K) { //int k = static_cast(ceil((-61 - e) * 0.30102999566398114)) + 374; @@ -238,10 +250,11 @@ inline DiyFp GetCachedPower(int e, int* K) { } inline DiyFp GetCachedPower10(int exp, int *outExp) { - unsigned index = (static_cast(exp) + 348u) / 8u; - *outExp = -348 + static_cast(index) * 8; - return GetCachedPowerByIndex(index); - } + RAPIDJSON_ASSERT(exp >= -348); + unsigned index = static_cast(exp + 348) / 8u; + *outExp = -348 + static_cast(index) * 8; + return GetCachedPowerByIndex(index); +} #ifdef __GNUC__ RAPIDJSON_DIAG_POP diff --git a/src/3rdparty/rapidjson/internal/dtoa.h b/src/3rdparty/rapidjson/internal/dtoa.h index 8d6350e6..bf2e9b2e 100644 --- a/src/3rdparty/rapidjson/internal/dtoa.h +++ b/src/3rdparty/rapidjson/internal/dtoa.h @@ -41,7 +41,7 @@ inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uin } } -inline unsigned CountDecimalDigit32(uint32_t n) { +inline int CountDecimalDigit32(uint32_t n) { // Simple pure C++ implementation was faster than __builtin_clz version in this situation. 
if (n < 10) return 1; if (n < 100) return 2; @@ -63,7 +63,7 @@ inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buff const DiyFp wp_w = Mp - W; uint32_t p1 = static_cast(Mp.f >> -one.e); uint64_t p2 = Mp.f & (one.f - 1); - unsigned kappa = CountDecimalDigit32(p1); // kappa in [0, 9] + int kappa = CountDecimalDigit32(p1); // kappa in [0, 9] *len = 0; while (kappa > 0) { @@ -102,8 +102,8 @@ inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buff kappa--; if (p2 < delta) { *K += kappa; - int index = -static_cast(kappa); - GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[-static_cast(kappa)] : 0)); + int index = -kappa; + GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[index] : 0)); return; } } diff --git a/src/3rdparty/rapidjson/internal/ieee754.h b/src/3rdparty/rapidjson/internal/ieee754.h index 82bb0b99..c2684ba2 100644 --- a/src/3rdparty/rapidjson/internal/ieee754.h +++ b/src/3rdparty/rapidjson/internal/ieee754.h @@ -48,13 +48,13 @@ public: int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; } uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; } - static unsigned EffectiveSignificandSize(int order) { + static int EffectiveSignificandSize(int order) { if (order >= -1021) return 53; else if (order <= -1074) return 0; else - return static_cast(order) + 1074; + return order + 1074; } private: diff --git a/src/3rdparty/rapidjson/internal/itoa.h b/src/3rdparty/rapidjson/internal/itoa.h index 01a4e7e7..9b1c45cc 100644 --- a/src/3rdparty/rapidjson/internal/itoa.h +++ b/src/3rdparty/rapidjson/internal/itoa.h @@ -1,5 +1,5 @@ // Tencent is pleased to support the open source community by making RapidJSON available. -// +// // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 
// // Licensed under the MIT License (the "License"); you may not use this file except @@ -7,9 +7,9 @@ // // http://opensource.org/licenses/MIT // -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #ifndef RAPIDJSON_ITOA_ @@ -37,12 +37,14 @@ inline const char* GetDigitsLut() { } inline char* u32toa(uint32_t value, char* buffer) { + RAPIDJSON_ASSERT(buffer != 0); + const char* cDigitsLut = GetDigitsLut(); if (value < 10000) { const uint32_t d1 = (value / 100) << 1; const uint32_t d2 = (value % 100) << 1; - + if (value >= 1000) *buffer++ = cDigitsLut[d1]; if (value >= 100) @@ -55,13 +57,13 @@ inline char* u32toa(uint32_t value, char* buffer) { // value = bbbbcccc const uint32_t b = value / 10000; const uint32_t c = value % 10000; - + const uint32_t d1 = (b / 100) << 1; const uint32_t d2 = (b % 100) << 1; - + const uint32_t d3 = (c / 100) << 1; const uint32_t d4 = (c % 100) << 1; - + if (value >= 10000000) *buffer++ = cDigitsLut[d1]; if (value >= 1000000) @@ -69,7 +71,7 @@ inline char* u32toa(uint32_t value, char* buffer) { if (value >= 100000) *buffer++ = cDigitsLut[d2]; *buffer++ = cDigitsLut[d2 + 1]; - + *buffer++ = cDigitsLut[d3]; *buffer++ = cDigitsLut[d3 + 1]; *buffer++ = cDigitsLut[d4]; @@ -77,10 +79,10 @@ inline char* u32toa(uint32_t value, char* buffer) { } else { // value = aabbbbcccc in decimal - + const uint32_t a = value / 100000000; // 1 to 42 value %= 100000000; - + if (a >= 10) { const unsigned i = a << 1; *buffer++ = cDigitsLut[i]; @@ -91,13 +93,13 
@@ inline char* u32toa(uint32_t value, char* buffer) { const uint32_t b = value / 10000; // 0 to 9999 const uint32_t c = value % 10000; // 0 to 9999 - + const uint32_t d1 = (b / 100) << 1; const uint32_t d2 = (b % 100) << 1; - + const uint32_t d3 = (c / 100) << 1; const uint32_t d4 = (c % 100) << 1; - + *buffer++ = cDigitsLut[d1]; *buffer++ = cDigitsLut[d1 + 1]; *buffer++ = cDigitsLut[d2]; @@ -111,6 +113,7 @@ inline char* u32toa(uint32_t value, char* buffer) { } inline char* i32toa(int32_t value, char* buffer) { + RAPIDJSON_ASSERT(buffer != 0); uint32_t u = static_cast(value); if (value < 0) { *buffer++ = '-'; @@ -121,6 +124,7 @@ inline char* i32toa(int32_t value, char* buffer) { } inline char* u64toa(uint64_t value, char* buffer) { + RAPIDJSON_ASSERT(buffer != 0); const char* cDigitsLut = GetDigitsLut(); const uint64_t kTen8 = 100000000; const uint64_t kTen9 = kTen8 * 10; @@ -131,13 +135,13 @@ inline char* u64toa(uint64_t value, char* buffer) { const uint64_t kTen14 = kTen8 * 1000000; const uint64_t kTen15 = kTen8 * 10000000; const uint64_t kTen16 = kTen8 * kTen8; - + if (value < kTen8) { uint32_t v = static_cast(value); if (v < 10000) { const uint32_t d1 = (v / 100) << 1; const uint32_t d2 = (v % 100) << 1; - + if (v >= 1000) *buffer++ = cDigitsLut[d1]; if (v >= 100) @@ -150,13 +154,13 @@ inline char* u64toa(uint64_t value, char* buffer) { // value = bbbbcccc const uint32_t b = v / 10000; const uint32_t c = v % 10000; - + const uint32_t d1 = (b / 100) << 1; const uint32_t d2 = (b % 100) << 1; - + const uint32_t d3 = (c / 100) << 1; const uint32_t d4 = (c % 100) << 1; - + if (value >= 10000000) *buffer++ = cDigitsLut[d1]; if (value >= 1000000) @@ -164,7 +168,7 @@ inline char* u64toa(uint64_t value, char* buffer) { if (value >= 100000) *buffer++ = cDigitsLut[d2]; *buffer++ = cDigitsLut[d2 + 1]; - + *buffer++ = cDigitsLut[d3]; *buffer++ = cDigitsLut[d3 + 1]; *buffer++ = cDigitsLut[d4]; @@ -174,22 +178,22 @@ inline char* u64toa(uint64_t value, char* buffer) { else if 
(value < kTen16) { const uint32_t v0 = static_cast(value / kTen8); const uint32_t v1 = static_cast(value % kTen8); - + const uint32_t b0 = v0 / 10000; const uint32_t c0 = v0 % 10000; - + const uint32_t d1 = (b0 / 100) << 1; const uint32_t d2 = (b0 % 100) << 1; - + const uint32_t d3 = (c0 / 100) << 1; const uint32_t d4 = (c0 % 100) << 1; const uint32_t b1 = v1 / 10000; const uint32_t c1 = v1 % 10000; - + const uint32_t d5 = (b1 / 100) << 1; const uint32_t d6 = (b1 % 100) << 1; - + const uint32_t d7 = (c1 / 100) << 1; const uint32_t d8 = (c1 % 100) << 1; @@ -207,9 +211,8 @@ inline char* u64toa(uint64_t value, char* buffer) { *buffer++ = cDigitsLut[d3 + 1]; if (value >= kTen9) *buffer++ = cDigitsLut[d4]; - if (value >= kTen8) - *buffer++ = cDigitsLut[d4 + 1]; - + + *buffer++ = cDigitsLut[d4 + 1]; *buffer++ = cDigitsLut[d5]; *buffer++ = cDigitsLut[d5 + 1]; *buffer++ = cDigitsLut[d6]; @@ -222,7 +225,7 @@ inline char* u64toa(uint64_t value, char* buffer) { else { const uint32_t a = static_cast(value / kTen16); // 1 to 1844 value %= kTen16; - + if (a < 10) *buffer++ = static_cast('0' + static_cast(a)); else if (a < 100) { @@ -232,7 +235,7 @@ inline char* u64toa(uint64_t value, char* buffer) { } else if (a < 1000) { *buffer++ = static_cast('0' + static_cast(a / 100)); - + const uint32_t i = (a % 100) << 1; *buffer++ = cDigitsLut[i]; *buffer++ = cDigitsLut[i + 1]; @@ -245,28 +248,28 @@ inline char* u64toa(uint64_t value, char* buffer) { *buffer++ = cDigitsLut[j]; *buffer++ = cDigitsLut[j + 1]; } - + const uint32_t v0 = static_cast(value / kTen8); const uint32_t v1 = static_cast(value % kTen8); - + const uint32_t b0 = v0 / 10000; const uint32_t c0 = v0 % 10000; - + const uint32_t d1 = (b0 / 100) << 1; const uint32_t d2 = (b0 % 100) << 1; - + const uint32_t d3 = (c0 / 100) << 1; const uint32_t d4 = (c0 % 100) << 1; - + const uint32_t b1 = v1 / 10000; const uint32_t c1 = v1 % 10000; - + const uint32_t d5 = (b1 / 100) << 1; const uint32_t d6 = (b1 % 100) << 1; - + const 
uint32_t d7 = (c1 / 100) << 1; const uint32_t d8 = (c1 % 100) << 1; - + *buffer++ = cDigitsLut[d1]; *buffer++ = cDigitsLut[d1 + 1]; *buffer++ = cDigitsLut[d2]; @@ -284,11 +287,12 @@ inline char* u64toa(uint64_t value, char* buffer) { *buffer++ = cDigitsLut[d8]; *buffer++ = cDigitsLut[d8 + 1]; } - + return buffer; } inline char* i64toa(int64_t value, char* buffer) { + RAPIDJSON_ASSERT(buffer != 0); uint64_t u = static_cast(value); if (value < 0) { *buffer++ = '-'; diff --git a/src/3rdparty/rapidjson/internal/meta.h b/src/3rdparty/rapidjson/internal/meta.h index 5a9aaa42..d401edf8 100644 --- a/src/3rdparty/rapidjson/internal/meta.h +++ b/src/3rdparty/rapidjson/internal/meta.h @@ -21,7 +21,8 @@ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(effc++) #endif -#if defined(_MSC_VER) + +#if defined(_MSC_VER) && !defined(__clang__) RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(6334) #endif @@ -174,7 +175,11 @@ template struct RemoveSfinaeTag { typedef T Type; RAPIDJSON_NAMESPACE_END //@endcond -#if defined(__GNUC__) || defined(_MSC_VER) +#if defined(_MSC_VER) && !defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#ifdef __GNUC__ RAPIDJSON_DIAG_POP #endif diff --git a/src/3rdparty/rapidjson/internal/regex.h b/src/3rdparty/rapidjson/internal/regex.h index 422a5240..16e35592 100644 --- a/src/3rdparty/rapidjson/internal/regex.h +++ b/src/3rdparty/rapidjson/internal/regex.h @@ -24,16 +24,17 @@ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(padded) RAPIDJSON_DIAG_OFF(switch-enum) RAPIDJSON_DIAG_OFF(implicit-fallthrough) +#elif defined(_MSC_VER) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated #endif #ifdef __GNUC__ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(effc++) +#if __GNUC__ >= 7 +RAPIDJSON_DIAG_OFF(implicit-fallthrough) #endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated #endif #ifndef RAPIDJSON_REGEX_VERBOSE @@ -43,12 +44,40 @@ RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be 
generated RAPIDJSON_NAMESPACE_BEGIN namespace internal { +/////////////////////////////////////////////////////////////////////////////// +// DecodedStream + +template +class DecodedStream { +public: + DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); } + unsigned Peek() { return codepoint_; } + unsigned Take() { + unsigned c = codepoint_; + if (c) // No further decoding when '\0' + Decode(); + return c; + } + +private: + void Decode() { + if (!Encoding::Decode(ss_, &codepoint_)) + codepoint_ = 0; + } + + SourceStream& ss_; + unsigned codepoint_; +}; + /////////////////////////////////////////////////////////////////////////////// // GenericRegex static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1 static const SizeType kRegexInvalidRange = ~SizeType(0); +template +class GenericRegexSearch; + //! Regular expression engine with subset of ECMAscript grammar. /*! Supported regular expression syntax: @@ -84,45 +113,29 @@ static const SizeType kRegexInvalidRange = ~SizeType(0); template class GenericRegex { public: + typedef Encoding EncodingType; typedef typename Encoding::Ch Ch; + template friend class GenericRegexSearch; GenericRegex(const Ch* source, Allocator* allocator = 0) : - states_(allocator, 256), ranges_(allocator, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(), - stateSet_(), state0_(allocator, 0), state1_(allocator, 0), anchorBegin_(), anchorEnd_() + ownAllocator_(allocator ? 0 : RAPIDJSON_NEW(Allocator)()), allocator_(allocator ? 
allocator : ownAllocator_), + states_(allocator_, 256), ranges_(allocator_, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(), + anchorBegin_(), anchorEnd_() { GenericStringStream ss(source); - DecodedStream > ds(ss); + DecodedStream, Encoding> ds(ss); Parse(ds); } - ~GenericRegex() { - Allocator::Free(stateSet_); + ~GenericRegex() + { + RAPIDJSON_DELETE(ownAllocator_); } bool IsValid() const { return root_ != kRegexInvalidState; } - template - bool Match(InputStream& is) const { - return SearchWithAnchoring(is, true, true); - } - - bool Match(const Ch* s) const { - GenericStringStream is(s); - return Match(is); - } - - template - bool Search(InputStream& is) const { - return SearchWithAnchoring(is, anchorBegin_, anchorEnd_); - } - - bool Search(const Ch* s) const { - GenericStringStream is(s); - return Search(is); - } - private: enum Operator { kZeroOrOne, @@ -157,28 +170,6 @@ private: SizeType minIndex; }; - template - class DecodedStream { - public: - DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); } - unsigned Peek() { return codepoint_; } - unsigned Take() { - unsigned c = codepoint_; - if (c) // No further decoding when '\0' - Decode(); - return c; - } - - private: - void Decode() { - if (!Encoding::Decode(ss_, &codepoint_)) - codepoint_ = 0; - } - - SourceStream& ss_; - unsigned codepoint_; - }; - State& GetState(SizeType index) { RAPIDJSON_ASSERT(index < stateCount_); return states_.template Bottom()[index]; @@ -200,11 +191,10 @@ private: } template - void Parse(DecodedStream& ds) { - Allocator allocator; - Stack operandStack(&allocator, 256); // Frag - Stack operatorStack(&allocator, 256); // Operator - Stack atomCountStack(&allocator, 256); // unsigned (Atom per parenthesis) + void Parse(DecodedStream& ds) { + Stack operandStack(allocator_, 256); // Frag + Stack operatorStack(allocator_, 256); // Operator + Stack atomCountStack(allocator_, 256); // unsigned (Atom per parenthesis) *atomCountStack.template Push() = 0; @@ 
-327,14 +317,6 @@ private: printf("\n"); #endif } - - // Preallocate buffer for SearchWithAnchoring() - RAPIDJSON_ASSERT(stateSet_ == 0); - if (stateCount_ > 0) { - stateSet_ = static_cast(states_.GetAllocator().Malloc(GetStateSetSize())); - state0_.template Reserve(stateCount_); - state1_.template Reserve(stateCount_); - } } SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) { @@ -413,8 +395,7 @@ private: } return false; - default: - RAPIDJSON_ASSERT(op == kOneOrMore); + case kOneOrMore: if (operandStack.GetSize() >= sizeof(Frag)) { Frag e = *operandStack.template Pop(1); SizeType s = NewState(kRegexInvalidState, e.start, 0); @@ -423,6 +404,10 @@ private: return true; } return false; + + default: + // syntax error (e.g. unclosed kLeftParenthesis) + return false; } } @@ -483,7 +468,7 @@ private: } template - bool ParseUnsigned(DecodedStream& ds, unsigned* u) { + bool ParseUnsigned(DecodedStream& ds, unsigned* u) { unsigned r = 0; if (ds.Peek() < '0' || ds.Peek() > '9') return false; @@ -497,7 +482,7 @@ private: } template - bool ParseRange(DecodedStream& ds, SizeType* range) { + bool ParseRange(DecodedStream& ds, SizeType* range) { bool isBegin = true; bool negate = false; int step = 0; @@ -575,7 +560,7 @@ private: } template - bool CharacterEscape(DecodedStream& ds, unsigned* escapedCodepoint) { + bool CharacterEscape(DecodedStream& ds, unsigned* escapedCodepoint) { unsigned codepoint; switch (codepoint = ds.Take()) { case '^': @@ -603,72 +588,8 @@ private: } } - template - bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) const { - RAPIDJSON_ASSERT(IsValid()); - DecodedStream ds(is); - - state0_.Clear(); - Stack *current = &state0_, *next = &state1_; - const size_t stateSetSize = GetStateSetSize(); - std::memset(stateSet_, 0, stateSetSize); - - bool matched = AddState(*current, root_); - unsigned codepoint; - while (!current->Empty() && (codepoint = ds.Take()) != 0) { - std::memset(stateSet_, 0, stateSetSize); - 
next->Clear(); - matched = false; - for (const SizeType* s = current->template Bottom(); s != current->template End(); ++s) { - const State& sr = GetState(*s); - if (sr.codepoint == codepoint || - sr.codepoint == kAnyCharacterClass || - (sr.codepoint == kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint))) - { - matched = AddState(*next, sr.out) || matched; - if (!anchorEnd && matched) - return true; - } - if (!anchorBegin) - AddState(*next, root_); - } - internal::Swap(current, next); - } - - return matched; - } - - size_t GetStateSetSize() const { - return (stateCount_ + 31) / 32 * 4; - } - - // Return whether the added states is a match state - bool AddState(Stack& l, SizeType index) const { - RAPIDJSON_ASSERT(index != kRegexInvalidState); - - const State& s = GetState(index); - if (s.out1 != kRegexInvalidState) { // Split - bool matched = AddState(l, s.out); - return AddState(l, s.out1) || matched; - } - else if (!(stateSet_[index >> 5] & (1 << (index & 31)))) { - stateSet_[index >> 5] |= (1 << (index & 31)); - *l.template PushUnsafe() = index; - } - return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not validated due to reallocation. 
- } - - bool MatchRange(SizeType rangeIndex, unsigned codepoint) const { - bool yes = (GetRange(rangeIndex).start & kRangeNegationFlag) == 0; - while (rangeIndex != kRegexInvalidRange) { - const Range& r = GetRange(rangeIndex); - if (codepoint >= (r.start & ~kRangeNegationFlag) && codepoint <= r.end) - return yes; - rangeIndex = r.next; - } - return !yes; - } - + Allocator* ownAllocator_; + Allocator* allocator_; Stack states_; Stack ranges_; SizeType root_; @@ -678,23 +599,141 @@ private: static const unsigned kInfinityQuantifier = ~0u; // For SearchWithAnchoring() - uint32_t* stateSet_; // allocated by states_.GetAllocator() - mutable Stack state0_; - mutable Stack state1_; bool anchorBegin_; bool anchorEnd_; }; +template +class GenericRegexSearch { +public: + typedef typename RegexType::EncodingType Encoding; + typedef typename Encoding::Ch Ch; + + GenericRegexSearch(const RegexType& regex, Allocator* allocator = 0) : + regex_(regex), allocator_(allocator), ownAllocator_(0), + state0_(allocator, 0), state1_(allocator, 0), stateSet_() + { + RAPIDJSON_ASSERT(regex_.IsValid()); + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); + stateSet_ = static_cast(allocator_->Malloc(GetStateSetSize())); + state0_.template Reserve(regex_.stateCount_); + state1_.template Reserve(regex_.stateCount_); + } + + ~GenericRegexSearch() { + Allocator::Free(stateSet_); + RAPIDJSON_DELETE(ownAllocator_); + } + + template + bool Match(InputStream& is) { + return SearchWithAnchoring(is, true, true); + } + + bool Match(const Ch* s) { + GenericStringStream is(s); + return Match(is); + } + + template + bool Search(InputStream& is) { + return SearchWithAnchoring(is, regex_.anchorBegin_, regex_.anchorEnd_); + } + + bool Search(const Ch* s) { + GenericStringStream is(s); + return Search(is); + } + +private: + typedef typename RegexType::State State; + typedef typename RegexType::Range Range; + + template + bool SearchWithAnchoring(InputStream& is, bool anchorBegin, 
bool anchorEnd) { + DecodedStream ds(is); + + state0_.Clear(); + Stack *current = &state0_, *next = &state1_; + const size_t stateSetSize = GetStateSetSize(); + std::memset(stateSet_, 0, stateSetSize); + + bool matched = AddState(*current, regex_.root_); + unsigned codepoint; + while (!current->Empty() && (codepoint = ds.Take()) != 0) { + std::memset(stateSet_, 0, stateSetSize); + next->Clear(); + matched = false; + for (const SizeType* s = current->template Bottom(); s != current->template End(); ++s) { + const State& sr = regex_.GetState(*s); + if (sr.codepoint == codepoint || + sr.codepoint == RegexType::kAnyCharacterClass || + (sr.codepoint == RegexType::kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint))) + { + matched = AddState(*next, sr.out) || matched; + if (!anchorEnd && matched) + return true; + } + if (!anchorBegin) + AddState(*next, regex_.root_); + } + internal::Swap(current, next); + } + + return matched; + } + + size_t GetStateSetSize() const { + return (regex_.stateCount_ + 31) / 32 * 4; + } + + // Return whether the added states is a match state + bool AddState(Stack& l, SizeType index) { + RAPIDJSON_ASSERT(index != kRegexInvalidState); + + const State& s = regex_.GetState(index); + if (s.out1 != kRegexInvalidState) { // Split + bool matched = AddState(l, s.out); + return AddState(l, s.out1) || matched; + } + else if (!(stateSet_[index >> 5] & (1u << (index & 31)))) { + stateSet_[index >> 5] |= (1u << (index & 31)); + *l.template PushUnsafe() = index; + } + return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not validated due to reallocation. 
+ } + + bool MatchRange(SizeType rangeIndex, unsigned codepoint) const { + bool yes = (regex_.GetRange(rangeIndex).start & RegexType::kRangeNegationFlag) == 0; + while (rangeIndex != kRegexInvalidRange) { + const Range& r = regex_.GetRange(rangeIndex); + if (codepoint >= (r.start & ~RegexType::kRangeNegationFlag) && codepoint <= r.end) + return yes; + rangeIndex = r.next; + } + return !yes; + } + + const RegexType& regex_; + Allocator* allocator_; + Allocator* ownAllocator_; + Stack state0_; + Stack state1_; + uint32_t* stateSet_; +}; + typedef GenericRegex > Regex; +typedef GenericRegexSearch RegexSearch; } // namespace internal RAPIDJSON_NAMESPACE_END -#ifdef __clang__ +#ifdef __GNUC__ RAPIDJSON_DIAG_POP #endif -#ifdef _MSC_VER +#if defined(__clang__) || defined(_MSC_VER) RAPIDJSON_DIAG_POP #endif diff --git a/src/3rdparty/rapidjson/internal/stack.h b/src/3rdparty/rapidjson/internal/stack.h index 022c9aab..45dca6a8 100644 --- a/src/3rdparty/rapidjson/internal/stack.h +++ b/src/3rdparty/rapidjson/internal/stack.h @@ -17,6 +17,7 @@ #include "../allocators.h" #include "swap.h" +#include #if defined(__clang__) RAPIDJSON_DIAG_PUSH @@ -100,7 +101,7 @@ public: void ShrinkToFit() { if (Empty()) { // If the stack is empty, completely deallocate the memory. 
- Allocator::Free(stack_); + Allocator::Free(stack_); // NOLINT (+clang-analyzer-unix.Malloc) stack_ = 0; stackTop_ = 0; stackEnd_ = 0; @@ -114,7 +115,7 @@ public: template RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) { // Expand the stack if needed - if (RAPIDJSON_UNLIKELY(stackTop_ + sizeof(T) * count > stackEnd_)) + if (RAPIDJSON_UNLIKELY(static_cast(sizeof(T) * count) > (stackEnd_ - stackTop_))) Expand(count); } @@ -126,7 +127,8 @@ public: template RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) { - RAPIDJSON_ASSERT(stackTop_ + sizeof(T) * count <= stackEnd_); + RAPIDJSON_ASSERT(stackTop_); + RAPIDJSON_ASSERT(static_cast(sizeof(T) * count) <= (stackEnd_ - stackTop_)); T* ret = reinterpret_cast(stackTop_); stackTop_ += sizeof(T) * count; return ret; @@ -183,7 +185,7 @@ private: size_t newCapacity; if (stack_ == 0) { if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); newCapacity = initialCapacity_; } else { newCapacity = GetCapacity(); diff --git a/src/3rdparty/rapidjson/internal/strfunc.h b/src/3rdparty/rapidjson/internal/strfunc.h index 2edfae52..226439a7 100644 --- a/src/3rdparty/rapidjson/internal/strfunc.h +++ b/src/3rdparty/rapidjson/internal/strfunc.h @@ -16,6 +16,7 @@ #define RAPIDJSON_INTERNAL_STRFUNC_H_ #include "../stream.h" +#include RAPIDJSON_NAMESPACE_BEGIN namespace internal { @@ -28,14 +29,27 @@ namespace internal { */ template inline SizeType StrLen(const Ch* s) { + RAPIDJSON_ASSERT(s != 0); const Ch* p = s; while (*p) ++p; return SizeType(p - s); } +template <> +inline SizeType StrLen(const char* s) { + return SizeType(std::strlen(s)); +} + +template <> +inline SizeType StrLen(const wchar_t* s) { + return SizeType(std::wcslen(s)); +} + //! Returns number of code points in a encoded string. 
template bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) { + RAPIDJSON_ASSERT(s != 0); + RAPIDJSON_ASSERT(outCount != 0); GenericStringStream is(s); const typename Encoding::Ch* end = s + length; SizeType count = 0; diff --git a/src/3rdparty/rapidjson/internal/strtod.h b/src/3rdparty/rapidjson/internal/strtod.h index 289c413b..dfca22b6 100644 --- a/src/3rdparty/rapidjson/internal/strtod.h +++ b/src/3rdparty/rapidjson/internal/strtod.h @@ -19,6 +19,8 @@ #include "biginteger.h" #include "diyfp.h" #include "pow10.h" +#include +#include RAPIDJSON_NAMESPACE_BEGIN namespace internal { @@ -126,46 +128,46 @@ inline bool StrtodFast(double d, int p, double* result) { } // Compute an approximation and see if it is within 1/2 ULP -inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) { +inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) { uint64_t significand = 0; - size_t i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999 - for (; i < length; i++) { + int i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999 + for (; i < dLen; i++) { if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5')) break; significand = significand * 10u + static_cast(decimals[i] - '0'); } - if (i < length && decimals[i] >= '5') // Rounding + if (i < dLen && decimals[i] >= '5') // Rounding significand++; - size_t remaining = length - i; - const unsigned kUlpShift = 3; - const unsigned kUlp = 1 << kUlpShift; + int remaining = dLen - i; + const int kUlpShift = 3; + const int kUlp = 1 << kUlpShift; int64_t error = (remaining == 0) ? 
0 : kUlp / 2; DiyFp v(significand, 0); v = v.Normalize(); error <<= -v.e; - const int dExp = static_cast(decimalPosition) - static_cast(i) + exp; + dExp += remaining; int actualExp; DiyFp cachedPower = GetCachedPower10(dExp, &actualExp); if (actualExp != dExp) { static const DiyFp kPow10[] = { - DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 00000000), -60), // 10^1 - DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 00000000), -57), // 10^2 - DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 00000000), -54), // 10^3 - DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 00000000), -50), // 10^4 - DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 00000000), -47), // 10^5 - DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 00000000), -44), // 10^6 - DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 00000000), -40) // 10^7 + DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 0x00000000), -60), // 10^1 + DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 0x00000000), -57), // 10^2 + DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 0x00000000), -54), // 10^3 + DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), -50), // 10^4 + DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 0x00000000), -47), // 10^5 + DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 0x00000000), -44), // 10^6 + DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 0x00000000), -40) // 10^7 }; - int adjustment = dExp - actualExp - 1; - RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7); - v = v * kPow10[adjustment]; - if (length + static_cast(adjustment)> 19u) // has more digits than decimal digits in 64-bit + int adjustment = dExp - actualExp; + RAPIDJSON_ASSERT(adjustment >= 1 && adjustment < 8); + v = v * kPow10[adjustment - 1]; + if (dLen + adjustment > 19) // has more digits than decimal digits in 64-bit error += kUlp / 2; } @@ -177,17 +179,17 @@ inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosit v = v.Normalize(); error <<= oldExp - v.e; - const unsigned effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e); - unsigned precisionSize = 64 - effectiveSignificandSize; + const int effectiveSignificandSize = 
Double::EffectiveSignificandSize(64 + v.e); + int precisionSize = 64 - effectiveSignificandSize; if (precisionSize + kUlpShift >= 64) { - unsigned scaleExp = (precisionSize + kUlpShift) - 63; + int scaleExp = (precisionSize + kUlpShift) - 63; v.f >>= scaleExp; v.e += scaleExp; - error = (error >> scaleExp) + 1 + static_cast(kUlp); + error = (error >> scaleExp) + 1 + kUlp; precisionSize -= scaleExp; } - DiyFp rounded(v.f >> precisionSize, v.e + static_cast(precisionSize)); + DiyFp rounded(v.f >> precisionSize, v.e + precisionSize); const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp; const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp; if (precisionBits >= halfWay + static_cast(error)) { @@ -203,9 +205,9 @@ inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosit return halfWay - static_cast(error) >= precisionBits || precisionBits >= halfWay + static_cast(error); } -inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) { - const BigInteger dInt(decimals, length); - const int dExp = static_cast(decimalPosition) - static_cast(length) + exp; +inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) { + RAPIDJSON_ASSERT(dLen >= 0); + const BigInteger dInt(decimals, static_cast(dLen)); Double a(approx); int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp); if (cmp < 0) @@ -225,42 +227,61 @@ inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t RAPIDJSON_ASSERT(d >= 0.0); RAPIDJSON_ASSERT(length >= 1); - double result; + double result = 0.0; if (StrtodFast(d, p, &result)) return result; + RAPIDJSON_ASSERT(length <= INT_MAX); + int dLen = static_cast(length); + + RAPIDJSON_ASSERT(length >= decimalPosition); + RAPIDJSON_ASSERT(length - decimalPosition <= INT_MAX); + int dExpAdjust = static_cast(length - decimalPosition); + + RAPIDJSON_ASSERT(exp >= INT_MIN + dExpAdjust); + int dExp = 
exp - dExpAdjust; + + // Make sure length+dExp does not overflow + RAPIDJSON_ASSERT(dExp <= INT_MAX - dLen); + // Trim leading zeros - while (*decimals == '0' && length > 1) { - length--; + while (dLen > 0 && *decimals == '0') { + dLen--; decimals++; - decimalPosition--; } // Trim trailing zeros - while (decimals[length - 1] == '0' && length > 1) { - length--; - decimalPosition--; - exp++; + while (dLen > 0 && decimals[dLen - 1] == '0') { + dLen--; + dExp++; + } + + if (dLen == 0) { // Buffer only contains zeros. + return 0.0; } // Trim right-most digits - const int kMaxDecimalDigit = 780; - if (static_cast(length) > kMaxDecimalDigit) { - int delta = (static_cast(length) - kMaxDecimalDigit); - exp += delta; - decimalPosition -= static_cast(delta); - length = kMaxDecimalDigit; + const int kMaxDecimalDigit = 767 + 1; + if (dLen > kMaxDecimalDigit) { + dExp += dLen - kMaxDecimalDigit; + dLen = kMaxDecimalDigit; } - // If too small, underflow to zero - if (int(length) + exp < -324) + // If too small, underflow to zero. + // Any x <= 10^-324 is interpreted as zero. + if (dLen + dExp <= -324) return 0.0; - if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result)) + // If too large, overflow to infinity. + // Any x >= 10^309 is interpreted as +infinity. 
+ if (dLen + dExp > 309) + return std::numeric_limits::infinity(); + + if (StrtodDiyFp(decimals, dLen, dExp, &result)) return result; // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison - return StrtodBigInteger(result, decimals, length, decimalPosition, exp); + return StrtodBigInteger(result, decimals, dLen, dExp); } } // namespace internal diff --git a/src/3rdparty/rapidjson/istreamwrapper.h b/src/3rdparty/rapidjson/istreamwrapper.h index f5fe2897..c4950b9d 100644 --- a/src/3rdparty/rapidjson/istreamwrapper.h +++ b/src/3rdparty/rapidjson/istreamwrapper.h @@ -17,13 +17,12 @@ #include "stream.h" #include +#include #ifdef __clang__ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(padded) -#endif - -#ifdef _MSC_VER +#elif defined(_MSC_VER) RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized #endif @@ -50,57 +49,71 @@ template class BasicIStreamWrapper { public: typedef typename StreamType::char_type Ch; - BasicIStreamWrapper(StreamType& stream) : stream_(stream), count_(), peekBuffer_() {} - Ch Peek() const { - typename StreamType::int_type c = stream_.peek(); - return RAPIDJSON_LIKELY(c != StreamType::traits_type::eof()) ? static_cast(c) : '\0'; + //! Constructor. + /*! + \param stream stream opened for read. + */ + BasicIStreamWrapper(StreamType &stream) : stream_(stream), buffer_(peekBuffer_), bufferSize_(4), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { + Read(); } - Ch Take() { - typename StreamType::int_type c = stream_.get(); - if (RAPIDJSON_LIKELY(c != StreamType::traits_type::eof())) { - count_++; - return static_cast(c); - } - else - return '\0'; + //! Constructor. + /*! + \param stream stream opened for read. + \param buffer user-supplied buffer. + \param bufferSize size of buffer in bytes. Must >=4 bytes. 
+ */ + BasicIStreamWrapper(StreamType &stream, char* buffer, size_t bufferSize) : stream_(stream), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { + RAPIDJSON_ASSERT(bufferSize >= 4); + Read(); } - // tellg() may return -1 when failed. So we count by ourself. - size_t Tell() const { return count_; } + Ch Peek() const { return *current_; } + Ch Take() { Ch c = *current_; Read(); return c; } + size_t Tell() const { return count_ + static_cast(current_ - buffer_); } - Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + // Not implemented void Put(Ch) { RAPIDJSON_ASSERT(false); } - void Flush() { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } // For encoding detection only. const Ch* Peek4() const { - RAPIDJSON_ASSERT(sizeof(Ch) == 1); // Only usable for byte stream. - int i; - bool hasError = false; - for (i = 0; i < 4; ++i) { - typename StreamType::int_type c = stream_.get(); - if (c == StreamType::traits_type::eof()) { - hasError = true; - stream_.clear(); - break; - } - peekBuffer_[i] = static_cast(c); - } - for (--i; i >= 0; --i) - stream_.putback(peekBuffer_[i]); - return !hasError ? peekBuffer_ : 0; + return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0; } private: + BasicIStreamWrapper(); BasicIStreamWrapper(const BasicIStreamWrapper&); BasicIStreamWrapper& operator=(const BasicIStreamWrapper&); - StreamType& stream_; - size_t count_; //!< Number of characters read. 
Note: - mutable Ch peekBuffer_[4]; + void Read() { + if (current_ < bufferLast_) + ++current_; + else if (!eof_) { + count_ += readCount_; + readCount_ = bufferSize_; + bufferLast_ = buffer_ + readCount_ - 1; + current_ = buffer_; + + if (!stream_.read(buffer_, static_cast(bufferSize_))) { + readCount_ = static_cast(stream_.gcount()); + *(bufferLast_ = buffer_ + readCount_) = '\0'; + eof_ = true; + } + } + } + + StreamType &stream_; + Ch peekBuffer_[4], *buffer_; + size_t bufferSize_; + Ch *bufferLast_; + Ch *current_; + size_t readCount_; + size_t count_; //!< Number of characters read + bool eof_; }; typedef BasicIStreamWrapper IStreamWrapper; diff --git a/src/3rdparty/rapidjson/license.txt b/src/3rdparty/rapidjson/license.txt new file mode 100644 index 00000000..7ccc161c --- /dev/null +++ b/src/3rdparty/rapidjson/license.txt @@ -0,0 +1,57 @@ +Tencent is pleased to support the open source community by making RapidJSON available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. + +If you have downloaded a copy of the RapidJSON binary from Tencent, please note that the RapidJSON binary is licensed under the MIT License. +If you have downloaded a copy of the RapidJSON source code from Tencent, please note that RapidJSON source code is licensed under the MIT License, except for the third-party components listed below which are subject to different license terms. Your integration of RapidJSON into your own projects may require compliance with the MIT License, as well as the other licenses applicable to the third-party components included within RapidJSON. To avoid the problematic JSON license in your own projects, it's sufficient to exclude the bin/jsonchecker/ directory, as it's the only code under the JSON license. +A copy of the MIT License is included in this file. 
+ +Other dependencies and licenses: + +Open Source Software Licensed Under the BSD License: +-------------------------------------------------------------------- + +The msinttypes r29 +Copyright (c) 2006-2013 Alexander Chemeris +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Open Source Software Licensed Under the JSON License: +-------------------------------------------------------------------- + +json.org +Copyright (c) 2002 JSON.org +All Rights Reserved. + +JSON_checker +Copyright (c) 2002 JSON.org +All Rights Reserved. 
+ + +Terms of the JSON License: +--------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +The Software shall be used for Good, not Evil. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +Terms of the MIT License: +-------------------------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/3rdparty/rapidjson/pointer.h b/src/3rdparty/rapidjson/pointer.h index 0206ac1c..063abab9 100644 --- a/src/3rdparty/rapidjson/pointer.h +++ b/src/3rdparty/rapidjson/pointer.h @@ -21,9 +21,7 @@ #ifdef __clang__ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(switch-enum) -#endif - -#ifdef _MSC_VER +#elif defined(_MSC_VER) RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated #endif @@ -165,7 +163,12 @@ public: GenericPointer(const Token* tokens, size_t tokenCount) : allocator_(), ownAllocator_(), nameBuffer_(), tokens_(const_cast(tokens)), tokenCount_(tokenCount), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {} //! Copy constructor. - GenericPointer(const GenericPointer& rhs, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + GenericPointer(const GenericPointer& rhs) : allocator_(rhs.allocator_), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + *this = rhs; + } + + //! Copy constructor. + GenericPointer(const GenericPointer& rhs, Allocator* allocator) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { *this = rhs; } @@ -197,6 +200,36 @@ public: return *this; } + //! Swap the content of this pointer with an other. + /*! + \param other The pointer to swap with. 
+ \note Constant complexity. + */ + GenericPointer& Swap(GenericPointer& other) RAPIDJSON_NOEXCEPT { + internal::Swap(allocator_, other.allocator_); + internal::Swap(ownAllocator_, other.ownAllocator_); + internal::Swap(nameBuffer_, other.nameBuffer_); + internal::Swap(tokens_, other.tokens_); + internal::Swap(tokenCount_, other.tokenCount_); + internal::Swap(parseErrorOffset_, other.parseErrorOffset_); + internal::Swap(parseErrorCode_, other.parseErrorCode_); + return *this; + } + + //! free-standing swap function helper + /*! + Helper function to enable support for common swap implementation pattern based on \c std::swap: + \code + void swap(MyClass& a, MyClass& b) { + using std::swap; + swap(a.pointer, b.pointer); + // ... + } + \endcode + \see Swap() + */ + friend inline void swap(GenericPointer& a, GenericPointer& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } + //@} //!@name Append token @@ -240,7 +273,7 @@ public: template RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >), (GenericPointer)) Append(T* name, Allocator* allocator = 0) const { - return Append(name, StrLen(name), allocator); + return Append(name, internal::StrLen(name), allocator); } #if RAPIDJSON_HAS_STDSTRING @@ -274,7 +307,7 @@ public: else { Ch name[21]; for (size_t i = 0; i <= length; i++) - name[i] = buffer[i]; + name[i] = static_cast(buffer[i]); Token token = { name, length, index }; return Append(token, allocator); } @@ -353,6 +386,33 @@ public: */ bool operator!=(const GenericPointer& rhs) const { return !(*this == rhs); } + //! Less than operator. + /*! + \note Invalid pointers are always greater than valid ones. 
+ */ + bool operator<(const GenericPointer& rhs) const { + if (!IsValid()) + return false; + if (!rhs.IsValid()) + return true; + + if (tokenCount_ != rhs.tokenCount_) + return tokenCount_ < rhs.tokenCount_; + + for (size_t i = 0; i < tokenCount_; i++) { + if (tokens_[i].index != rhs.tokens_[i].index) + return tokens_[i].index < rhs.tokens_[i].index; + + if (tokens_[i].length != rhs.tokens_[i].length) + return tokens_[i].length < rhs.tokens_[i].length; + + if (int cmp = std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch) * tokens_[i].length)) + return cmp < 0; + } + + return false; + } + //@} //!@name Stringify @@ -532,14 +592,14 @@ public: */ ValueType& GetWithDefault(ValueType& root, const ValueType& defaultValue, typename ValueType::AllocatorType& allocator) const { bool alreadyExist; - Value& v = Create(root, allocator, &alreadyExist); + ValueType& v = Create(root, allocator, &alreadyExist); return alreadyExist ? v : v.CopyFrom(defaultValue, allocator); } //! Query a value in a subtree with default null-terminated string. ValueType& GetWithDefault(ValueType& root, const Ch* defaultValue, typename ValueType::AllocatorType& allocator) const { bool alreadyExist; - Value& v = Create(root, allocator, &alreadyExist); + ValueType& v = Create(root, allocator, &alreadyExist); return alreadyExist ? v : v.SetString(defaultValue, allocator); } @@ -547,7 +607,7 @@ public: //! Query a value in a subtree with default std::basic_string. ValueType& GetWithDefault(ValueType& root, const std::basic_string& defaultValue, typename ValueType::AllocatorType& allocator) const { bool alreadyExist; - Value& v = Create(root, allocator, &alreadyExist); + ValueType& v = Create(root, allocator, &alreadyExist); return alreadyExist ? v : v.SetString(defaultValue, allocator); } #endif @@ -758,7 +818,7 @@ private: */ Ch* CopyFromRaw(const GenericPointer& rhs, size_t extraToken = 0, size_t extraNameBufferSize = 0) { if (!allocator_) // allocator is independently owned. 
- ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); size_t nameBufferSize = rhs.tokenCount_; // null terminators for tokens for (Token *t = rhs.tokens_; t != rhs.tokens_ + rhs.tokenCount_; ++t) @@ -806,7 +866,7 @@ private: // Create own allocator if user did not supply. if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); // Count number of '/' as tokenCount tokenCount_ = 0; @@ -1029,8 +1089,8 @@ private: unsigned char u = static_cast(c); static const char hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; os_.Put('%'); - os_.Put(hexDigits[u >> 4]); - os_.Put(hexDigits[u & 15]); + os_.Put(static_cast(hexDigits[u >> 4])); + os_.Put(static_cast(hexDigits[u & 15])); } private: OutputStream& os_; @@ -1347,11 +1407,7 @@ bool EraseValueByPointer(T& root, const CharType(&source)[N]) { RAPIDJSON_NAMESPACE_END -#ifdef __clang__ -RAPIDJSON_DIAG_POP -#endif - -#ifdef _MSC_VER +#if defined(__clang__) || defined(_MSC_VER) RAPIDJSON_DIAG_POP #endif diff --git a/src/3rdparty/rapidjson/prettywriter.h b/src/3rdparty/rapidjson/prettywriter.h index 0dcb0fee..45afb694 100644 --- a/src/3rdparty/rapidjson/prettywriter.h +++ b/src/3rdparty/rapidjson/prettywriter.h @@ -22,6 +22,11 @@ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(effc++) #endif +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + RAPIDJSON_NAMESPACE_BEGIN //! Combination of PrettyWriter format flags. @@ -34,7 +39,7 @@ enum PrettyFormatOptions { //! Writer with indentation and spacing. /*! - \tparam OutputStream Type of ouptut os. + \tparam OutputStream Type of output os. \tparam SourceEncoding Encoding of source string. \tparam TargetEncoding Encoding of output stream. \tparam StackAllocator Type of allocator for allocating memory of stack. 
@@ -42,7 +47,7 @@ enum PrettyFormatOptions { template, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags> class PrettyWriter : public Writer { public: - typedef Writer Base; + typedef Writer Base; typedef typename Base::Ch Ch; //! Constructor @@ -57,6 +62,11 @@ public: explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4) {} +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + PrettyWriter(PrettyWriter&& rhs) : + Base(std::forward(rhs)), indentChar_(rhs.indentChar_), indentCharCount_(rhs.indentCharCount_), formatOptions_(rhs.formatOptions_) {} +#endif + //! Set custom indentation. /*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r'). \param indentCharCount Number of indent characters for each indentation level. @@ -82,24 +92,26 @@ public: */ //@{ - bool Null() { PrettyPrefix(kNullType); return Base::WriteNull(); } - bool Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); return Base::WriteBool(b); } - bool Int(int i) { PrettyPrefix(kNumberType); return Base::WriteInt(i); } - bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::WriteUint(u); } - bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::WriteInt64(i64); } - bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::WriteUint64(u64); } - bool Double(double d) { PrettyPrefix(kNumberType); return Base::WriteDouble(d); } + bool Null() { PrettyPrefix(kNullType); return Base::EndValue(Base::WriteNull()); } + bool Bool(bool b) { PrettyPrefix(b ? 
kTrueType : kFalseType); return Base::EndValue(Base::WriteBool(b)); } + bool Int(int i) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt(i)); } + bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint(u)); } + bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt64(i64)); } + bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint64(u64)); } + bool Double(double d) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteDouble(d)); } bool RawNumber(const Ch* str, SizeType length, bool copy = false) { + RAPIDJSON_ASSERT(str != 0); (void)copy; PrettyPrefix(kNumberType); - return Base::WriteString(str, length); + return Base::EndValue(Base::WriteString(str, length)); } bool String(const Ch* str, SizeType length, bool copy = false) { + RAPIDJSON_ASSERT(str != 0); (void)copy; PrettyPrefix(kStringType); - return Base::WriteString(str, length); + return Base::EndValue(Base::WriteString(str, length)); } #if RAPIDJSON_HAS_STDSTRING @@ -124,19 +136,21 @@ public: bool EndObject(SizeType memberCount = 0) { (void)memberCount; - RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); - RAPIDJSON_ASSERT(!Base::level_stack_.template Top()->inArray); + RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); // not inside an Object + RAPIDJSON_ASSERT(!Base::level_stack_.template Top()->inArray); // currently inside an Array, not Object + RAPIDJSON_ASSERT(0 == Base::level_stack_.template Top()->valueCount % 2); // Object has a Key without a Value + bool empty = Base::level_stack_.template Pop(1)->valueCount == 0; if (!empty) { Base::os_->Put('\n'); WriteIndent(); } - bool ret = Base::WriteEndObject(); + bool ret = Base::EndValue(Base::WriteEndObject()); (void)ret; RAPIDJSON_ASSERT(ret == true); if (Base::level_stack_.Empty()) // end of json text - Base::os_->Flush(); + Base::Flush(); return true; } @@ -156,11 
+170,11 @@ public: Base::os_->Put('\n'); WriteIndent(); } - bool ret = Base::WriteEndArray(); + bool ret = Base::EndValue(Base::WriteEndArray()); (void)ret; RAPIDJSON_ASSERT(ret == true); if (Base::level_stack_.Empty()) // end of json text - Base::os_->Flush(); + Base::Flush(); return true; } @@ -184,7 +198,11 @@ public: \param type Type of the root of json. \note When using PrettyWriter::RawValue(), the result json may not be indented correctly. */ - bool RawValue(const Ch* json, size_t length, Type type) { PrettyPrefix(type); return Base::WriteRawValue(json, length); } + bool RawValue(const Ch* json, size_t length, Type type) { + RAPIDJSON_ASSERT(json != 0); + PrettyPrefix(type); + return Base::EndValue(Base::WriteRawValue(json, length)); + } protected: void PrettyPrefix(Type type) { @@ -233,7 +251,7 @@ protected: void WriteIndent() { size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_; - PutN(*Base::os_, static_cast(indentChar_), count); + PutN(*Base::os_, static_cast(indentChar_), count); } Ch indentChar_; @@ -248,6 +266,10 @@ private: RAPIDJSON_NAMESPACE_END +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + #ifdef __GNUC__ RAPIDJSON_DIAG_POP #endif diff --git a/src/3rdparty/rapidjson/rapidjson.h b/src/3rdparty/rapidjson/rapidjson.h index 2ef9bc56..549936ff 100644 --- a/src/3rdparty/rapidjson/rapidjson.h +++ b/src/3rdparty/rapidjson/rapidjson.h @@ -26,7 +26,7 @@ Some RapidJSON features are configurable to adapt the library to a wide variety of platforms, environments and usage scenarios. Most of the - features can be configured in terms of overriden or predefined + features can be configured in terms of overridden or predefined preprocessor macros at compile-time. Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs. 
@@ -49,6 +49,11 @@ // token stringification #define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x) #define RAPIDJSON_DO_STRINGIFY(x) #x + +// token concatenation +#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y) +#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y) +#define RAPIDJSON_DO_JOIN2(X, Y) X##Y //!@endcond /*! \def RAPIDJSON_MAJOR_VERSION @@ -214,7 +219,7 @@ # elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN # else -# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. # endif // __BYTE_ORDER__ // Detect with GLIBC's endian.h # elif defined(__GLIBC__) @@ -224,7 +229,7 @@ # elif (__BYTE_ORDER == __BIG_ENDIAN) # define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN # else -# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. # endif // __GLIBC__ // Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro # elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) @@ -236,12 +241,12 @@ # define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN # elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__) # define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN -# elif defined(_MSC_VER) && defined(_M_ARM) +# elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)) # define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN # elif defined(RAPIDJSON_DOXYGEN_RUNNING) # define RAPIDJSON_ENDIAN # else -# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. 
# endif #endif // RAPIDJSON_ENDIAN @@ -264,16 +269,11 @@ /*! \ingroup RAPIDJSON_CONFIG \param x pointer to align - Some machines require strict data alignment. Currently the default uses 4 bytes - alignment on 32-bit platforms and 8 bytes alignment for 64-bit platforms. + Some machines require strict data alignment. The default is 8 bytes. User can customize by defining the RAPIDJSON_ALIGN function macro. */ #ifndef RAPIDJSON_ALIGN -#if RAPIDJSON_64BIT == 1 -#define RAPIDJSON_ALIGN(x) (((x) + static_cast(7u)) & ~static_cast(7u)) -#else -#define RAPIDJSON_ALIGN(x) (((x) + 3u) & ~3u) -#endif +#define RAPIDJSON_ALIGN(x) (((x) + static_cast(7u)) & ~static_cast(7u)) #endif /////////////////////////////////////////////////////////////////////////////// @@ -320,17 +320,17 @@ #endif /////////////////////////////////////////////////////////////////////////////// -// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_SIMD +// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_NEON/RAPIDJSON_SIMD /*! \def RAPIDJSON_SIMD \ingroup RAPIDJSON_CONFIG - \brief Enable SSE2/SSE4.2 optimization. + \brief Enable SSE2/SSE4.2/Neon optimization. RapidJSON supports optimized implementations for some parsing operations - based on the SSE2 or SSE4.2 SIMD extensions on modern Intel-compatible - processors. + based on the SSE2, SSE4.2 or NEon SIMD extensions on modern Intel + or ARM compatible processors. - To enable these optimizations, two different symbols can be defined; + To enable these optimizations, three different symbols can be defined; \code // Enable SSE2 optimization. #define RAPIDJSON_SSE2 @@ -339,13 +339,17 @@ #define RAPIDJSON_SSE42 \endcode - \c RAPIDJSON_SSE42 takes precedence, if both are defined. + // Enable ARM Neon optimization. + #define RAPIDJSON_NEON + \endcode + + \c RAPIDJSON_SSE42 takes precedence over SSE2, if both are defined. If any of these symbols is defined, RapidJSON defines the macro \c RAPIDJSON_SIMD to indicate the availability of the optimized code. 
*/ #if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \ - || defined(RAPIDJSON_DOXYGEN_RUNNING) + || defined(RAPIDJSON_NEON) || defined(RAPIDJSON_DOXYGEN_RUNNING) #define RAPIDJSON_SIMD #endif @@ -398,13 +402,22 @@ RAPIDJSON_NAMESPACE_END \ref RAPIDJSON_ERRORS APIs. */ #ifndef RAPIDJSON_ASSERT -#define RAPIDJSON_ASSERT(x) +#include +#define RAPIDJSON_ASSERT(x) assert(x) #endif // RAPIDJSON_ASSERT /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_STATIC_ASSERT -// Adopt from boost +// Prefer C++11 static_assert, if available +#ifndef RAPIDJSON_STATIC_ASSERT +#if __cplusplus >= 201103L || ( defined(_MSC_VER) && _MSC_VER >= 1800 ) +#define RAPIDJSON_STATIC_ASSERT(x) \ + static_assert(x, RAPIDJSON_STRINGIFY(x)) +#endif // C++11 +#endif // RAPIDJSON_STATIC_ASSERT + +// Adopt C++03 implementation from boost #ifndef RAPIDJSON_STATIC_ASSERT #ifndef __clang__ //!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN @@ -412,14 +425,10 @@ RAPIDJSON_NAMESPACE_END RAPIDJSON_NAMESPACE_BEGIN template struct STATIC_ASSERTION_FAILURE; template <> struct STATIC_ASSERTION_FAILURE { enum { value = 1 }; }; -template struct StaticAssertTest {}; +template struct StaticAssertTest {}; RAPIDJSON_NAMESPACE_END -#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y) -#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y) -#define RAPIDJSON_DO_JOIN2(X, Y) X##Y - -#if defined(__GNUC__) +#if defined(__GNUC__) || defined(__clang__) #define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused)) #else #define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE @@ -437,7 +446,7 @@ RAPIDJSON_NAMESPACE_END typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \ sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE)> \ RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE -#endif +#endif // RAPIDJSON_STATIC_ASSERT /////////////////////////////////////////////////////////////////////////////// // RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY @@ -529,13 
+538,14 @@ RAPIDJSON_NAMESPACE_END #ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS #if defined(__clang__) #if __has_feature(cxx_rvalue_references) && \ - (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306) + (defined(_MSC_VER) || defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306) #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 #else #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 #endif #elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ - (defined(_MSC_VER) && _MSC_VER >= 1600) + (defined(_MSC_VER) && _MSC_VER >= 1600) || \ + (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__)) #define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 #else @@ -546,8 +556,9 @@ RAPIDJSON_NAMESPACE_END #ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT #if defined(__clang__) #define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept) -#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) -// (defined(_MSC_VER) && _MSC_VER >= ????) 
// not yet supported +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1900) || \ + (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__)) #define RAPIDJSON_HAS_CXX11_NOEXCEPT 1 #else #define RAPIDJSON_HAS_CXX11_NOEXCEPT 0 @@ -561,14 +572,19 @@ RAPIDJSON_NAMESPACE_END // no automatic detection, yet #ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS +#if (defined(_MSC_VER) && _MSC_VER >= 1700) +#define RAPIDJSON_HAS_CXX11_TYPETRAITS 1 +#else #define RAPIDJSON_HAS_CXX11_TYPETRAITS 0 #endif +#endif #ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR #if defined(__clang__) #define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for) -#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ - (defined(_MSC_VER) && _MSC_VER >= 1700) +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1700) || \ + (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__)) #define RAPIDJSON_HAS_CXX11_RANGE_FOR 1 #else #define RAPIDJSON_HAS_CXX11_RANGE_FOR 0 @@ -577,12 +593,38 @@ RAPIDJSON_NAMESPACE_END //!@endcond +//! Assertion (in non-throwing contexts). + /*! \ingroup RAPIDJSON_CONFIG + Some functions provide a \c noexcept guarantee, if the compiler supports it. + In these cases, the \ref RAPIDJSON_ASSERT macro cannot be overridden to + throw an exception. This macro adds a separate customization point for + such cases. + + Defaults to C \c assert() (as \ref RAPIDJSON_ASSERT), if \c noexcept is + supported, and to \ref RAPIDJSON_ASSERT otherwise. 
+ */ + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NOEXCEPT_ASSERT + +#ifndef RAPIDJSON_NOEXCEPT_ASSERT +#ifdef RAPIDJSON_ASSERT_THROWS +#if RAPIDJSON_HAS_CXX11_NOEXCEPT +#define RAPIDJSON_NOEXCEPT_ASSERT(x) +#else +#define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x) +#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT +#else +#define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x) +#endif // RAPIDJSON_ASSERT_THROWS +#endif // RAPIDJSON_NOEXCEPT_ASSERT + /////////////////////////////////////////////////////////////////////////////// // new/delete #ifndef RAPIDJSON_NEW ///! customization point for global \c new -#define RAPIDJSON_NEW(x) new x +#define RAPIDJSON_NEW(TypeName) new TypeName #endif #ifndef RAPIDJSON_DELETE ///! customization point for global \c delete diff --git a/src/3rdparty/rapidjson/reader.h b/src/3rdparty/rapidjson/reader.h index 303aac2e..44a6bcd3 100644 --- a/src/3rdparty/rapidjson/reader.h +++ b/src/3rdparty/rapidjson/reader.h @@ -33,12 +33,8 @@ #include #elif defined(RAPIDJSON_SSE2) #include -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant -RAPIDJSON_DIAG_OFF(4702) // unreachable code +#elif defined(RAPIDJSON_NEON) +#include #endif #ifdef __clang__ @@ -46,6 +42,10 @@ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(old-style-cast) RAPIDJSON_DIAG_OFF(padded) RAPIDJSON_DIAG_OFF(switch-enum) +#elif defined(_MSC_VER) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +RAPIDJSON_DIAG_OFF(4702) // unreachable code #endif #ifdef __GNUC__ @@ -136,7 +136,7 @@ RAPIDJSON_NAMESPACE_BEGIN User can define this as any \c ParseFlag combinations. */ #ifndef RAPIDJSON_PARSE_DEFAULT_FLAGS -#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseCommentsFlag | kParseTrailingCommasFlag +#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseNoFlags #endif //! 
Combination of parseFlags @@ -299,16 +299,9 @@ inline const char *SkipWhitespace_SIMD(const char* p) { for (;; p += 16) { const __m128i s = _mm_load_si128(reinterpret_cast(p)); - const int r = _mm_cvtsi128_si32(_mm_cmpistrm(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)); - if (r != 0) { // some of characters is non-whitespace -#ifdef _MSC_VER // Find the index of first non-whitespace - unsigned long offset; - _BitScanForward(&offset, r); - return p + offset; -#else - return p + __builtin_ffs(r) - 1; -#endif - } + const int r = _mm_cmpistri(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | _SIDD_NEGATIVE_POLARITY); + if (r != 16) // some of characters is non-whitespace + return p + r; } } @@ -325,16 +318,9 @@ inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { for (; p <= end - 16; p += 16) { const __m128i s = _mm_loadu_si128(reinterpret_cast(p)); - const int r = _mm_cvtsi128_si32(_mm_cmpistrm(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)); - if (r != 0) { // some of characters is non-whitespace -#ifdef _MSC_VER // Find the index of first non-whitespace - unsigned long offset; - _BitScanForward(&offset, r); - return p + offset; -#else - return p + __builtin_ffs(r) - 1; -#endif - } + const int r = _mm_cmpistri(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | _SIDD_NEGATIVE_POLARITY); + if (r != 16) // some of characters is non-whitespace + return p + r; } return SkipWhitespace(p, end); @@ -425,7 +411,92 @@ inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { return SkipWhitespace(p, end); } -#endif // RAPIDJSON_SSE2 +#elif defined(RAPIDJSON_NEON) + +//! Skip whitespace with ARM Neon instructions, testing 16 8-byte characters at once. 
+inline const char *SkipWhitespace_SIMD(const char* p) { + // Fast return for single non-whitespace + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // 16-byte align to the next boundary + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + const uint8x16_t w0 = vmovq_n_u8(' '); + const uint8x16_t w1 = vmovq_n_u8('\n'); + const uint8x16_t w2 = vmovq_n_u8('\r'); + const uint8x16_t w3 = vmovq_n_u8('\t'); + + for (;; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, w0); + x = vorrq_u8(x, vceqq_u8(s, w1)); + x = vorrq_u8(x, vceqq_u8(s, w2)); + x = vorrq_u8(x, vceqq_u8(s, w3)); + + x = vmvnq_u8(x); // Negate + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(reinterpret_cast(x), 0); // extract + uint64_t high = vgetq_lane_u64(reinterpret_cast(x), 1); // extract + + if (low == 0) { + if (high != 0) { + int lz =__builtin_clzll(high);; + return p + 8 + (lz >> 3); + } + } else { + int lz = __builtin_clzll(low);; + return p + (lz >> 3); + } + } +} + +inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { + // Fast return for single non-whitespace + if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + else + return p; + + const uint8x16_t w0 = vmovq_n_u8(' '); + const uint8x16_t w1 = vmovq_n_u8('\n'); + const uint8x16_t w2 = vmovq_n_u8('\r'); + const uint8x16_t w3 = vmovq_n_u8('\t'); + + for (; p <= end - 16; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, w0); + x = vorrq_u8(x, vceqq_u8(s, w1)); + x = vorrq_u8(x, vceqq_u8(s, w2)); + x = vorrq_u8(x, vceqq_u8(s, w3)); + + x = vmvnq_u8(x); // Negate + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(reinterpret_cast(x), 0); // extract + uint64_t high = 
vgetq_lane_u64(reinterpret_cast(x), 1); // extract + + if (low == 0) { + if (high != 0) { + int lz = __builtin_clzll(high); + return p + 8 + (lz >> 3); + } + } else { + int lz = __builtin_clzll(low); + return p + (lz >> 3); + } + } + + return SkipWhitespace(p, end); +} + +#endif // RAPIDJSON_NEON #ifdef RAPIDJSON_SIMD //! Template function specialization for InsituStringStream @@ -471,7 +542,8 @@ public: /*! \param stackAllocator Optional allocator for allocating stack memory. (Only use for non-destructive parsing) \param stackCapacity stack capacity in bytes for storing a single decoded string. (Only use for non-destructive parsing) */ - GenericReader(StackAllocator* stackAllocator = 0, size_t stackCapacity = kDefaultStackCapacity) : stack_(stackAllocator, stackCapacity), parseResult_() {} + GenericReader(StackAllocator* stackAllocator = 0, size_t stackCapacity = kDefaultStackCapacity) : + stack_(stackAllocator, stackCapacity), parseResult_(), state_(IterativeParsingStartState) {} //! Parse JSON text. /*! \tparam parseFlags Combination of \ref ParseFlag. @@ -527,7 +599,84 @@ public: return Parse(is, handler); } - //! Whether a parse error has occured in the last parsing. + //! Initialize JSON text token-by-token parsing + /*! + */ + void IterativeParseInit() { + parseResult_.Clear(); + state_ = IterativeParsingStartState; + } + + //! Parse one token from JSON text + /*! \tparam InputStream Type of input stream, implementing Stream concept + \tparam Handler Type of handler, implementing Handler concept. + \param is Input stream to be parsed. + \param handler The handler to receive events. + \return Whether the parsing is successful. 
+ */ + template + bool IterativeParseNext(InputStream& is, Handler& handler) { + while (RAPIDJSON_LIKELY(is.Peek() != '\0')) { + SkipWhitespaceAndComments(is); + + Token t = Tokenize(is.Peek()); + IterativeParsingState n = Predict(state_, t); + IterativeParsingState d = Transit(state_, t, n, is, handler); + + // If we've finished or hit an error... + if (RAPIDJSON_UNLIKELY(IsIterativeParsingCompleteState(d))) { + // Report errors. + if (d == IterativeParsingErrorState) { + HandleError(state_, is); + return false; + } + + // Transition to the finish state. + RAPIDJSON_ASSERT(d == IterativeParsingFinishState); + state_ = d; + + // If StopWhenDone is not set... + if (!(parseFlags & kParseStopWhenDoneFlag)) { + // ... and extra non-whitespace data is found... + SkipWhitespaceAndComments(is); + if (is.Peek() != '\0') { + // ... this is considered an error. + HandleError(state_, is); + return false; + } + } + + // Success! We are done! + return true; + } + + // Transition to the new state. + state_ = d; + + // If we parsed anything other than a delimiter, we invoked the handler, so we can return true now. + if (!IsIterativeParsingDelimiterState(n)) + return true; + } + + // We reached the end of file. + stack_.Clear(); + + if (state_ != IterativeParsingFinishState) { + HandleError(state_, is); + return false; + } + + return true; + } + + //! Check if token-by-token parsing JSON text is complete + /*! \return Whether the JSON has been fully decoded. + */ + RAPIDJSON_FORCEINLINE bool IterativeParseComplete() const { + return IsIterativeParsingCompleteState(state_); + } + + //! Whether a parse error has occurred in the last parsing. bool HasParseError() const { return parseResult_.IsError(); } //! Get the \ref ParseErrorCode of last parsing. 
@@ -575,7 +724,7 @@ private: } } else if (RAPIDJSON_LIKELY(Consume(is, '/'))) - while (is.Peek() != '\0' && is.Take() != '\n'); + while (is.Peek() != '\0' && is.Take() != '\n') {} else RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); @@ -750,7 +899,7 @@ private: return false; } - // Helper function to parse four hexidecimal digits in \uXXXX in ParseString(). + // Helper function to parse four hexadecimal digits in \uXXXX in ParseString(). template unsigned ParseHex4(InputStream& is, size_t escapeOffset) { unsigned codepoint = 0; @@ -857,7 +1006,7 @@ private: Ch c = is.Peek(); if (RAPIDJSON_UNLIKELY(c == '\\')) { // Escape - size_t escapeOffset = is.Tell(); // For invalid escaping, report the inital '\\' as error offset + size_t escapeOffset = is.Tell(); // For invalid escaping, report the initial '\\' as error offset is.Take(); Ch e = is.Peek(); if ((sizeof(Ch) == 1 || unsigned(e) < 256) && RAPIDJSON_LIKELY(escape[static_cast(e)])) { @@ -892,7 +1041,7 @@ private: if (c == '\0') RAPIDJSON_PARSE_ERROR(kParseErrorStringMissQuotationMark, is.Tell()); else - RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, is.Tell()); + RAPIDJSON_PARSE_ERROR(kParseErrorStringInvalidEncoding, is.Tell()); } else { size_t offset = is.Tell(); @@ -927,7 +1076,7 @@ private: // The rest of string using SIMD static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; - static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F }; const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); const __m128i sp 
= _mm_loadu_si128(reinterpret_cast(&space[0])); @@ -936,7 +1085,7 @@ private: const __m128i s = _mm_load_si128(reinterpret_cast(p)); const __m128i t1 = _mm_cmpeq_epi8(s, dq); const __m128i t2 = _mm_cmpeq_epi8(s, bs); - const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); unsigned short r = static_cast(_mm_movemask_epi8(x)); if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped @@ -948,11 +1097,13 @@ private: #else length = static_cast(__builtin_ffs(r) - 1); #endif - char* q = reinterpret_cast(os.Push(length)); - for (size_t i = 0; i < length; i++) - q[i] = p[i]; + if (length != 0) { + char* q = reinterpret_cast(os.Push(length)); + for (size_t i = 0; i < length; i++) + q[i] = p[i]; - p += length; + p += length; + } break; } _mm_storeu_si128(reinterpret_cast<__m128i *>(os.Push(16)), s); @@ -988,7 +1139,7 @@ private: // The rest of string using SIMD static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; - static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F }; const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); @@ -997,7 +1148,7 @@ private: const __m128i s = _mm_load_si128(reinterpret_cast(p)); const __m128i t1 = _mm_cmpeq_epi8(s, dq); const __m128i t2 = _mm_cmpeq_epi8(s, bs); - const __m128i t3 = 
_mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); unsigned short r = static_cast(_mm_movemask_epi8(x)); if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped @@ -1036,7 +1187,7 @@ private: // The rest of string using SIMD static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; - static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F }; const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); @@ -1045,7 +1196,7 @@ private: const __m128i s = _mm_load_si128(reinterpret_cast(p)); const __m128i t1 = _mm_cmpeq_epi8(s, dq); const __m128i t2 = _mm_cmpeq_epi8(s, bs); - const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); unsigned short r = static_cast(_mm_movemask_epi8(x)); if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped @@ -1064,7 +1215,180 @@ private: is.src_ = is.dst_ = p; } -#endif +#elif defined(RAPIDJSON_NEON) + // StringStream -> StackStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(StringStream& is, StackStream& os) { + const char* p = is.src_; + + // Scan one by one until alignment (unaligned load 
may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + return; + } + else + os.Put(*p++); + + // The rest of string using SIMD + const uint8x16_t s0 = vmovq_n_u8('"'); + const uint8x16_t s1 = vmovq_n_u8('\\'); + const uint8x16_t s2 = vmovq_n_u8('\b'); + const uint8x16_t s3 = vmovq_n_u8(32); + + for (;; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, s0); + x = vorrq_u8(x, vceqq_u8(s, s1)); + x = vorrq_u8(x, vceqq_u8(s, s2)); + x = vorrq_u8(x, vcltq_u8(s, s3)); + + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(reinterpret_cast(x), 0); // extract + uint64_t high = vgetq_lane_u64(reinterpret_cast(x), 1); // extract + + SizeType length = 0; + bool escaped = false; + if (low == 0) { + if (high != 0) { + unsigned lz = (unsigned)__builtin_clzll(high);; + length = 8 + (lz >> 3); + escaped = true; + } + } else { + unsigned lz = (unsigned)__builtin_clzll(low);; + length = lz >> 3; + escaped = true; + } + if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped + if (length != 0) { + char* q = reinterpret_cast(os.Push(length)); + for (size_t i = 0; i < length; i++) + q[i] = p[i]; + + p += length; + } + break; + } + vst1q_u8(reinterpret_cast(os.Push(16)), s); + } + + is.src_ = p; + } + + // InsituStringStream -> InsituStringStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InsituStringStream& is, InsituStringStream& os) { + RAPIDJSON_ASSERT(&is == &os); + (void)os; + + if (is.src_ == is.dst_) { + SkipUnescapedString(is); + return; + } + + char* p = is.src_; + char *q = is.dst_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & 
static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + is.dst_ = q; + return; + } + else + *q++ = *p++; + + // The rest of string using SIMD + const uint8x16_t s0 = vmovq_n_u8('"'); + const uint8x16_t s1 = vmovq_n_u8('\\'); + const uint8x16_t s2 = vmovq_n_u8('\b'); + const uint8x16_t s3 = vmovq_n_u8(32); + + for (;; p += 16, q += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, s0); + x = vorrq_u8(x, vceqq_u8(s, s1)); + x = vorrq_u8(x, vceqq_u8(s, s2)); + x = vorrq_u8(x, vcltq_u8(s, s3)); + + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(reinterpret_cast(x), 0); // extract + uint64_t high = vgetq_lane_u64(reinterpret_cast(x), 1); // extract + + SizeType length = 0; + bool escaped = false; + if (low == 0) { + if (high != 0) { + unsigned lz = (unsigned)__builtin_clzll(high); + length = 8 + (lz >> 3); + escaped = true; + } + } else { + unsigned lz = (unsigned)__builtin_clzll(low); + length = lz >> 3; + escaped = true; + } + if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped + for (const char* pend = p + length; p != pend; ) { + *q++ = *p++; + } + break; + } + vst1q_u8(reinterpret_cast(q), s); + } + + is.src_ = p; + is.dst_ = q; + } + + // When read/write pointers are the same for insitu stream, just skip unescaped characters + static RAPIDJSON_FORCEINLINE void SkipUnescapedString(InsituStringStream& is) { + RAPIDJSON_ASSERT(is.src_ == is.dst_); + char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + for (; p != nextAligned; p++) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = is.dst_ = p; + return; + } + + // The rest of string using 
SIMD + const uint8x16_t s0 = vmovq_n_u8('"'); + const uint8x16_t s1 = vmovq_n_u8('\\'); + const uint8x16_t s2 = vmovq_n_u8('\b'); + const uint8x16_t s3 = vmovq_n_u8(32); + + for (;; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, s0); + x = vorrq_u8(x, vceqq_u8(s, s1)); + x = vorrq_u8(x, vceqq_u8(s, s2)); + x = vorrq_u8(x, vcltq_u8(s, s3)); + + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(reinterpret_cast(x), 0); // extract + uint64_t high = vgetq_lane_u64(reinterpret_cast(x), 1); // extract + + if (low == 0) { + if (high != 0) { + int lz = __builtin_clzll(high); + p += 8 + (lz >> 3); + break; + } + } else { + int lz = __builtin_clzll(low); + p += lz >> 3; + break; + } + } + + is.src_ = is.dst_ = p; + } +#endif // RAPIDJSON_NEON template class NumberStream; @@ -1075,7 +1399,6 @@ private: typedef typename InputStream::Ch Ch; NumberStream(GenericReader& reader, InputStream& s) : is(s) { (void)reader; } - ~NumberStream() {} RAPIDJSON_FORCEINLINE Ch Peek() const { return is.Peek(); } RAPIDJSON_FORCEINLINE Ch TakePush() { return is.Take(); } @@ -1097,7 +1420,6 @@ private: typedef NumberStream Base; public: NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is), stackStream(reader.stack_) {} - ~NumberStream() {} RAPIDJSON_FORCEINLINE Ch TakePush() { stackStream.Put(static_cast(Base::is.Peek())); @@ -1124,7 +1446,6 @@ private: typedef NumberStream Base; public: NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is) {} - ~NumberStream() {} RAPIDJSON_FORCEINLINE Ch Take() { return Base::TakePush(); } }; @@ -1185,18 +1506,27 @@ private: } // Parse NaN or Infinity here else if ((parseFlags & kParseNanAndInfFlag) && RAPIDJSON_LIKELY((s.Peek() == 'I' || s.Peek() == 'N'))) { - useNanOrInf = true; - if (RAPIDJSON_LIKELY(Consume(s, 'N') && Consume(s, 'a') && Consume(s, 'N'))) { - d = std::numeric_limits::quiet_NaN(); + if (Consume(s, 'N')) { + if (Consume(s, 'a') && Consume(s, 'N')) 
{ + d = std::numeric_limits::quiet_NaN(); + useNanOrInf = true; + } } - else if (RAPIDJSON_LIKELY(Consume(s, 'I') && Consume(s, 'n') && Consume(s, 'f'))) { - d = (minus ? -std::numeric_limits::infinity() : std::numeric_limits::infinity()); - if (RAPIDJSON_UNLIKELY(s.Peek() == 'i' && !(Consume(s, 'i') && Consume(s, 'n') - && Consume(s, 'i') && Consume(s, 't') && Consume(s, 'y')))) - RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + else if (RAPIDJSON_LIKELY(Consume(s, 'I'))) { + if (Consume(s, 'n') && Consume(s, 'f')) { + d = (minus ? -std::numeric_limits::infinity() : std::numeric_limits::infinity()); + useNanOrInf = true; + + if (RAPIDJSON_UNLIKELY(s.Peek() == 'i' && !(Consume(s, 'i') && Consume(s, 'n') + && Consume(s, 'i') && Consume(s, 't') && Consume(s, 'y')))) { + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + } + } } - else + + if (RAPIDJSON_UNLIKELY(!useNanOrInf)) { RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + } } else RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); @@ -1231,8 +1561,6 @@ private: // Force double for big integer if (useDouble) { while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { - if (RAPIDJSON_UNLIKELY(d >= 1.7976931348623157e307)) // DBL_MAX / 10.0 - RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); d = d * 10 + (s.TakePush() - '0'); } } @@ -1302,9 +1630,18 @@ private: if (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { exp = static_cast(s.Take() - '0'); if (expMinus) { + // (exp + expFrac) must not underflow int => we're detecting when -exp gets + // dangerously close to INT_MIN (a pessimistic next digit 9 would push it into + // underflow territory): + // + // -(exp * 10 + 9) + expFrac >= INT_MIN + // <=> exp <= (expFrac - INT_MIN - 9) / 10 + RAPIDJSON_ASSERT(expFrac <= 0); + int maxExp = (expFrac + 2147483639) / 10; + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { exp = exp * 10 + static_cast(s.Take() - '0'); - if (exp >= 214748364) { // Issue 
#313: prevent overflow exponent + if (RAPIDJSON_UNLIKELY(exp > maxExp)) { while (RAPIDJSON_UNLIKELY(s.Peek() >= '0' && s.Peek() <= '9')) // Consume the rest of exponent s.Take(); } @@ -1363,6 +1700,13 @@ private: else d = internal::StrtodNormalPrecision(d, p); + // Use > max, instead of == inf, to fix bogus warning -Wfloat-equal + if (d > (std::numeric_limits::max)()) { + // Overflow + // TODO: internal::StrtodX should report overflow (or underflow) + RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); + } + cont = handler.Double(minus ? -d : d); } else if (useNanOrInf) { @@ -1408,29 +1752,31 @@ private: // States enum IterativeParsingState { - IterativeParsingStartState = 0, - IterativeParsingFinishState, - IterativeParsingErrorState, + IterativeParsingFinishState = 0, // sink states at top + IterativeParsingErrorState, // sink states at top + IterativeParsingStartState, // Object states IterativeParsingObjectInitialState, IterativeParsingMemberKeyState, - IterativeParsingKeyValueDelimiterState, IterativeParsingMemberValueState, - IterativeParsingMemberDelimiterState, IterativeParsingObjectFinishState, // Array states IterativeParsingArrayInitialState, IterativeParsingElementState, - IterativeParsingElementDelimiterState, IterativeParsingArrayFinishState, // Single value state - IterativeParsingValueState - }; + IterativeParsingValueState, - enum { cIterativeParsingStateCount = IterativeParsingValueState + 1 }; + // Delimiter states (at bottom) + IterativeParsingElementDelimiterState, + IterativeParsingMemberDelimiterState, + IterativeParsingKeyValueDelimiterState, + + cIterativeParsingStateCount + }; // Tokens enum Token { @@ -1452,7 +1798,7 @@ private: kTokenCount }; - RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) { + RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) const { //!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN #define N NumberToken @@ -1479,9 +1825,21 @@ private: return NumberToken; } - RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState 
state, Token token) { + RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState state, Token token) const { // current state x one lookahead token -> new state static const char G[cIterativeParsingStateCount][kTokenCount] = { + // Finish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Error(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, // Start { IterativeParsingArrayInitialState, // Left bracket @@ -1496,18 +1854,6 @@ private: IterativeParsingValueState, // Null IterativeParsingValueState // Number }, - // Finish(sink state) - { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState - }, - // Error(sink state) - { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState - }, // ObjectInitial { IterativeParsingErrorState, // Left bracket @@ -1536,20 +1882,6 @@ private: IterativeParsingErrorState, // Null IterativeParsingErrorState // Number }, - // KeyValueDelimiter - { - 
IterativeParsingArrayInitialState, // Left bracket(push MemberValue state) - IterativeParsingErrorState, // Right bracket - IterativeParsingObjectInitialState, // Left curly bracket(push MemberValue state) - IterativeParsingErrorState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingMemberValueState, // String - IterativeParsingMemberValueState, // False - IterativeParsingMemberValueState, // True - IterativeParsingMemberValueState, // Null - IterativeParsingMemberValueState // Number - }, // MemberValue { IterativeParsingErrorState, // Left bracket @@ -1564,20 +1896,6 @@ private: IterativeParsingErrorState, // Null IterativeParsingErrorState // Number }, - // MemberDelimiter - { - IterativeParsingErrorState, // Left bracket - IterativeParsingErrorState, // Right bracket - IterativeParsingErrorState, // Left curly bracket - IterativeParsingObjectFinishState, // Right curly bracket - IterativeParsingErrorState, // Comma - IterativeParsingErrorState, // Colon - IterativeParsingMemberKeyState, // String - IterativeParsingErrorState, // False - IterativeParsingErrorState, // True - IterativeParsingErrorState, // Null - IterativeParsingErrorState // Number - }, // ObjectFinish(sink state) { IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, @@ -1612,6 +1930,18 @@ private: IterativeParsingErrorState, // Null IterativeParsingErrorState // Number }, + // ArrayFinish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Single Value (sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, 
IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, // ElementDelimiter { IterativeParsingArrayInitialState, // Left bracket(push Element state) @@ -1626,18 +1956,34 @@ private: IterativeParsingElementState, // Null IterativeParsingElementState // Number }, - // ArrayFinish(sink state) + // MemberDelimiter { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberKeyState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number }, - // Single Value (sink state) + // KeyValueDelimiter { - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, - IterativeParsingErrorState - } + IterativeParsingArrayInitialState, // Left bracket(push MemberValue state) + IterativeParsingErrorState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push MemberValue state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon 
+ IterativeParsingMemberValueState, // String + IterativeParsingMemberValueState, // False + IterativeParsingMemberValueState, // True + IterativeParsingMemberValueState, // Null + IterativeParsingMemberValueState // Number + }, }; // End of G return static_cast(G[state][token]); @@ -1818,6 +2164,14 @@ private: } } + RAPIDJSON_FORCEINLINE bool IsIterativeParsingDelimiterState(IterativeParsingState s) const { + return s >= IterativeParsingElementDelimiterState; + } + + RAPIDJSON_FORCEINLINE bool IsIterativeParsingCompleteState(IterativeParsingState s) const { + return s <= IterativeParsingErrorState; + } + template ParseResult IterativeParse(InputStream& is, Handler& handler) { parseResult_.Clear(); @@ -1856,6 +2210,7 @@ private: static const size_t kDefaultStackCapacity = 256; //!< Default stack capacity in bytes for storing a single decoded string. internal::Stack stack_; //!< A stack for storing decoded string temporarily during non-destructive parsing. ParseResult parseResult_; + IterativeParsingState state_; }; // class GenericReader //! Reader with UTF8 encoding and default allocator. 
@@ -1863,7 +2218,7 @@ typedef GenericReader, UTF8<> > Reader; RAPIDJSON_NAMESPACE_END -#ifdef __clang__ +#if defined(__clang__) || defined(_MSC_VER) RAPIDJSON_DIAG_POP #endif @@ -1872,8 +2227,4 @@ RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_POP #endif -#ifdef _MSC_VER -RAPIDJSON_DIAG_POP -#endif - #endif // RAPIDJSON_READER_H_ diff --git a/src/3rdparty/rapidjson/schema.h b/src/3rdparty/rapidjson/schema.h index b182aa27..26ae9474 100644 --- a/src/3rdparty/rapidjson/schema.h +++ b/src/3rdparty/rapidjson/schema.h @@ -17,6 +17,7 @@ #include "document.h" #include "pointer.h" +#include "stringbuffer.h" #include // abs, floor #if !defined(RAPIDJSON_SCHEMA_USE_INTERNALREGEX) @@ -25,7 +26,7 @@ #define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 0 #endif -#if !RAPIDJSON_SCHEMA_USE_INTERNALREGEX && !defined(RAPIDJSON_SCHEMA_USE_STDREGEX) && (__cplusplus >=201103L || (defined(_MSC_VER) && _MSC_VER >= 1800)) +#if !RAPIDJSON_SCHEMA_USE_INTERNALREGEX && defined(RAPIDJSON_SCHEMA_USE_STDREGEX) && (__cplusplus >=201103L || (defined(_MSC_VER) && _MSC_VER >= 1800)) #define RAPIDJSON_SCHEMA_USE_STDREGEX 1 #else #define RAPIDJSON_SCHEMA_USE_STDREGEX 0 @@ -62,9 +63,7 @@ RAPIDJSON_DIAG_OFF(weak-vtables) RAPIDJSON_DIAG_OFF(exit-time-destructors) RAPIDJSON_DIAG_OFF(c++98-compat-pedantic) RAPIDJSON_DIAG_OFF(variadic-macros) -#endif - -#ifdef _MSC_VER +#elif defined(_MSC_VER) RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated #endif @@ -157,6 +156,62 @@ public: virtual void FreeState(void* p) = 0; }; +/////////////////////////////////////////////////////////////////////////////// +// IValidationErrorHandler + +template +class IValidationErrorHandler { +public: + typedef typename SchemaType::Ch Ch; + typedef typename SchemaType::SValue SValue; + + virtual ~IValidationErrorHandler() {} + + virtual void NotMultipleOf(int64_t actual, const SValue& expected) = 0; + virtual void NotMultipleOf(uint64_t actual, const SValue& expected) = 0; + virtual void NotMultipleOf(double actual, const SValue& 
expected) = 0; + virtual void AboveMaximum(int64_t actual, const SValue& expected, bool exclusive) = 0; + virtual void AboveMaximum(uint64_t actual, const SValue& expected, bool exclusive) = 0; + virtual void AboveMaximum(double actual, const SValue& expected, bool exclusive) = 0; + virtual void BelowMinimum(int64_t actual, const SValue& expected, bool exclusive) = 0; + virtual void BelowMinimum(uint64_t actual, const SValue& expected, bool exclusive) = 0; + virtual void BelowMinimum(double actual, const SValue& expected, bool exclusive) = 0; + + virtual void TooLong(const Ch* str, SizeType length, SizeType expected) = 0; + virtual void TooShort(const Ch* str, SizeType length, SizeType expected) = 0; + virtual void DoesNotMatch(const Ch* str, SizeType length) = 0; + + virtual void DisallowedItem(SizeType index) = 0; + virtual void TooFewItems(SizeType actualCount, SizeType expectedCount) = 0; + virtual void TooManyItems(SizeType actualCount, SizeType expectedCount) = 0; + virtual void DuplicateItems(SizeType index1, SizeType index2) = 0; + + virtual void TooManyProperties(SizeType actualCount, SizeType expectedCount) = 0; + virtual void TooFewProperties(SizeType actualCount, SizeType expectedCount) = 0; + virtual void StartMissingProperties() = 0; + virtual void AddMissingProperty(const SValue& name) = 0; + virtual bool EndMissingProperties() = 0; + virtual void PropertyViolations(ISchemaValidator** subvalidators, SizeType count) = 0; + virtual void DisallowedProperty(const Ch* name, SizeType length) = 0; + + virtual void StartDependencyErrors() = 0; + virtual void StartMissingDependentProperties() = 0; + virtual void AddMissingDependentProperty(const SValue& targetName) = 0; + virtual void EndMissingDependentProperties(const SValue& sourceName) = 0; + virtual void AddDependencySchemaError(const SValue& souceName, ISchemaValidator* subvalidator) = 0; + virtual bool EndDependencyErrors() = 0; + + virtual void DisallowedValue() = 0; + virtual void 
StartDisallowedType() = 0; + virtual void AddExpectedType(const typename SchemaType::ValueType& expectedType) = 0; + virtual void EndDisallowedType(const typename SchemaType::ValueType& actualType) = 0; + virtual void NotAllOf(ISchemaValidator** subvalidators, SizeType count) = 0; + virtual void NoneOf(ISchemaValidator** subvalidators, SizeType count) = 0; + virtual void NotOneOf(ISchemaValidator** subvalidators, SizeType count) = 0; + virtual void Disallowed() = 0; +}; + + /////////////////////////////////////////////////////////////////////////////// // Hasher @@ -261,6 +316,7 @@ template struct SchemaValidationContext { typedef Schema SchemaType; typedef ISchemaStateFactory SchemaValidatorFactoryType; + typedef IValidationErrorHandler ErrorHandlerType; typedef typename SchemaType::ValueType ValueType; typedef typename ValueType::Ch Ch; @@ -270,8 +326,9 @@ struct SchemaValidationContext { kPatternValidatorWithAdditionalProperty }; - SchemaValidationContext(SchemaValidatorFactoryType& f, const SchemaType* s) : + SchemaValidationContext(SchemaValidatorFactoryType& f, ErrorHandlerType& eh, const SchemaType* s) : factory(f), + error_handler(eh), schema(s), valueSchema(), invalidKeyword(), @@ -311,6 +368,7 @@ struct SchemaValidationContext { } SchemaValidatorFactoryType& factory; + ErrorHandlerType& error_handler; const SchemaType* schema; const SchemaType* valueSchema; const Ch* invalidKeyword; @@ -345,15 +403,20 @@ public: typedef SchemaValidationContext Context; typedef Schema SchemaType; typedef GenericValue SValue; + typedef IValidationErrorHandler ErrorHandler; friend class GenericSchemaDocument; Schema(SchemaDocumentType* schemaDocument, const PointerType& p, const ValueType& value, const ValueType& document, AllocatorType* allocator) : allocator_(allocator), + uri_(schemaDocument->GetURI(), *allocator), + pointer_(p, allocator), + typeless_(schemaDocument->GetTypeless()), enum_(), enumCount_(), not_(), type_((1 << kTotalSchemaType) - 1), // typeless 
validatorCount_(), + notValidatorIndex_(), properties_(), additionalPropertiesSchema_(), patternProperties_(), @@ -377,7 +440,8 @@ public: minLength_(0), maxLength_(~SizeType(0)), exclusiveMinimum_(false), - exclusiveMaximum_(false) + exclusiveMaximum_(false), + defaultValueLength_(0) { typedef typename SchemaDocumentType::ValueType ValueType; typedef typename ValueType::ConstValueIterator ConstValueIterator; @@ -400,7 +464,7 @@ public: enum_ = static_cast(allocator_->Malloc(sizeof(uint64_t) * v->Size())); for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) { typedef Hasher > EnumHasherType; - char buffer[256 + 24]; + char buffer[256u + 24]; MemoryPoolAllocator<> hasherAllocator(buffer, sizeof(buffer)); EnumHasherType h(&hasherAllocator, 256); itr->Accept(h); @@ -453,7 +517,7 @@ public: for (SizeType i = 0; i < propertyCount_; i++) { new (&properties_[i]) Property(); properties_[i].name = allProperties[i]; - properties_[i].schema = GetTypeless(); + properties_[i].schema = typeless_; } } } @@ -572,12 +636,16 @@ public: if (const ValueType* v = GetMember(value, GetMultipleOfString())) if (v->IsNumber() && v->GetDouble() > 0.0) multipleOf_.CopyFrom(*v, *allocator_); + + // Default + if (const ValueType* v = GetMember(value, GetDefaultValueString())) + if (v->IsString()) + defaultValueLength_ = v->GetStringLength(); + } ~Schema() { - if (allocator_) { - allocator_->Free(enum_); - } + AllocatorType::Free(enum_); if (properties_) { for (SizeType i = 0; i < propertyCount_; i++) properties_[i].~Property(); @@ -592,11 +660,19 @@ public: #if RAPIDJSON_SCHEMA_HAS_REGEX if (pattern_) { pattern_->~RegexType(); - allocator_->Free(pattern_); + AllocatorType::Free(pattern_); } #endif } + const SValue& GetURI() const { + return uri_; + } + + const PointerType& GetPointer() const { + return pointer_; + } + bool BeginValue(Context& context) const { if (context.inArray) { if (uniqueItems_) @@ -610,12 +686,14 @@ public: else if (additionalItemsSchema_) context.valueSchema 
= additionalItemsSchema_; else if (additionalItems_) - context.valueSchema = GetTypeless(); - else + context.valueSchema = typeless_; + else { + context.error_handler.DisallowedItem(context.arrayElementIndex); RAPIDJSON_INVALID_KEYWORD_RETURN(GetItemsString()); + } } else - context.valueSchema = GetTypeless(); + context.valueSchema = typeless_; context.arrayElementIndex++; } @@ -637,15 +715,21 @@ public: } if (context.objectPatternValidatorType == Context::kPatternValidatorOnly) { - if (!patternValid) + if (!patternValid) { + context.error_handler.PropertyViolations(context.patternPropertiesValidators, count); RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } } else if (context.objectPatternValidatorType == Context::kPatternValidatorWithProperty) { - if (!patternValid || !otherValid) + if (!patternValid || !otherValid) { + context.error_handler.PropertyViolations(context.patternPropertiesValidators, count + 1); RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } } - else if (!patternValid && !otherValid) // kPatternValidatorWithAdditionalProperty) + else if (!patternValid && !otherValid) { // kPatternValidatorWithAdditionalProperty) + context.error_handler.PropertyViolations(context.patternPropertiesValidators, count + 1); RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } } if (enum_) { @@ -653,19 +737,23 @@ public: for (SizeType i = 0; i < enumCount_; i++) if (enum_[i] == h) goto foundEnum; + context.error_handler.DisallowedValue(); RAPIDJSON_INVALID_KEYWORD_RETURN(GetEnumString()); foundEnum:; } if (allOf_.schemas) for (SizeType i = allOf_.begin; i < allOf_.begin + allOf_.count; i++) - if (!context.validators[i]->IsValid()) + if (!context.validators[i]->IsValid()) { + context.error_handler.NotAllOf(&context.validators[allOf_.begin], allOf_.count); RAPIDJSON_INVALID_KEYWORD_RETURN(GetAllOfString()); + } if (anyOf_.schemas) { for (SizeType i = anyOf_.begin; i < anyOf_.begin + anyOf_.count; i++) if 
(context.validators[i]->IsValid()) goto foundAny; + context.error_handler.NoneOf(&context.validators[anyOf_.begin], anyOf_.count); RAPIDJSON_INVALID_KEYWORD_RETURN(GetAnyOfString()); foundAny:; } @@ -674,30 +762,39 @@ public: bool oneValid = false; for (SizeType i = oneOf_.begin; i < oneOf_.begin + oneOf_.count; i++) if (context.validators[i]->IsValid()) { - if (oneValid) + if (oneValid) { + context.error_handler.NotOneOf(&context.validators[oneOf_.begin], oneOf_.count); RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); - else + } else oneValid = true; } - if (!oneValid) + if (!oneValid) { + context.error_handler.NotOneOf(&context.validators[oneOf_.begin], oneOf_.count); RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); + } } - if (not_ && context.validators[notValidatorIndex_]->IsValid()) + if (not_ && context.validators[notValidatorIndex_]->IsValid()) { + context.error_handler.Disallowed(); RAPIDJSON_INVALID_KEYWORD_RETURN(GetNotString()); + } return true; } - bool Null(Context& context) const { - if (!(type_ & (1 << kNullSchemaType))) + bool Null(Context& context) const { + if (!(type_ & (1 << kNullSchemaType))) { + DisallowedType(context, GetNullString()); RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } return CreateParallelValidator(context); } - bool Bool(Context& context, bool) const { - if (!(type_ & (1 << kBooleanSchemaType))) + bool Bool(Context& context, bool) const { + if (!(type_ & (1 << kBooleanSchemaType))) { + DisallowedType(context, GetBooleanString()); RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } return CreateParallelValidator(context); } @@ -726,8 +823,10 @@ public: } bool Double(Context& context, double d) const { - if (!(type_ & (1 << kNumberSchemaType))) + if (!(type_ & (1 << kNumberSchemaType))) { + DisallowedType(context, GetNumberString()); RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } if (!minimum_.IsNull() && !CheckDoubleMinimum(context, d)) return false; @@ -742,28 +841,38 @@ public: } bool String(Context& 
context, const Ch* str, SizeType length, bool) const { - if (!(type_ & (1 << kStringSchemaType))) + if (!(type_ & (1 << kStringSchemaType))) { + DisallowedType(context, GetStringString()); RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } if (minLength_ != 0 || maxLength_ != SizeType(~0)) { SizeType count; if (internal::CountStringCodePoint(str, length, &count)) { - if (count < minLength_) + if (count < minLength_) { + context.error_handler.TooShort(str, length, minLength_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinLengthString()); - if (count > maxLength_) + } + if (count > maxLength_) { + context.error_handler.TooLong(str, length, maxLength_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxLengthString()); + } } } - if (pattern_ && !IsPatternMatch(pattern_, str, length)) + if (pattern_ && !IsPatternMatch(pattern_, str, length)) { + context.error_handler.DoesNotMatch(str, length); RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternString()); + } return CreateParallelValidator(context); } - bool StartObject(Context& context) const { - if (!(type_ & (1 << kObjectSchemaType))) + bool StartObject(Context& context) const { + if (!(type_ & (1 << kObjectSchemaType))) { + DisallowedType(context, GetObjectString()); RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } if (hasDependencies_ || hasRequired_) { context.propertyExist = static_cast(context.factory.MallocState(sizeof(bool) * propertyCount_)); @@ -784,15 +893,17 @@ public: if (patternProperties_) { context.patternPropertiesSchemaCount = 0; for (SizeType i = 0; i < patternPropertyCount_; i++) - if (patternProperties_[i].pattern && IsPatternMatch(patternProperties_[i].pattern, str, len)) + if (patternProperties_[i].pattern && IsPatternMatch(patternProperties_[i].pattern, str, len)) { context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = patternProperties_[i].schema; + context.valueSchema = typeless_; + } } SizeType index; if (FindPropertyIndex(ValueType(str, len).Move(), &index)) { if 
(context.patternPropertiesSchemaCount > 0) { context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = properties_[index].schema; - context.valueSchema = GetTypeless(); + context.valueSchema = typeless_; context.valuePatternValidatorType = Context::kPatternValidatorWithProperty; } else @@ -807,7 +918,7 @@ public: if (additionalPropertiesSchema_) { if (additionalPropertiesSchema_ && context.patternPropertiesSchemaCount > 0) { context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = additionalPropertiesSchema_; - context.valueSchema = GetTypeless(); + context.valueSchema = typeless_; context.valuePatternValidatorType = Context::kPatternValidatorWithAdditionalProperty; } else @@ -815,49 +926,70 @@ public: return true; } else if (additionalProperties_) { - context.valueSchema = GetTypeless(); + context.valueSchema = typeless_; return true; } - if (context.patternPropertiesSchemaCount == 0) // patternProperties are not additional properties + if (context.patternPropertiesSchemaCount == 0) { // patternProperties are not additional properties + context.error_handler.DisallowedProperty(str, len); RAPIDJSON_INVALID_KEYWORD_RETURN(GetAdditionalPropertiesString()); + } return true; } bool EndObject(Context& context, SizeType memberCount) const { - if (hasRequired_) + if (hasRequired_) { + context.error_handler.StartMissingProperties(); for (SizeType index = 0; index < propertyCount_; index++) - if (properties_[index].required) - if (!context.propertyExist[index]) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString()); + if (properties_[index].required && !context.propertyExist[index]) + if (properties_[index].schema->defaultValueLength_ == 0 ) + context.error_handler.AddMissingProperty(properties_[index].name); + if (context.error_handler.EndMissingProperties()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString()); + } - if (memberCount < minProperties_) + if (memberCount < minProperties_) { + 
context.error_handler.TooFewProperties(memberCount, minProperties_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinPropertiesString()); + } - if (memberCount > maxProperties_) + if (memberCount > maxProperties_) { + context.error_handler.TooManyProperties(memberCount, maxProperties_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxPropertiesString()); + } if (hasDependencies_) { - for (SizeType sourceIndex = 0; sourceIndex < propertyCount_; sourceIndex++) + context.error_handler.StartDependencyErrors(); + for (SizeType sourceIndex = 0; sourceIndex < propertyCount_; sourceIndex++) { + const Property& source = properties_[sourceIndex]; if (context.propertyExist[sourceIndex]) { - if (properties_[sourceIndex].dependencies) { + if (source.dependencies) { + context.error_handler.StartMissingDependentProperties(); for (SizeType targetIndex = 0; targetIndex < propertyCount_; targetIndex++) - if (properties_[sourceIndex].dependencies[targetIndex] && !context.propertyExist[targetIndex]) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); + if (source.dependencies[targetIndex] && !context.propertyExist[targetIndex]) + context.error_handler.AddMissingDependentProperty(properties_[targetIndex].name); + context.error_handler.EndMissingDependentProperties(source.name); + } + else if (source.dependenciesSchema) { + ISchemaValidator* dependenciesValidator = context.validators[source.dependenciesValidatorIndex]; + if (!dependenciesValidator->IsValid()) + context.error_handler.AddDependencySchemaError(source.name, dependenciesValidator); } - else if (properties_[sourceIndex].dependenciesSchema) - if (!context.validators[properties_[sourceIndex].dependenciesValidatorIndex]->IsValid()) - RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); } + } + if (context.error_handler.EndDependencyErrors()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); } return true; } - bool StartArray(Context& context) const { - if (!(type_ & (1 << kArraySchemaType))) + bool StartArray(Context& 
context) const { + if (!(type_ & (1 << kArraySchemaType))) { + DisallowedType(context, GetArrayString()); RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } context.arrayElementIndex = 0; context.inArray = true; @@ -865,14 +997,18 @@ public: return CreateParallelValidator(context); } - bool EndArray(Context& context, SizeType elementCount) const { + bool EndArray(Context& context, SizeType elementCount) const { context.inArray = false; - if (elementCount < minItems_) + if (elementCount < minItems_) { + context.error_handler.TooFewItems(elementCount, minItems_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinItemsString()); + } - if (elementCount > maxItems_) + if (elementCount > maxItems_) { + context.error_handler.TooManyItems(elementCount, maxItems_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxItemsString()); + } return true; } @@ -881,7 +1017,7 @@ public: #define RAPIDJSON_STRING_(name, ...) \ static const ValueType& Get##name##String() {\ static const Ch s[] = { __VA_ARGS__, '\0' };\ - static const ValueType v(s, sizeof(s) / sizeof(Ch) - 1);\ + static const ValueType v(s, static_cast(sizeof(s) / sizeof(Ch) - 1));\ return v;\ } @@ -918,6 +1054,7 @@ public: RAPIDJSON_STRING_(ExclusiveMinimum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'i', 'n', 'i', 'm', 'u', 'm') RAPIDJSON_STRING_(ExclusiveMaximum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'a', 'x', 'i', 'm', 'u', 'm') RAPIDJSON_STRING_(MultipleOf, 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', 'O', 'f') + RAPIDJSON_STRING_(DefaultValue, 'd', 'e', 'f', 'a', 'u', 'l', 't') #undef RAPIDJSON_STRING_ @@ -934,7 +1071,7 @@ private: }; #if RAPIDJSON_SCHEMA_USE_INTERNALREGEX - typedef internal::GenericRegex RegexType; + typedef internal::GenericRegex RegexType; #elif RAPIDJSON_SCHEMA_USE_STDREGEX typedef std::basic_regex RegexType; #else @@ -949,11 +1086,6 @@ private: SizeType count; }; - static const SchemaType* GetTypeless() { - static SchemaType typeless(0, PointerType(), ValueType(kObjectType).Move(), 
ValueType(kObjectType).Move(), 0); - return &typeless; - } - template void AddUniqueElement(V1& a, const V2& v) { for (typename V1::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr) @@ -999,7 +1131,7 @@ private: template RegexType* CreatePattern(const ValueType& value) { if (value.IsString()) { - RegexType* r = new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString()); + RegexType* r = new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), allocator_); if (!r->IsValid()) { r->~RegexType(); AllocatorType::Free(r); @@ -1011,17 +1143,21 @@ private: } static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType) { - return pattern->Search(str); + GenericRegexSearch rs(*pattern); + return rs.Search(str); } #elif RAPIDJSON_SCHEMA_USE_STDREGEX template RegexType* CreatePattern(const ValueType& value) { - if (value.IsString()) + if (value.IsString()) { + RegexType *r = static_cast(allocator_->Malloc(sizeof(RegexType))); try { - return new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript); + return new (r) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript); } catch (const std::regex_error&) { + AllocatorType::Free(r); } + } return 0; } @@ -1097,15 +1233,20 @@ private: } bool CheckInt(Context& context, int64_t i) const { - if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) + if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) { + DisallowedType(context, GetIntegerString()); RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } if (!minimum_.IsNull()) { if (minimum_.IsInt64()) { - if (exclusiveMinimum_ ? i <= minimum_.GetInt64() : i < minimum_.GetInt64()) + if (exclusiveMinimum_ ? 
i <= minimum_.GetInt64() : i < minimum_.GetInt64()) { + context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } } else if (minimum_.IsUint64()) { + context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); // i <= max(int64_t) < minimum.GetUint64() } else if (!CheckDoubleMinimum(context, static_cast(i))) @@ -1114,19 +1255,23 @@ private: if (!maximum_.IsNull()) { if (maximum_.IsInt64()) { - if (exclusiveMaximum_ ? i >= maximum_.GetInt64() : i > maximum_.GetInt64()) + if (exclusiveMaximum_ ? i >= maximum_.GetInt64() : i > maximum_.GetInt64()) { + context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } } - else if (maximum_.IsUint64()) - /* do nothing */; // i <= max(int64_t) < maximum_.GetUint64() + else if (maximum_.IsUint64()) { } + /* do nothing */ // i <= max(int64_t) < maximum_.GetUint64() else if (!CheckDoubleMaximum(context, static_cast(i))) return false; } if (!multipleOf_.IsNull()) { if (multipleOf_.IsUint64()) { - if (static_cast(i >= 0 ? i : -i) % multipleOf_.GetUint64() != 0) + if (static_cast(i >= 0 ? i : -i) % multipleOf_.GetUint64() != 0) { + context.error_handler.NotMultipleOf(i, multipleOf_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } } else if (!CheckDoubleMultipleOf(context, static_cast(i))) return false; @@ -1136,13 +1281,17 @@ private: } bool CheckUint(Context& context, uint64_t i) const { - if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) + if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) { + DisallowedType(context, GetIntegerString()); RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } if (!minimum_.IsNull()) { if (minimum_.IsUint64()) { - if (exclusiveMinimum_ ? i <= minimum_.GetUint64() : i < minimum_.GetUint64()) + if (exclusiveMinimum_ ? 
i <= minimum_.GetUint64() : i < minimum_.GetUint64()) { + context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } } else if (minimum_.IsInt64()) /* do nothing */; // i >= 0 > minimum.Getint64() @@ -1152,19 +1301,25 @@ private: if (!maximum_.IsNull()) { if (maximum_.IsUint64()) { - if (exclusiveMaximum_ ? i >= maximum_.GetUint64() : i > maximum_.GetUint64()) + if (exclusiveMaximum_ ? i >= maximum_.GetUint64() : i > maximum_.GetUint64()) { + context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } } - else if (maximum_.IsInt64()) + else if (maximum_.IsInt64()) { + context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); // i >= 0 > maximum_ + } else if (!CheckDoubleMaximum(context, static_cast(i))) return false; } if (!multipleOf_.IsNull()) { if (multipleOf_.IsUint64()) { - if (i % multipleOf_.GetUint64() != 0) + if (i % multipleOf_.GetUint64() != 0) { + context.error_handler.NotMultipleOf(i, multipleOf_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } } else if (!CheckDoubleMultipleOf(context, static_cast(i))) return false; @@ -1174,14 +1329,18 @@ private: } bool CheckDoubleMinimum(Context& context, double d) const { - if (exclusiveMinimum_ ? d <= minimum_.GetDouble() : d < minimum_.GetDouble()) + if (exclusiveMinimum_ ? d <= minimum_.GetDouble() : d < minimum_.GetDouble()) { + context.error_handler.BelowMinimum(d, minimum_, exclusiveMinimum_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } return true; } bool CheckDoubleMaximum(Context& context, double d) const { - if (exclusiveMaximum_ ? d >= maximum_.GetDouble() : d > maximum_.GetDouble()) + if (exclusiveMaximum_ ? 
d >= maximum_.GetDouble() : d > maximum_.GetDouble()) { + context.error_handler.AboveMaximum(d, maximum_, exclusiveMaximum_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } return true; } @@ -1189,11 +1348,29 @@ private: double a = std::abs(d), b = std::abs(multipleOf_.GetDouble()); double q = std::floor(a / b); double r = a - q * b; - if (r > 0.0) + if (r > 0.0) { + context.error_handler.NotMultipleOf(d, multipleOf_); RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } return true; } + void DisallowedType(Context& context, const ValueType& actualType) const { + ErrorHandler& eh = context.error_handler; + eh.StartDisallowedType(); + + if (type_ & (1 << kNullSchemaType)) eh.AddExpectedType(GetNullString()); + if (type_ & (1 << kBooleanSchemaType)) eh.AddExpectedType(GetBooleanString()); + if (type_ & (1 << kObjectSchemaType)) eh.AddExpectedType(GetObjectString()); + if (type_ & (1 << kArraySchemaType)) eh.AddExpectedType(GetArrayString()); + if (type_ & (1 << kStringSchemaType)) eh.AddExpectedType(GetStringString()); + + if (type_ & (1 << kNumberSchemaType)) eh.AddExpectedType(GetNumberString()); + else if (type_ & (1 << kIntegerSchemaType)) eh.AddExpectedType(GetIntegerString()); + + eh.EndDisallowedType(actualType); + } + struct Property { Property() : schema(), dependenciesSchema(), dependenciesValidatorIndex(), dependencies(), required(false) {} ~Property() { AllocatorType::Free(dependencies); } @@ -1218,6 +1395,9 @@ private: }; AllocatorType* allocator_; + SValue uri_; + PointerType pointer_; + const SchemaType* typeless_; uint64_t* enum_; SizeType enumCount_; SchemaArray allOf_; @@ -1258,6 +1438,8 @@ private: SValue multipleOf_; bool exclusiveMinimum_; bool exclusiveMaximum_; + + SizeType defaultValueLength_; }; template @@ -1267,7 +1449,7 @@ struct TokenHelper { char buffer[21]; size_t length = static_cast((sizeof(SizeType) == 4 ? 
u32toa(index, buffer) : u64toa(index, buffer)) - buffer); for (size_t i = 0; i < length; i++) - *documentStack.template Push() = buffer[i]; + *documentStack.template Push() = static_cast(buffer[i]); } }; @@ -1326,6 +1508,7 @@ public: typedef typename EncodingType::Ch Ch; typedef internal::Schema SchemaType; typedef GenericPointer PointerType; + typedef GenericValue URIType; friend class internal::Schema; template friend class GenericSchemaValidator; @@ -1335,19 +1518,29 @@ public: Compile a JSON document into schema document. \param document A JSON document as source. + \param uri The base URI of this schema document for purposes of violation reporting. + \param uriLength Length of \c name, in code points. \param remoteProvider An optional remote schema document provider for resolving remote reference. Can be null. \param allocator An optional allocator instance for allocating memory. Can be null. */ - explicit GenericSchemaDocument(const ValueType& document, IRemoteSchemaDocumentProviderType* remoteProvider = 0, Allocator* allocator = 0) : + explicit GenericSchemaDocument(const ValueType& document, const Ch* uri = 0, SizeType uriLength = 0, + IRemoteSchemaDocumentProviderType* remoteProvider = 0, Allocator* allocator = 0) : remoteProvider_(remoteProvider), allocator_(allocator), ownAllocator_(), root_(), + typeless_(), schemaMap_(allocator, kInitialSchemaMapSize), schemaRef_(allocator, kInitialSchemaRefSize) { if (!allocator_) - ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); + + Ch noUri[1] = {0}; + uri_.SetString(uri ? uri : noUri, uriLength, *allocator_); + + typeless_ = static_cast(allocator_->Malloc(sizeof(SchemaType))); + new (typeless_) SchemaType(this, PointerType(), ValueType(kObjectType).Move(), ValueType(kObjectType).Move(), allocator_); // Generate root schema, it will call CreateSchema() to create sub-schemas, // And call AddRefSchema() if there are $ref. 
@@ -1365,6 +1558,9 @@ public: new (schemaMap_.template Push()) SchemaEntry(refEntry->source, const_cast(s), false, allocator_); } } + else if (refEntry->schema) + *refEntry->schema = typeless_; + refEntry->~SchemaRefEntry(); } @@ -1380,12 +1576,15 @@ public: allocator_(rhs.allocator_), ownAllocator_(rhs.ownAllocator_), root_(rhs.root_), + typeless_(rhs.typeless_), schemaMap_(std::move(rhs.schemaMap_)), - schemaRef_(std::move(rhs.schemaRef_)) + schemaRef_(std::move(rhs.schemaRef_)), + uri_(std::move(rhs.uri_)) { rhs.remoteProvider_ = 0; rhs.allocator_ = 0; rhs.ownAllocator_ = 0; + rhs.typeless_ = 0; } #endif @@ -1394,9 +1593,16 @@ public: while (!schemaMap_.Empty()) schemaMap_.template Pop(1)->~SchemaEntry(); + if (typeless_) { + typeless_->~SchemaType(); + Allocator::Free(typeless_); + } + RAPIDJSON_DELETE(ownAllocator_); } + const URIType& GetURI() const { return uri_; } + //! Get the root schema. const SchemaType& GetRoot() const { return *root_; } @@ -1428,7 +1634,7 @@ private: void CreateSchemaRecursive(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { if (schema) - *schema = SchemaType::GetTypeless(); + *schema = typeless_; if (v.GetType() == kObjectType) { const SchemaType* s = GetSchema(pointer); @@ -1473,12 +1679,13 @@ private: if (i > 0) { // Remote reference, resolve immediately if (remoteProvider_) { - if (const GenericSchemaDocument* remoteDocument = remoteProvider_->GetRemoteDocument(s, i - 1)) { + if (const GenericSchemaDocument* remoteDocument = remoteProvider_->GetRemoteDocument(s, i)) { PointerType pointer(&s[i], len - i, allocator_); if (pointer.IsValid()) { if (const SchemaType* sc = remoteDocument->GetSchema(pointer)) { if (schema) *schema = sc; + new (schemaMap_.template Push()) SchemaEntry(source, const_cast(sc), false, allocator_); return true; } } @@ -1515,6 +1722,8 @@ private: return PointerType(); } + const SchemaType* GetTypeless() const { return typeless_; } + static const size_t 
kInitialSchemaMapSize = 64; static const size_t kInitialSchemaRefSize = 64; @@ -1522,8 +1731,10 @@ private: Allocator *allocator_; Allocator *ownAllocator_; const SchemaType* root_; //!< Root schema. + SchemaType* typeless_; internal::Stack schemaMap_; // Stores created Pointer -> Schemas internal::Stack schemaRef_; // Stores Pointer from $ref and schema which holds the $ref + URIType uri_; }; //! GenericSchemaDocument using Value type. @@ -1552,13 +1763,17 @@ template < typename StateAllocator = CrtAllocator> class GenericSchemaValidator : public internal::ISchemaStateFactory, - public internal::ISchemaValidator + public internal::ISchemaValidator, + public internal::IValidationErrorHandler { public: typedef typename SchemaDocumentType::SchemaType SchemaType; typedef typename SchemaDocumentType::PointerType PointerType; typedef typename SchemaType::EncodingType EncodingType; + typedef typename SchemaType::SValue SValue; typedef typename EncodingType::Ch Ch; + typedef GenericStringRef StringRefType; + typedef GenericValue ValueType; //! Constructor without output handler. /*! 
@@ -1575,11 +1790,14 @@ public: : schemaDocument_(&schemaDocument), root_(schemaDocument.GetRoot()), - outputHandler_(GetNullHandler()), stateAllocator_(allocator), ownStateAllocator_(0), schemaStack_(allocator, schemaStackCapacity), documentStack_(allocator, documentStackCapacity), + outputHandler_(0), + error_(kObjectType), + currentError_(), + missingDependents_(), valid_(true) #if RAPIDJSON_SCHEMA_VERBOSE , depth_(0) @@ -1603,11 +1821,14 @@ public: : schemaDocument_(&schemaDocument), root_(schemaDocument.GetRoot()), - outputHandler_(outputHandler), stateAllocator_(allocator), ownStateAllocator_(0), schemaStack_(allocator, schemaStackCapacity), documentStack_(allocator, documentStackCapacity), + outputHandler_(&outputHandler), + error_(kObjectType), + currentError_(), + missingDependents_(), valid_(true) #if RAPIDJSON_SCHEMA_VERBOSE , depth_(0) @@ -1626,6 +1847,9 @@ public: while (!schemaStack_.Empty()) PopSchema(); documentStack_.Clear(); + error_.SetObject(); + currentError_.SetNull(); + missingDependents_.SetNull(); valid_ = true; } @@ -1633,9 +1857,13 @@ public: // Implementation of ISchemaValidator virtual bool IsValid() const { return valid_; } + //! Gets the error object. + ValueType& GetError() { return error_; } + const ValueType& GetError() const { return error_; } + //! Gets the JSON pointer pointed to the invalid schema. PointerType GetInvalidSchemaPointer() const { - return schemaStack_.Empty() ? PointerType() : schemaDocument_->GetPointer(&CurrentSchema()); + return schemaStack_.Empty() ? PointerType() : CurrentSchema().GetPointer(); } //! Gets the keyword of invalid schema. @@ -1645,9 +1873,196 @@ public: //! Gets the JSON pointer pointed to the invalid value. PointerType GetInvalidDocumentPointer() const { - return documentStack_.Empty() ? 
PointerType() : PointerType(documentStack_.template Bottom(), documentStack_.GetSize() / sizeof(Ch)); + if (documentStack_.Empty()) { + return PointerType(); + } + else { + return PointerType(documentStack_.template Bottom(), documentStack_.GetSize() / sizeof(Ch)); + } } + void NotMultipleOf(int64_t actual, const SValue& expected) { + AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected); + } + void NotMultipleOf(uint64_t actual, const SValue& expected) { + AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected); + } + void NotMultipleOf(double actual, const SValue& expected) { + AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected); + } + void AboveMaximum(int64_t actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMaximumString : 0); + } + void AboveMaximum(uint64_t actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMaximumString : 0); + } + void AboveMaximum(double actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMaximumString : 0); + } + void BelowMinimum(int64_t actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMinimumString : 0); + } + void BelowMinimum(uint64_t actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected, + exclusive ? 
&SchemaType::GetExclusiveMinimumString : 0); + } + void BelowMinimum(double actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMinimumString : 0); + } + + void TooLong(const Ch* str, SizeType length, SizeType expected) { + AddNumberError(SchemaType::GetMaxLengthString(), + ValueType(str, length, GetStateAllocator()).Move(), SValue(expected).Move()); + } + void TooShort(const Ch* str, SizeType length, SizeType expected) { + AddNumberError(SchemaType::GetMinLengthString(), + ValueType(str, length, GetStateAllocator()).Move(), SValue(expected).Move()); + } + void DoesNotMatch(const Ch* str, SizeType length) { + currentError_.SetObject(); + currentError_.AddMember(GetActualString(), ValueType(str, length, GetStateAllocator()).Move(), GetStateAllocator()); + AddCurrentError(SchemaType::GetPatternString()); + } + + void DisallowedItem(SizeType index) { + currentError_.SetObject(); + currentError_.AddMember(GetDisallowedString(), ValueType(index).Move(), GetStateAllocator()); + AddCurrentError(SchemaType::GetAdditionalItemsString(), true); + } + void TooFewItems(SizeType actualCount, SizeType expectedCount) { + AddNumberError(SchemaType::GetMinItemsString(), + ValueType(actualCount).Move(), SValue(expectedCount).Move()); + } + void TooManyItems(SizeType actualCount, SizeType expectedCount) { + AddNumberError(SchemaType::GetMaxItemsString(), + ValueType(actualCount).Move(), SValue(expectedCount).Move()); + } + void DuplicateItems(SizeType index1, SizeType index2) { + ValueType duplicates(kArrayType); + duplicates.PushBack(index1, GetStateAllocator()); + duplicates.PushBack(index2, GetStateAllocator()); + currentError_.SetObject(); + currentError_.AddMember(GetDuplicatesString(), duplicates, GetStateAllocator()); + AddCurrentError(SchemaType::GetUniqueItemsString(), true); + } + + void TooManyProperties(SizeType actualCount, SizeType expectedCount) { + 
AddNumberError(SchemaType::GetMaxPropertiesString(), + ValueType(actualCount).Move(), SValue(expectedCount).Move()); + } + void TooFewProperties(SizeType actualCount, SizeType expectedCount) { + AddNumberError(SchemaType::GetMinPropertiesString(), + ValueType(actualCount).Move(), SValue(expectedCount).Move()); + } + void StartMissingProperties() { + currentError_.SetArray(); + } + void AddMissingProperty(const SValue& name) { + currentError_.PushBack(ValueType(name, GetStateAllocator()).Move(), GetStateAllocator()); + } + bool EndMissingProperties() { + if (currentError_.Empty()) + return false; + ValueType error(kObjectType); + error.AddMember(GetMissingString(), currentError_, GetStateAllocator()); + currentError_ = error; + AddCurrentError(SchemaType::GetRequiredString()); + return true; + } + void PropertyViolations(ISchemaValidator** subvalidators, SizeType count) { + for (SizeType i = 0; i < count; ++i) + MergeError(static_cast(subvalidators[i])->GetError()); + } + void DisallowedProperty(const Ch* name, SizeType length) { + currentError_.SetObject(); + currentError_.AddMember(GetDisallowedString(), ValueType(name, length, GetStateAllocator()).Move(), GetStateAllocator()); + AddCurrentError(SchemaType::GetAdditionalPropertiesString(), true); + } + + void StartDependencyErrors() { + currentError_.SetObject(); + } + void StartMissingDependentProperties() { + missingDependents_.SetArray(); + } + void AddMissingDependentProperty(const SValue& targetName) { + missingDependents_.PushBack(ValueType(targetName, GetStateAllocator()).Move(), GetStateAllocator()); + } + void EndMissingDependentProperties(const SValue& sourceName) { + if (!missingDependents_.Empty()) + currentError_.AddMember(ValueType(sourceName, GetStateAllocator()).Move(), + missingDependents_, GetStateAllocator()); + } + void AddDependencySchemaError(const SValue& sourceName, ISchemaValidator* subvalidator) { + currentError_.AddMember(ValueType(sourceName, GetStateAllocator()).Move(), + 
static_cast(subvalidator)->GetError(), GetStateAllocator()); + } + bool EndDependencyErrors() { + if (currentError_.ObjectEmpty()) + return false; + ValueType error(kObjectType); + error.AddMember(GetErrorsString(), currentError_, GetStateAllocator()); + currentError_ = error; + AddCurrentError(SchemaType::GetDependenciesString()); + return true; + } + + void DisallowedValue() { + currentError_.SetObject(); + AddCurrentError(SchemaType::GetEnumString()); + } + void StartDisallowedType() { + currentError_.SetArray(); + } + void AddExpectedType(const typename SchemaType::ValueType& expectedType) { + currentError_.PushBack(ValueType(expectedType, GetStateAllocator()).Move(), GetStateAllocator()); + } + void EndDisallowedType(const typename SchemaType::ValueType& actualType) { + ValueType error(kObjectType); + error.AddMember(GetExpectedString(), currentError_, GetStateAllocator()); + error.AddMember(GetActualString(), ValueType(actualType, GetStateAllocator()).Move(), GetStateAllocator()); + currentError_ = error; + AddCurrentError(SchemaType::GetTypeString()); + } + void NotAllOf(ISchemaValidator** subvalidators, SizeType count) { + for (SizeType i = 0; i < count; ++i) { + MergeError(static_cast(subvalidators[i])->GetError()); + } + } + void NoneOf(ISchemaValidator** subvalidators, SizeType count) { + AddErrorArray(SchemaType::GetAnyOfString(), subvalidators, count); + } + void NotOneOf(ISchemaValidator** subvalidators, SizeType count) { + AddErrorArray(SchemaType::GetOneOfString(), subvalidators, count); + } + void Disallowed() { + currentError_.SetObject(); + AddCurrentError(SchemaType::GetNotString()); + } + +#define RAPIDJSON_STRING_(name, ...) 
\ + static const StringRefType& Get##name##String() {\ + static const Ch s[] = { __VA_ARGS__, '\0' };\ + static const StringRefType v(s, static_cast(sizeof(s) / sizeof(Ch) - 1)); \ + return v;\ + } + + RAPIDJSON_STRING_(InstanceRef, 'i', 'n', 's', 't', 'a', 'n', 'c', 'e', 'R', 'e', 'f') + RAPIDJSON_STRING_(SchemaRef, 's', 'c', 'h', 'e', 'm', 'a', 'R', 'e', 'f') + RAPIDJSON_STRING_(Expected, 'e', 'x', 'p', 'e', 'c', 't', 'e', 'd') + RAPIDJSON_STRING_(Actual, 'a', 'c', 't', 'u', 'a', 'l') + RAPIDJSON_STRING_(Disallowed, 'd', 'i', 's', 'a', 'l', 'l', 'o', 'w', 'e', 'd') + RAPIDJSON_STRING_(Missing, 'm', 'i', 's', 's', 'i', 'n', 'g') + RAPIDJSON_STRING_(Errors, 'e', 'r', 'r', 'o', 'r', 's') + RAPIDJSON_STRING_(Duplicates, 'd', 'u', 'p', 'l', 'i', 'c', 'a', 't', 'e', 's') + +#undef RAPIDJSON_STRING_ + #if RAPIDJSON_SCHEMA_VERBOSE #define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() \ RAPIDJSON_MULTILINEMACRO_BEGIN\ @@ -1679,14 +2094,14 @@ RAPIDJSON_MULTILINEMACRO_END } #define RAPIDJSON_SCHEMA_HANDLE_END_(method, arg2)\ - return valid_ = EndValue() && outputHandler_.method arg2 + return valid_ = EndValue() && (!outputHandler_ || outputHandler_->method arg2) #define RAPIDJSON_SCHEMA_HANDLE_VALUE_(method, arg1, arg2) \ RAPIDJSON_SCHEMA_HANDLE_BEGIN_ (method, arg1);\ RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2);\ RAPIDJSON_SCHEMA_HANDLE_END_ (method, arg2) - bool Null() { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Null, (CurrentContext() ), ( )); } + bool Null() { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Null, (CurrentContext()), ( )); } bool Bool(bool b) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Bool, (CurrentContext(), b), (b)); } bool Int(int i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int, (CurrentContext(), i), (i)); } bool Uint(unsigned u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint, (CurrentContext(), u), (u)); } @@ -1701,7 +2116,7 @@ RAPIDJSON_MULTILINEMACRO_END bool StartObject() { RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartObject, (CurrentContext())); RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartObject, ()); - return 
valid_ = outputHandler_.StartObject(); + return valid_ = !outputHandler_ || outputHandler_->StartObject(); } bool Key(const Ch* str, SizeType len, bool copy) { @@ -1709,7 +2124,7 @@ RAPIDJSON_MULTILINEMACRO_END AppendToken(str, len); if (!CurrentSchema().Key(CurrentContext(), str, len, copy)) return valid_ = false; RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(Key, (str, len, copy)); - return valid_ = outputHandler_.Key(str, len, copy); + return valid_ = !outputHandler_ || outputHandler_->Key(str, len, copy); } bool EndObject(SizeType memberCount) { @@ -1722,7 +2137,7 @@ RAPIDJSON_MULTILINEMACRO_END bool StartArray() { RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartArray, (CurrentContext())); RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartArray, ()); - return valid_ = outputHandler_.StartArray(); + return valid_ = !outputHandler_ || outputHandler_->StartArray(); } bool EndArray(SizeType elementCount) { @@ -1739,7 +2154,7 @@ RAPIDJSON_MULTILINEMACRO_END // Implementation of ISchemaStateFactory virtual ISchemaValidator* CreateSchemaValidator(const SchemaType& root) { - return new (GetStateAllocator().Malloc(sizeof(GenericSchemaValidator))) GenericSchemaValidator(*schemaDocument_, root, + return new (GetStateAllocator().Malloc(sizeof(GenericSchemaValidator))) GenericSchemaValidator(*schemaDocument_, root, documentStack_.template Bottom(), documentStack_.GetSize(), #if RAPIDJSON_SCHEMA_VERBOSE depth_ + 1, #endif @@ -1771,7 +2186,7 @@ RAPIDJSON_MULTILINEMACRO_END } virtual void FreeState(void* p) { - return StateAllocator::Free(p); + StateAllocator::Free(p); } private: @@ -1782,6 +2197,7 @@ private: GenericSchemaValidator( const SchemaDocumentType& schemaDocument, const SchemaType& root, + const char* basePath, size_t basePathSize, #if RAPIDJSON_SCHEMA_VERBOSE unsigned depth, #endif @@ -1791,21 +2207,26 @@ private: : schemaDocument_(&schemaDocument), root_(root), - outputHandler_(GetNullHandler()), stateAllocator_(allocator), ownStateAllocator_(0), schemaStack_(allocator, schemaStackCapacity), 
documentStack_(allocator, documentStackCapacity), + outputHandler_(0), + error_(kObjectType), + currentError_(), + missingDependents_(), valid_(true) #if RAPIDJSON_SCHEMA_VERBOSE , depth_(depth) #endif { + if (basePath && basePathSize) + memcpy(documentStack_.template Push(basePathSize), basePath, basePathSize); } StateAllocator& GetStateAllocator() { if (!stateAllocator_) - stateAllocator_ = ownStateAllocator_ = RAPIDJSON_NEW(StateAllocator()); + stateAllocator_ = ownStateAllocator_ = RAPIDJSON_NEW(StateAllocator)(); return *stateAllocator_; } @@ -1823,8 +2244,8 @@ private: const SchemaType** sa = CurrentContext().patternPropertiesSchemas; typename Context::PatternValidatorType patternValidatorType = CurrentContext().valuePatternValidatorType; bool valueUniqueness = CurrentContext().valueUniqueness; - if (CurrentContext().valueSchema) - PushSchema(*CurrentContext().valueSchema); + RAPIDJSON_ASSERT(CurrentContext().valueSchema); + PushSchema(*CurrentContext().valueSchema); if (count > 0) { CurrentContext().objectPatternValidatorType = patternValidatorType; @@ -1864,8 +2285,10 @@ private: if (!a) CurrentContext().arrayElementHashCodes = a = new (GetStateAllocator().Malloc(sizeof(HashCodeArray))) HashCodeArray(kArrayType); for (typename HashCodeArray::ConstValueIterator itr = a->Begin(); itr != a->End(); ++itr) - if (itr->GetUint64() == h) + if (itr->GetUint64() == h) { + DuplicateItems(static_cast(itr - a->Begin()), a->Size()); RAPIDJSON_INVALID_KEYWORD_RETURN(SchemaType::GetUniqueItemsString()); + } a->PushBack(h, GetStateAllocator()); } } @@ -1894,7 +2317,7 @@ private: } } - RAPIDJSON_FORCEINLINE void PushSchema(const SchemaType& schema) { new (schemaStack_.template Push()) Context(*this, &schema); } + RAPIDJSON_FORCEINLINE void PushSchema(const SchemaType& schema) { new (schemaStack_.template Push()) Context(*this, *this, &schema); } RAPIDJSON_FORCEINLINE void PopSchema() { Context* c = schemaStack_.template Pop(1); @@ -1905,24 +2328,86 @@ private: c->~Context(); 
} + void AddErrorLocation(ValueType& result, bool parent) { + GenericStringBuffer sb; + PointerType instancePointer = GetInvalidDocumentPointer(); + ((parent && instancePointer.GetTokenCount() > 0) + ? PointerType(instancePointer.GetTokens(), instancePointer.GetTokenCount() - 1) + : instancePointer).StringifyUriFragment(sb); + ValueType instanceRef(sb.GetString(), static_cast(sb.GetSize() / sizeof(Ch)), + GetStateAllocator()); + result.AddMember(GetInstanceRefString(), instanceRef, GetStateAllocator()); + sb.Clear(); + memcpy(sb.Push(CurrentSchema().GetURI().GetStringLength()), + CurrentSchema().GetURI().GetString(), + CurrentSchema().GetURI().GetStringLength() * sizeof(Ch)); + GetInvalidSchemaPointer().StringifyUriFragment(sb); + ValueType schemaRef(sb.GetString(), static_cast(sb.GetSize() / sizeof(Ch)), + GetStateAllocator()); + result.AddMember(GetSchemaRefString(), schemaRef, GetStateAllocator()); + } + + void AddError(ValueType& keyword, ValueType& error) { + typename ValueType::MemberIterator member = error_.FindMember(keyword); + if (member == error_.MemberEnd()) + error_.AddMember(keyword, error, GetStateAllocator()); + else { + if (member->value.IsObject()) { + ValueType errors(kArrayType); + errors.PushBack(member->value, GetStateAllocator()); + member->value = errors; + } + member->value.PushBack(error, GetStateAllocator()); + } + } + + void AddCurrentError(const typename SchemaType::ValueType& keyword, bool parent = false) { + AddErrorLocation(currentError_, parent); + AddError(ValueType(keyword, GetStateAllocator(), false).Move(), currentError_); + } + + void MergeError(ValueType& other) { + for (typename ValueType::MemberIterator it = other.MemberBegin(), end = other.MemberEnd(); it != end; ++it) { + AddError(it->name, it->value); + } + } + + void AddNumberError(const typename SchemaType::ValueType& keyword, ValueType& actual, const SValue& expected, + const typename SchemaType::ValueType& (*exclusive)() = 0) { + currentError_.SetObject(); + 
currentError_.AddMember(GetActualString(), actual, GetStateAllocator()); + currentError_.AddMember(GetExpectedString(), ValueType(expected, GetStateAllocator()).Move(), GetStateAllocator()); + if (exclusive) + currentError_.AddMember(ValueType(exclusive(), GetStateAllocator()).Move(), true, GetStateAllocator()); + AddCurrentError(keyword); + } + + void AddErrorArray(const typename SchemaType::ValueType& keyword, + ISchemaValidator** subvalidators, SizeType count) { + ValueType errors(kArrayType); + for (SizeType i = 0; i < count; ++i) + errors.PushBack(static_cast(subvalidators[i])->GetError(), GetStateAllocator()); + currentError_.SetObject(); + currentError_.AddMember(GetErrorsString(), errors, GetStateAllocator()); + AddCurrentError(keyword); + } + const SchemaType& CurrentSchema() const { return *schemaStack_.template Top()->schema; } Context& CurrentContext() { return *schemaStack_.template Top(); } const Context& CurrentContext() const { return *schemaStack_.template Top(); } - static OutputHandler& GetNullHandler() { - static OutputHandler nullHandler; - return nullHandler; - } - static const size_t kDefaultSchemaStackCapacity = 1024; static const size_t kDefaultDocumentStackCapacity = 256; const SchemaDocumentType* schemaDocument_; const SchemaType& root_; - OutputHandler& outputHandler_; StateAllocator* stateAllocator_; StateAllocator* ownStateAllocator_; internal::Stack schemaStack_; //!< stack to store the current path of schema (BaseSchemaType *) internal::Stack documentStack_; //!< stack to store the current path of validating document (Ch) + OutputHandler* outputHandler_; + ValueType error_; + ValueType currentError_; + ValueType missingDependents_; bool valid_; #if RAPIDJSON_SCHEMA_VERBOSE unsigned depth_; @@ -1954,13 +2439,14 @@ class SchemaValidatingReader { public: typedef typename SchemaDocumentType::PointerType PointerType; typedef typename InputStream::Ch Ch; + typedef GenericValue ValueType; //! Constructor /*! \param is Input stream. 
\param sd Schema document. */ - SchemaValidatingReader(InputStream& is, const SchemaDocumentType& sd) : is_(is), sd_(sd), invalidSchemaKeyword_(), isValid_(true) {} + SchemaValidatingReader(InputStream& is, const SchemaDocumentType& sd) : is_(is), sd_(sd), invalidSchemaKeyword_(), error_(kObjectType), isValid_(true) {} template bool operator()(Handler& handler) { @@ -1973,11 +2459,13 @@ public: invalidSchemaPointer_ = PointerType(); invalidSchemaKeyword_ = 0; invalidDocumentPointer_ = PointerType(); + error_.SetObject(); } else { invalidSchemaPointer_ = validator.GetInvalidSchemaPointer(); invalidSchemaKeyword_ = validator.GetInvalidSchemaKeyword(); invalidDocumentPointer_ = validator.GetInvalidDocumentPointer(); + error_.CopyFrom(validator.GetError(), allocator_); } return parseResult_; @@ -1988,6 +2476,7 @@ public: const PointerType& GetInvalidSchemaPointer() const { return invalidSchemaPointer_; } const Ch* GetInvalidSchemaKeyword() const { return invalidSchemaKeyword_; } const PointerType& GetInvalidDocumentPointer() const { return invalidDocumentPointer_; } + const ValueType& GetError() const { return error_; } private: InputStream& is_; @@ -1997,6 +2486,8 @@ private: PointerType invalidSchemaPointer_; const Ch* invalidSchemaKeyword_; PointerType invalidDocumentPointer_; + StackAllocator allocator_; + ValueType error_; bool isValid_; }; diff --git a/src/3rdparty/rapidjson/stream.h b/src/3rdparty/rapidjson/stream.h index fef82c25..7f2643e4 100644 --- a/src/3rdparty/rapidjson/stream.h +++ b/src/3rdparty/rapidjson/stream.h @@ -1,5 +1,5 @@ // Tencent is pleased to support the open source community by making RapidJSON available. -// +// // Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 
// // Licensed under the MIT License (the "License"); you may not use this file except @@ -7,9 +7,9 @@ // // http://opensource.org/licenses/MIT // -// Unless required by applicable law or agreed to in writing, software distributed -// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "rapidjson.h" @@ -100,6 +100,50 @@ inline void PutN(Stream& stream, Ch c, size_t n) { PutUnsafe(stream, c); } +/////////////////////////////////////////////////////////////////////////////// +// GenericStreamWrapper + +//! A Stream Wrapper +/*! \tThis string stream is a wrapper for any stream by just forwarding any + \treceived message to the origin stream. 
+ \note implements Stream concept +*/ + +#if defined(_MSC_VER) && _MSC_VER <= 1800 +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4702) // unreachable code +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +template > +class GenericStreamWrapper { +public: + typedef typename Encoding::Ch Ch; + GenericStreamWrapper(InputStream& is): is_(is) {} + + Ch Peek() const { return is_.Peek(); } + Ch Take() { return is_.Take(); } + size_t Tell() { return is_.Tell(); } + Ch* PutBegin() { return is_.PutBegin(); } + void Put(Ch ch) { is_.Put(ch); } + void Flush() { is_.Flush(); } + size_t PutEnd(Ch* ch) { return is_.PutEnd(ch); } + + // wrapper for MemoryStream + const Ch* Peek4() const { return is_.Peek4(); } + + // wrapper for AutoUTFInputStream + UTFType GetType() const { return is_.GetType(); } + bool HasBOM() const { return is_.HasBOM(); } + +protected: + InputStream& is_; +}; + +#if defined(_MSC_VER) && _MSC_VER <= 1800 +RAPIDJSON_DIAG_POP +#endif + /////////////////////////////////////////////////////////////////////////////// // StringStream diff --git a/src/3rdparty/rapidjson/stringbuffer.h b/src/3rdparty/rapidjson/stringbuffer.h index 78f34d20..4e38b82c 100644 --- a/src/3rdparty/rapidjson/stringbuffer.h +++ b/src/3rdparty/rapidjson/stringbuffer.h @@ -78,8 +78,12 @@ public: return stack_.template Bottom(); } + //! Get the size of string in bytes in the string buffer. size_t GetSize() const { return stack_.GetSize(); } + //! Get the length of string in Ch in the string buffer. 
+ size_t GetLength() const { return stack_.GetSize() / sizeof(Ch); } + static const size_t kDefaultCapacity = 256; mutable internal::Stack stack_; diff --git a/src/3rdparty/rapidjson/writer.h b/src/3rdparty/rapidjson/writer.h index 94f22dd5..6f5b6903 100644 --- a/src/3rdparty/rapidjson/writer.h +++ b/src/3rdparty/rapidjson/writer.h @@ -16,6 +16,7 @@ #define RAPIDJSON_WRITER_H_ #include "stream.h" +#include "internal/meta.h" #include "internal/stack.h" #include "internal/strfunc.h" #include "internal/dtoa.h" @@ -31,17 +32,18 @@ #include #elif defined(RAPIDJSON_SSE2) #include -#endif - -#ifdef _MSC_VER -RAPIDJSON_DIAG_PUSH -RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +#elif defined(RAPIDJSON_NEON) +#include #endif #ifdef __clang__ RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_OFF(padded) RAPIDJSON_DIAG_OFF(unreachable-code) +RAPIDJSON_DIAG_OFF(c++98-compat) +#elif defined(_MSC_VER) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant #endif RAPIDJSON_NAMESPACE_BEGIN @@ -103,6 +105,13 @@ public: Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) : os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {} +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + Writer(Writer&& rhs) : + os_(rhs.os_), level_stack_(std::move(rhs.level_stack_)), maxDecimalPlaces_(rhs.maxDecimalPlaces_), hasRoot_(rhs.hasRoot_) { + rhs.os_ = 0; + } +#endif + //! Reset the writer with a new stream. /*! 
This function reset the writer with a new stream and default settings, @@ -184,12 +193,14 @@ public: bool Double(double d) { Prefix(kNumberType); return EndValue(WriteDouble(d)); } bool RawNumber(const Ch* str, SizeType length, bool copy = false) { + RAPIDJSON_ASSERT(str != 0); (void)copy; Prefix(kNumberType); return EndValue(WriteString(str, length)); } bool String(const Ch* str, SizeType length, bool copy = false) { + RAPIDJSON_ASSERT(str != 0); (void)copy; Prefix(kStringType); return EndValue(WriteString(str, length)); @@ -209,10 +220,18 @@ public: bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } +#if RAPIDJSON_HAS_STDSTRING + bool Key(const std::basic_string& str) + { + return Key(str.data(), SizeType(str.size())); + } +#endif + bool EndObject(SizeType memberCount = 0) { (void)memberCount; - RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); - RAPIDJSON_ASSERT(!level_stack_.template Top()->inArray); + RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); // not inside an Object + RAPIDJSON_ASSERT(!level_stack_.template Top()->inArray); // currently inside an Array, not Object + RAPIDJSON_ASSERT(0 == level_stack_.template Top()->valueCount % 2); // Object has a Key without a Value level_stack_.template Pop(1); return EndValue(WriteEndObject()); } @@ -236,9 +255,9 @@ public: //@{ //! Simpler but slower overload. - bool String(const Ch* str) { return String(str, internal::StrLen(str)); } - bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); } - + bool String(const Ch* const& str) { return String(str, internal::StrLen(str)); } + bool Key(const Ch* const& str) { return Key(str, internal::StrLen(str)); } + //@} //! Write a raw JSON value. @@ -249,7 +268,19 @@ public: \param length Length of the json. \param type Type of the root of json. 
*/ - bool RawValue(const Ch* json, size_t length, Type type) { Prefix(type); return EndValue(WriteRawValue(json, length)); } + bool RawValue(const Ch* json, size_t length, Type type) { + RAPIDJSON_ASSERT(json != 0); + Prefix(type); + return EndValue(WriteRawValue(json, length)); + } + + //! Flush the output stream. + /*! + Allows the user to flush the output stream immediately. + */ + void Flush() { + os_->Flush(); + } protected: //! Information for each nested level @@ -283,7 +314,7 @@ protected: const char* end = internal::i32toa(i, buffer); PutReserve(*os_, static_cast(end - buffer)); for (const char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast(*p)); + PutUnsafe(*os_, static_cast(*p)); return true; } @@ -292,7 +323,7 @@ protected: const char* end = internal::u32toa(u, buffer); PutReserve(*os_, static_cast(end - buffer)); for (const char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast(*p)); + PutUnsafe(*os_, static_cast(*p)); return true; } @@ -301,7 +332,7 @@ protected: const char* end = internal::i64toa(i64, buffer); PutReserve(*os_, static_cast(end - buffer)); for (const char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast(*p)); + PutUnsafe(*os_, static_cast(*p)); return true; } @@ -310,7 +341,7 @@ protected: char* end = internal::u64toa(u64, buffer); PutReserve(*os_, static_cast(end - buffer)); for (char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast(*p)); + PutUnsafe(*os_, static_cast(*p)); return true; } @@ -338,12 +369,12 @@ protected: char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); PutReserve(*os_, static_cast(end - buffer)); for (char* p = buffer; p != end; ++p) - PutUnsafe(*os_, static_cast(*p)); + PutUnsafe(*os_, static_cast(*p)); return true; } bool WriteString(const Ch* str, SizeType length) { - static const typename TargetEncoding::Ch hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + static const typename OutputStream::Ch hexDigits[16] = { '0', 
'1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; static const char escape[256] = { #define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 //0 1 2 3 4 5 6 7 8 9 A B C D E F @@ -399,7 +430,7 @@ protected: else if ((sizeof(Ch) == 1 || static_cast(c) < 256) && RAPIDJSON_UNLIKELY(escape[static_cast(c)])) { is.Take(); PutUnsafe(*os_, '\\'); - PutUnsafe(*os_, static_cast(escape[static_cast(c)])); + PutUnsafe(*os_, static_cast(escape[static_cast(c)])); if (escape[static_cast(c)] == 'u') { PutUnsafe(*os_, '0'); PutUnsafe(*os_, '0'); @@ -427,9 +458,13 @@ protected: bool WriteRawValue(const Ch* json, size_t length) { PutReserve(*os_, length); - for (size_t i = 0; i < length; i++) { - RAPIDJSON_ASSERT(json[i] != '\0'); - PutUnsafe(*os_, json[i]); + GenericStringStream is(json); + while (RAPIDJSON_LIKELY(is.Tell() < length)) { + RAPIDJSON_ASSERT(is.Peek() != '\0'); + if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ? + Transcoder::Validate(is, *os_) : + Transcoder::TranscodeUnsafe(is, *os_)))) + return false; } return true; } @@ -457,7 +492,7 @@ protected: // Flush the value if it is the top level one. 
bool EndValue(bool ret) { if (RAPIDJSON_UNLIKELY(level_stack_.Empty())) // end of json text - os_->Flush(); + Flush(); return ret; } @@ -561,7 +596,7 @@ inline bool Writer::ScanWriteUnescapedString(StringStream& is, siz // The rest of string using SIMD static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; - static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F }; const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); @@ -570,7 +605,7 @@ inline bool Writer::ScanWriteUnescapedString(StringStream& is, siz const __m128i s = _mm_load_si128(reinterpret_cast(p)); const __m128i t1 = _mm_cmpeq_epi8(s, dq); const __m128i t2 = _mm_cmpeq_epi8(s, bs); - const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); unsigned short r = static_cast(_mm_movemask_epi8(x)); if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped @@ -595,15 +630,79 @@ inline bool Writer::ScanWriteUnescapedString(StringStream& is, siz is.src_ = p; return RAPIDJSON_LIKELY(is.Tell() < length); } -#endif // defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) +#elif defined(RAPIDJSON_NEON) +template<> +inline bool Writer::ScanWriteUnescapedString(StringStream& is, size_t length) { + if (length < 16) + return RAPIDJSON_LIKELY(is.Tell() < length); + + if 
(!RAPIDJSON_LIKELY(is.Tell() < length)) + return false; + + const char* p = is.src_; + const char* end = is.head_ + length; + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + const char* endAligned = reinterpret_cast(reinterpret_cast(end) & static_cast(~15)); + if (nextAligned > end) + return true; + + while (p != nextAligned) + if (*p < 0x20 || *p == '\"' || *p == '\\') { + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); + } + else + os_->PutUnsafe(*p++); + + // The rest of string using SIMD + const uint8x16_t s0 = vmovq_n_u8('"'); + const uint8x16_t s1 = vmovq_n_u8('\\'); + const uint8x16_t s2 = vmovq_n_u8('\b'); + const uint8x16_t s3 = vmovq_n_u8(32); + + for (; p != endAligned; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, s0); + x = vorrq_u8(x, vceqq_u8(s, s1)); + x = vorrq_u8(x, vceqq_u8(s, s2)); + x = vorrq_u8(x, vcltq_u8(s, s3)); + + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(reinterpret_cast(x), 0); // extract + uint64_t high = vgetq_lane_u64(reinterpret_cast(x), 1); // extract + + SizeType len = 0; + bool escaped = false; + if (low == 0) { + if (high != 0) { + unsigned lz = (unsigned)__builtin_clzll(high); + len = 8 + (lz >> 3); + escaped = true; + } + } else { + unsigned lz = (unsigned)__builtin_clzll(low); + len = lz >> 3; + escaped = true; + } + if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped + char* q = reinterpret_cast(os_->PushUnsafe(len)); + for (size_t i = 0; i < len; i++) + q[i] = p[i]; + + p += len; + break; + } + vst1q_u8(reinterpret_cast(os_->PushUnsafe(16)), s); + } + + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); +} +#endif // RAPIDJSON_NEON RAPIDJSON_NAMESPACE_END -#ifdef _MSC_VER -RAPIDJSON_DIAG_POP -#endif - -#ifdef __clang__ +#if defined(_MSC_VER) || defined(__clang__) RAPIDJSON_DIAG_POP #endif From a8e86c3530ae6376c7a752dd07c10d6cfe101967 Mon Sep 17 00:00:00 2001 From: XMRig 
Date: Sun, 4 Aug 2019 22:07:05 +0700 Subject: [PATCH 123/172] More compact JSON formatting. --- src/3rdparty/rapidjson/prettywriter.h | 12 ++++++++---- src/3rdparty/rapidjson/rapidjson.h | 2 +- src/3rdparty/rapidjson/writer.h | 1 + src/base/io/json/Json_unix.cpp | 2 ++ src/base/io/json/Json_win.cpp | 2 ++ src/base/net/http/HttpApiResponse.cpp | 2 ++ 6 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/3rdparty/rapidjson/prettywriter.h b/src/3rdparty/rapidjson/prettywriter.h index 45afb694..c7c29b21 100644 --- a/src/3rdparty/rapidjson/prettywriter.h +++ b/src/3rdparty/rapidjson/prettywriter.h @@ -164,9 +164,10 @@ public: (void)memberCount; RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); RAPIDJSON_ASSERT(Base::level_stack_.template Top()->inArray); - bool empty = Base::level_stack_.template Pop(1)->valueCount == 0; + typename Base::Level* level = Base::level_stack_.template Pop(1); + bool empty = level->valueCount == 0; - if (!empty && !(formatOptions_ & kFormatSingleLineArray)) { + if (!empty && !level->inLine) { Base::os_->Put('\n'); WriteIndent(); } @@ -211,13 +212,16 @@ protected: typename Base::Level* level = Base::level_stack_.template Top(); if (level->inArray) { + level->inLine = (formatOptions_ & kFormatSingleLineArray) && type != kObjectType && type != kArrayType; + if (level->valueCount > 0) { Base::os_->Put(','); // add comma if it is not the first element in array - if (formatOptions_ & kFormatSingleLineArray) + if (level->inLine) { Base::os_->Put(' '); + } } - if (!(formatOptions_ & kFormatSingleLineArray)) { + if (!level->inLine) { Base::os_->Put('\n'); WriteIndent(); } diff --git a/src/3rdparty/rapidjson/rapidjson.h b/src/3rdparty/rapidjson/rapidjson.h index 549936ff..78c8aae0 100644 --- a/src/3rdparty/rapidjson/rapidjson.h +++ b/src/3rdparty/rapidjson/rapidjson.h @@ -403,7 +403,7 @@ RAPIDJSON_NAMESPACE_END */ #ifndef RAPIDJSON_ASSERT #include -#define RAPIDJSON_ASSERT(x) assert(x) +#define 
RAPIDJSON_ASSERT(x) #endif // RAPIDJSON_ASSERT /////////////////////////////////////////////////////////////////////////////// diff --git a/src/3rdparty/rapidjson/writer.h b/src/3rdparty/rapidjson/writer.h index 6f5b6903..1d33b2f9 100644 --- a/src/3rdparty/rapidjson/writer.h +++ b/src/3rdparty/rapidjson/writer.h @@ -288,6 +288,7 @@ protected: Level(bool inArray_) : valueCount(0), inArray(inArray_) {} size_t valueCount; //!< number of values in this level bool inArray; //!< true if in array, otherwise in object + bool inLine = false; }; static const size_t kDefaultLevelDepth = 32; diff --git a/src/base/io/json/Json_unix.cpp b/src/base/io/json/Json_unix.cpp index eeef9564..dedea947 100644 --- a/src/base/io/json/Json_unix.cpp +++ b/src/base/io/json/Json_unix.cpp @@ -56,6 +56,8 @@ bool xmrig::Json::save(const char *fileName, const rapidjson::Document &doc) rapidjson::OStreamWrapper osw(ofs); rapidjson::PrettyWriter writer(osw); + writer.SetFormatOptions(rapidjson::kFormatSingleLineArray); + doc.Accept(writer); return true; diff --git a/src/base/io/json/Json_win.cpp b/src/base/io/json/Json_win.cpp index 0faccdea..73aff2c5 100644 --- a/src/base/io/json/Json_win.cpp +++ b/src/base/io/json/Json_win.cpp @@ -118,6 +118,8 @@ bool xmrig::Json::save(const char *fileName, const rapidjson::Document &doc) OStreamWrapper osw(ofs); PrettyWriter writer(osw); + writer.SetFormatOptions(kFormatSingleLineArray); + doc.Accept(writer); return true; diff --git a/src/base/net/http/HttpApiResponse.cpp b/src/base/net/http/HttpApiResponse.cpp index bf91445a..5fe92636 100644 --- a/src/base/net/http/HttpApiResponse.cpp +++ b/src/base/net/http/HttpApiResponse.cpp @@ -80,6 +80,8 @@ void xmrig::HttpApiResponse::end() StringBuffer buffer(nullptr, 4096); PrettyWriter writer(buffer); writer.SetMaxDecimalPlaces(10); + writer.SetFormatOptions(kFormatSingleLineArray); + m_doc.Accept(writer); HttpResponse::end(buffer.GetString(), buffer.GetSize()); From 044fbd7e8289f65e89cd07df3f048e854616b847 Mon Sep 17 
00:00:00 2001 From: XMRig Date: Mon, 5 Aug 2019 16:00:48 +0700 Subject: [PATCH 124/172] Changed init signature. --- src/crypto/rx/RxCache.cpp | 4 ++-- src/crypto/rx/RxCache.h | 4 ++-- src/crypto/rx/RxDataset.cpp | 2 +- src/crypto/rx/RxDataset.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/crypto/rx/RxCache.cpp b/src/crypto/rx/RxCache.cpp index a5e9efb3..13ff0cd3 100644 --- a/src/crypto/rx/RxCache.cpp +++ b/src/crypto/rx/RxCache.cpp @@ -62,7 +62,7 @@ xmrig::RxCache::~RxCache() } -bool xmrig::RxCache::init(const void *seed) +bool xmrig::RxCache::init(const uint8_t *seed) { if (isReady(seed)) { return false; @@ -75,7 +75,7 @@ bool xmrig::RxCache::init(const void *seed) } -bool xmrig::RxCache::isReady(const void *seed) const +bool xmrig::RxCache::isReady(const uint8_t *seed) const { return memcmp(m_seed, seed, sizeof(m_seed)) == 0; } diff --git a/src/crypto/rx/RxCache.h b/src/crypto/rx/RxCache.h index 80c1faba..433058b7 100644 --- a/src/crypto/rx/RxCache.h +++ b/src/crypto/rx/RxCache.h @@ -52,12 +52,12 @@ public: inline const uint8_t *seed() const { return m_seed; } inline randomx_cache *get() const { return m_cache; } - bool init(const void *seed); + bool init(const uint8_t *seed); static inline constexpr size_t size() { return RANDOMX_CACHE_MAX_SIZE; } private: - bool isReady(const void *seed) const; + bool isReady(const uint8_t *seed) const; int m_flags = 0; randomx_cache *m_cache = nullptr; diff --git a/src/crypto/rx/RxDataset.cpp b/src/crypto/rx/RxDataset.cpp index 7d498c4d..50459a55 100644 --- a/src/crypto/rx/RxDataset.cpp +++ b/src/crypto/rx/RxDataset.cpp @@ -64,7 +64,7 @@ xmrig::RxDataset::~RxDataset() } -bool xmrig::RxDataset::init(const void *seed, uint32_t numThreads) +bool xmrig::RxDataset::init(const uint8_t *seed, uint32_t numThreads) { cache()->init(seed); diff --git a/src/crypto/rx/RxDataset.h b/src/crypto/rx/RxDataset.h index d3488668..932f4ed9 100644 --- a/src/crypto/rx/RxDataset.h +++ b/src/crypto/rx/RxDataset.h @@ -52,7 
+52,7 @@ public: inline randomx_dataset *get() const { return m_dataset; } inline RxCache *cache() const { return m_cache; } - bool init(const void *seed, uint32_t numThreads); + bool init(const uint8_t *seed, uint32_t numThreads); std::pair hugePages() const; static inline constexpr size_t size() { return RANDOMX_DATASET_MAX_SIZE; } From 3543abcc3cb99155b3620b53daa304daae373b2a Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 5 Aug 2019 17:48:12 +0700 Subject: [PATCH 125/172] Fixed crash and added error message about seed_hash field. --- src/base/net/stratum/Client.cpp | 13 +++++++++++-- src/crypto/rx/RxCache.cpp | 2 +- src/crypto/rx/RxCache.h | 2 ++ 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/base/net/stratum/Client.cpp b/src/base/net/stratum/Client.cpp index 618e132c..fe182336 100644 --- a/src/base/net/stratum/Client.cpp +++ b/src/base/net/stratum/Client.cpp @@ -335,13 +335,19 @@ bool xmrig::Client::parseJob(const rapidjson::Value ¶ms, int *code) job.setAlgorithm(algo); } - job.setSeedHash(Json::getString(params, "seed_hash")); job.setHeight(Json::getUint64(params, "height")); if (!verifyAlgorithm(job.algorithm(), algo)) { *code = 6; + return false; + } - close(); + if (job.algorithm().family() == Algorithm::RANDOM_X && !job.setSeedHash(Json::getString(params, "seed_hash"))) { + if (!isQuiet()) { + LOG_ERR("[%s] failed to parse field \"seed_hash\" required by RandomX", url(), algo); + } + + *code = 7; return false; } @@ -693,6 +699,9 @@ void xmrig::Client::parseNotification(const char *method, const rapidjson::Value if (parseJob(params, &code)) { m_listener->onJobReceived(this, m_job, params); } + else { + close(); + } return; } diff --git a/src/crypto/rx/RxCache.cpp b/src/crypto/rx/RxCache.cpp index 13ff0cd3..985f4bf3 100644 --- a/src/crypto/rx/RxCache.cpp +++ b/src/crypto/rx/RxCache.cpp @@ -77,5 +77,5 @@ bool xmrig::RxCache::init(const uint8_t *seed) bool xmrig::RxCache::isReady(const uint8_t *seed) const { - return memcmp(m_seed, seed, 
sizeof(m_seed)) == 0; + return m_initCount && memcmp(m_seed, seed, sizeof(m_seed)) == 0; } diff --git a/src/crypto/rx/RxCache.h b/src/crypto/rx/RxCache.h index 433058b7..e6b2397c 100644 --- a/src/crypto/rx/RxCache.h +++ b/src/crypto/rx/RxCache.h @@ -51,6 +51,7 @@ public: inline bool isJIT() const { return m_flags & 8; } inline const uint8_t *seed() const { return m_seed; } inline randomx_cache *get() const { return m_cache; } + inline uint64_t initCount() const { return m_initCount; } bool init(const uint8_t *seed); @@ -61,6 +62,7 @@ private: int m_flags = 0; randomx_cache *m_cache = nullptr; + uint64_t m_initCount = 0; uint8_t m_seed[32]; }; From 4afc987111652b3116518c9eb9ac5d4ba2dd7821 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 5 Aug 2019 18:20:56 +0700 Subject: [PATCH 126/172] Fix for previous commit. --- src/crypto/rx/RxCache.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/crypto/rx/RxCache.cpp b/src/crypto/rx/RxCache.cpp index 985f4bf3..92c366a2 100644 --- a/src/crypto/rx/RxCache.cpp +++ b/src/crypto/rx/RxCache.cpp @@ -71,6 +71,8 @@ bool xmrig::RxCache::init(const uint8_t *seed) memcpy(m_seed, seed, sizeof(m_seed)); randomx_init_cache(m_cache, m_seed, sizeof(m_seed)); + m_initCount++; + return true; } From 66d8598f9f6e0315367dda36999180b3b78c909a Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 5 Aug 2019 21:55:52 +0700 Subject: [PATCH 127/172] #1092 Fixed crash if use wrong affinity on NUMA systems. 
--- src/crypto/common/VirtualMemory.cpp | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/crypto/common/VirtualMemory.cpp b/src/crypto/common/VirtualMemory.cpp index 5f7c4551..081b6c0f 100644 --- a/src/crypto/common/VirtualMemory.cpp +++ b/src/crypto/common/VirtualMemory.cpp @@ -62,15 +62,18 @@ uint32_t xmrig::VirtualMemory::bindToNUMANode(int64_t affinity) LOG_WARN("CPU #%02u warning: \"can't bind memory\"", puId); } - hwloc_obj_t node = nullptr; - uint32_t nodeId = 0; + uint32_t nodeId = 0; - while ((node = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NUMANODE, node)) != nullptr) { - if (hwloc_bitmap_intersects(node->cpuset, pu->cpuset)) { - nodeId = node->os_index; + if (pu) { + hwloc_obj_t node = nullptr; - break; - } + while ((node = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NUMANODE, node)) != nullptr) { + if (hwloc_bitmap_intersects(node->cpuset, pu->cpuset)) { + nodeId = node->os_index; + + break; + } + } } hwloc_topology_destroy(topology); From d4c5e414c27087161c7e8c7936a049ef03a0319e Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 6 Aug 2019 12:28:37 +0700 Subject: [PATCH 128/172] Added hwloc topology examples. 
--- doc/topology/AMD_FX_8320_windows_2_0_4.xml | 86 +++ ...AMD_Opteron_6272_x4_N8_linux_2_0_4_LXC.xml | 234 ++++++ .../AMD_Opteron_6278_x2_UMA_windows_2_0_4.xml | 294 ++++++++ .../AMD_Opteron_6348_x4_N8_linux_1_11_2.xml | 550 ++++++++++++++ .../AMD_Opteron_6380_x4_N8_linux_1_11_5.xml | 670 ++++++++++++++++++ .../AMD_Ryzen_7_2700X_windows_2_0_4.xml | 105 +++ ...yzen_Threadripper_2950X_N2_linux_2_0_4.xml | 226 ++++++ ...en_Threadripper_2950X_UMA_linux_1_11_9.xml | 328 +++++++++ .../Intel_Core_i7-3770_linux_2_0_4.xml | 87 +++ .../Intel_Core_i7-6700_linux_2_0_4.xml | 88 +++ .../Intel_Core_i7-6700_windows_2_0_4.xml | 61 ++ .../Intel_Core_i7-7660U_windows_2_0_4.xml | 41 ++ ...tel_Xeon_E5-4650_0_x4_N4_windows_2_0_4.xml | 477 +++++++++++++ .../Intel_Xeon_E5620_x2_UMA_windows_2_0_4.xml | 111 +++ ...Intel_Xeon_E7-4870_x4_N4_windows_2_0_4.xml | 541 ++++++++++++++ ...el_Xeon_Silver_4114_x2_N2_linux_1_11_9.xml | 403 +++++++++++ ...tel_Xeon_Silver_4114_x2_N2_linux_2_0_4.xml | 263 +++++++ 17 files changed, 4565 insertions(+) create mode 100644 doc/topology/AMD_FX_8320_windows_2_0_4.xml create mode 100644 doc/topology/AMD_Opteron_6272_x4_N8_linux_2_0_4_LXC.xml create mode 100644 doc/topology/AMD_Opteron_6278_x2_UMA_windows_2_0_4.xml create mode 100644 doc/topology/AMD_Opteron_6348_x4_N8_linux_1_11_2.xml create mode 100644 doc/topology/AMD_Opteron_6380_x4_N8_linux_1_11_5.xml create mode 100644 doc/topology/AMD_Ryzen_7_2700X_windows_2_0_4.xml create mode 100644 doc/topology/AMD_Ryzen_Threadripper_2950X_N2_linux_2_0_4.xml create mode 100644 doc/topology/AMD_Ryzen_Threadripper_2950X_UMA_linux_1_11_9.xml create mode 100644 doc/topology/Intel_Core_i7-3770_linux_2_0_4.xml create mode 100644 doc/topology/Intel_Core_i7-6700_linux_2_0_4.xml create mode 100644 doc/topology/Intel_Core_i7-6700_windows_2_0_4.xml create mode 100644 doc/topology/Intel_Core_i7-7660U_windows_2_0_4.xml create mode 100644 doc/topology/Intel_Xeon_E5-4650_0_x4_N4_windows_2_0_4.xml create mode 100644 
doc/topology/Intel_Xeon_E5620_x2_UMA_windows_2_0_4.xml create mode 100644 doc/topology/Intel_Xeon_E7-4870_x4_N4_windows_2_0_4.xml create mode 100644 doc/topology/Intel_Xeon_Silver_4114_x2_N2_linux_1_11_9.xml create mode 100644 doc/topology/Intel_Xeon_Silver_4114_x2_N2_linux_2_0_4.xml diff --git a/doc/topology/AMD_FX_8320_windows_2_0_4.xml b/doc/topology/AMD_FX_8320_windows_2_0_4.xml new file mode 100644 index 00000000..55fe6b5d --- /dev/null +++ b/doc/topology/AMD_FX_8320_windows_2_0_4.xml @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/AMD_Opteron_6272_x4_N8_linux_2_0_4_LXC.xml b/doc/topology/AMD_Opteron_6272_x4_N8_linux_2_0_4_LXC.xml new file mode 100644 index 00000000..39576bb4 --- /dev/null +++ b/doc/topology/AMD_Opteron_6272_x4_N8_linux_2_0_4_LXC.xml @@ -0,0 +1,234 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 1 2 3 4 5 6 7 + 10 16 16 22 16 22 16 22 16 10 + 22 16 22 16 22 16 16 22 10 16 + 16 22 16 22 22 16 16 10 22 16 + 22 16 16 22 16 22 10 16 16 22 + 22 16 22 16 16 10 22 16 16 22 + 16 22 16 22 10 16 22 16 22 16 + 22 16 16 10 + + \ No newline at end of file diff --git a/doc/topology/AMD_Opteron_6278_x2_UMA_windows_2_0_4.xml b/doc/topology/AMD_Opteron_6278_x2_UMA_windows_2_0_4.xml new file mode 100644 index 00000000..b59f773c --- /dev/null +++ b/doc/topology/AMD_Opteron_6278_x2_UMA_windows_2_0_4.xml @@ -0,0 +1,294 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/AMD_Opteron_6348_x4_N8_linux_1_11_2.xml b/doc/topology/AMD_Opteron_6348_x4_N8_linux_1_11_2.xml new file mode 100644 index 00000000..2d889819 --- /dev/null +++ b/doc/topology/AMD_Opteron_6348_x4_N8_linux_1_11_2.xml @@ -0,0 +1,550 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/AMD_Opteron_6380_x4_N8_linux_1_11_5.xml b/doc/topology/AMD_Opteron_6380_x4_N8_linux_1_11_5.xml new file 
mode 100644 index 00000000..2ecbe3cb --- /dev/null +++ b/doc/topology/AMD_Opteron_6380_x4_N8_linux_1_11_5.xml @@ -0,0 +1,670 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/AMD_Ryzen_7_2700X_windows_2_0_4.xml b/doc/topology/AMD_Ryzen_7_2700X_windows_2_0_4.xml new file mode 100644 index 00000000..e3ecb6fd --- /dev/null +++ b/doc/topology/AMD_Ryzen_7_2700X_windows_2_0_4.xml @@ -0,0 +1,105 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/AMD_Ryzen_Threadripper_2950X_N2_linux_2_0_4.xml 
b/doc/topology/AMD_Ryzen_Threadripper_2950X_N2_linux_2_0_4.xml new file mode 100644 index 00000000..c168e2a0 --- /dev/null +++ b/doc/topology/AMD_Ryzen_Threadripper_2950X_N2_linux_2_0_4.xml @@ -0,0 +1,226 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 1 + 10 16 16 10 + + diff --git a/doc/topology/AMD_Ryzen_Threadripper_2950X_UMA_linux_1_11_9.xml b/doc/topology/AMD_Ryzen_Threadripper_2950X_UMA_linux_1_11_9.xml new file mode 100644 index 00000000..ed3776c0 --- /dev/null +++ b/doc/topology/AMD_Ryzen_Threadripper_2950X_UMA_linux_1_11_9.xml @@ -0,0 +1,328 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Core_i7-3770_linux_2_0_4.xml b/doc/topology/Intel_Core_i7-3770_linux_2_0_4.xml new file mode 100644 index 00000000..18c80210 --- /dev/null +++ b/doc/topology/Intel_Core_i7-3770_linux_2_0_4.xml @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Core_i7-6700_linux_2_0_4.xml b/doc/topology/Intel_Core_i7-6700_linux_2_0_4.xml new file mode 100644 index 00000000..c80c7403 --- /dev/null +++ b/doc/topology/Intel_Core_i7-6700_linux_2_0_4.xml @@ -0,0 +1,88 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Core_i7-6700_windows_2_0_4.xml b/doc/topology/Intel_Core_i7-6700_windows_2_0_4.xml new file mode 100644 index 00000000..dd3c201c --- /dev/null +++ b/doc/topology/Intel_Core_i7-6700_windows_2_0_4.xml @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Core_i7-7660U_windows_2_0_4.xml b/doc/topology/Intel_Core_i7-7660U_windows_2_0_4.xml new file mode 100644 index 00000000..01a29e86 --- /dev/null +++ b/doc/topology/Intel_Core_i7-7660U_windows_2_0_4.xml @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Xeon_E5-4650_0_x4_N4_windows_2_0_4.xml b/doc/topology/Intel_Xeon_E5-4650_0_x4_N4_windows_2_0_4.xml new file mode 100644 index 00000000..7811a49f --- /dev/null +++ b/doc/topology/Intel_Xeon_E5-4650_0_x4_N4_windows_2_0_4.xml @@ -0,0 +1,477 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Xeon_E5620_x2_UMA_windows_2_0_4.xml b/doc/topology/Intel_Xeon_E5620_x2_UMA_windows_2_0_4.xml new file mode 100644 index 00000000..9dad397a --- /dev/null +++ b/doc/topology/Intel_Xeon_E5620_x2_UMA_windows_2_0_4.xml @@ -0,0 +1,111 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Xeon_E7-4870_x4_N4_windows_2_0_4.xml b/doc/topology/Intel_Xeon_E7-4870_x4_N4_windows_2_0_4.xml new file mode 100644 index 00000000..3d0a6736 --- /dev/null +++ b/doc/topology/Intel_Xeon_E7-4870_x4_N4_windows_2_0_4.xml @@ -0,0 +1,541 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Xeon_Silver_4114_x2_N2_linux_1_11_9.xml b/doc/topology/Intel_Xeon_Silver_4114_x2_N2_linux_1_11_9.xml new file mode 100644 index 00000000..0dadfed2 --- /dev/null +++ b/doc/topology/Intel_Xeon_Silver_4114_x2_N2_linux_1_11_9.xml @@ -0,0 +1,403 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/topology/Intel_Xeon_Silver_4114_x2_N2_linux_2_0_4.xml b/doc/topology/Intel_Xeon_Silver_4114_x2_N2_linux_2_0_4.xml new file mode 100644 index 00000000..fd56a10c --- /dev/null +++ b/doc/topology/Intel_Xeon_Silver_4114_x2_N2_linux_2_0_4.xml @@ -0,0 +1,263 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 1 + 10 21 21 10 + + From 46f1661fd905e6c8634ef60a7c55a94744fbcef0 Mon Sep 17 00:00:00 2001 From: xmrig Date: Tue, 6 Aug 2019 12:48:00 +0700 Subject: [PATCH 129/172] Update API.md --- doc/API.md | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/doc/API.md b/doc/API.md index 3357eabb..2cd0fbbe 100644 --- a/doc/API.md +++ b/doc/API.md @@ -1,26 +1,39 @@ # HTTP API -If you want use API you need choice a port where is internal HTTP server will listen for incoming connections. API will not available if miner built without `libmicrohttpd`. +If you want use HTTP API you need enable it (`"enabled": true,`) then choice `port` and optionaly `host`. API not available if miner built without HTTP support (`-DWITH_HTTP=OFF`). + +Offical HTTP client for API: http://workers.xmrig.info/ Example configuration: ```json "api": { - "port": 44444, - "access-token": "TOKEN", - "worker-id": null, - "ipv6": false, - "restricted": false + "id": null, + "worker-id": null, }, +"http": { + "enabled": false, + "host": "127.0.0.1", + "port": 0, + "access-token": null, + "restricted": true +} ``` -* **port** Port for incoming connections `http://:`. -* **access-token** [Bearer](https://gist.github.com/xmrig/c75fdd1f8e0f3bac05500be2ab718f8e#file-api-html-L54) access token to secure access to API. +#### Global API options +* **id** Miner ID, if not set created automatically. * **worker-id** Optional worker name, if not set will be detected automatically. -* **ipv6** Enable (`true`) or disable (`false`) IPv6 for API. + +#### HTTP API options, +* **enabled** Enable (`true`) or disable (`false`) HTTP API. +* **host** Host for incoming connections `http://:`, to allow connections from all interfaces use `0.0.0.0` (IPv4) or `::` (IPv4+IPv6). +* **port** Port for incoming connections `http://:`, zero port is valid option and means random port. 
+* **access-token** [Bearer](https://gist.github.com/xmrig/c75fdd1f8e0f3bac05500be2ab718f8e#file-api-html-L54) access token to secure access to API. Miner support this token only via `Authorization` header. * **restricted** Use `false` to allow remote configuration. -If you prefer use command line options instead of config file, you can use options: `--api-port`, `--api-access-token`, `--api-worker-id`, `--api-ipv6` and `api-no-restricted`. +If you prefer use command line options instead of config file, you can use options: `--api-id`, `--api-worker-id`, `--http-enabled`, `--http-host`, `--http-access-token`, `--http-port`, `--http-no-restricted`. + +Versions before 2.15 was use another options for API https://github.com/xmrig/xmrig/issues/1007 ## Endpoints @@ -50,4 +63,4 @@ Curl example: ``` curl -v --data-binary @config.json -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer SECRET" http://127.0.0.1:44444/1/config -``` \ No newline at end of file +``` From 3c5cb142cb10c1a24d5209c135e1690f76066150 Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 6 Aug 2019 13:21:39 +0700 Subject: [PATCH 130/172] Added "/2/config" alias for config API. 
--- src/base/kernel/Base.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/base/kernel/Base.cpp b/src/base/kernel/Base.cpp index d290e7f4..03feef89 100644 --- a/src/base/kernel/Base.cpp +++ b/src/base/kernel/Base.cpp @@ -57,6 +57,14 @@ #endif +namespace xmrig { + +static const char *kConfigPathV1 = "/1/config"; +static const char *kConfigPathV2 = "/2/config"; + +} // namespace xmrig + + class xmrig::BasePrivate { public: @@ -296,7 +304,7 @@ void xmrig::Base::onFileChanged(const String &fileName) void xmrig::Base::onRequest(IApiRequest &request) { if (request.method() == IApiRequest::METHOD_GET) { - if (request.url() == "/1/config") { + if (request.url() == kConfigPathV1 || request.url() == kConfigPathV2) { if (request.isRestricted()) { return request.done(403); } @@ -306,7 +314,7 @@ void xmrig::Base::onRequest(IApiRequest &request) } } else if (request.method() == IApiRequest::METHOD_PUT || request.method() == IApiRequest::METHOD_POST) { - if (request.url() == "/1/config") { + if (request.url() == kConfigPathV1 || request.url() == kConfigPathV2) { request.accept(); if (!reload(request.json())) { From cc4351d49fe7b2cabf8a9356d5e42ae302a43119 Mon Sep 17 00:00:00 2001 From: xmrig Date: Tue, 6 Aug 2019 13:23:51 +0700 Subject: [PATCH 131/172] Update CPU.md --- doc/CPU.md | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/doc/CPU.md b/doc/CPU.md index b93651f4..f5d07745 100644 --- a/doc/CPU.md +++ b/doc/CPU.md @@ -22,16 +22,8 @@ Example below demonstrate all primary ideas of flexible profiles configuration: "hw-aes": null, "priority": null, "asm": true, - "rx/wow": [ - -1, - -1, - -1, - -1, - ], - "cn": [ - 0, - 2 - ], + "rx/wow": [-1, -1, -1, -1], + "cn": [0, 2], "cn-lite": [ { "intensity": 2, @@ -42,13 +34,8 @@ Example below demonstrate all primary ideas of flexible profiles configuration: "affinity": 2 } ], - "custom-profile": [ - 0, - 2, - ], - "*": [ - -1 - ], + "custom-profile": [0, 2], + 
"*": [-1], "cn/r": "custom-profile", "cn/0": false } From 0adab95ce424c48e53876e8e33c38116fe5359e6 Mon Sep 17 00:00:00 2001 From: xmrig Date: Wed, 7 Aug 2019 00:10:47 +0700 Subject: [PATCH 132/172] Update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 97216550..87cd1d4a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# v2.99.5-beta +- [#1066](https://github.com/xmrig/xmrig/issues/1066#issuecomment-518080529) Fixed crash and added error message if pool not ready for RandomX. +- [#1092](https://github.com/xmrig/xmrig/issues/1092) Fixed crash if wrong CPU affinity used. +- JSON arrays in config and API now more compact, single line if possible. + # v2.99.4-beta - [#1062](https://github.com/xmrig/xmrig/issues/1062) Fixed 32 bit support. **32 bit is slow and deprecated**. - [#1088](https://github.com/xmrig/xmrig/pull/1088) Fixed macOS compilation. From 96fd7545d1e37a0c89d0917ad35e2eb5d9b87e49 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 7 Aug 2019 16:13:23 +0700 Subject: [PATCH 133/172] Added class CpuThreads. 
--- src/backend/common/Threads.cpp | 27 +++------- src/backend/common/Threads.h | 8 +-- src/backend/cpu/CpuBackend.cpp | 4 +- src/backend/cpu/CpuConfig.cpp | 10 ++-- src/backend/cpu/CpuConfig.h | 16 +++--- src/backend/cpu/CpuThread.h | 12 ++--- src/backend/cpu/CpuThreads.cpp | 54 +++++++++++++++++++ src/backend/cpu/CpuThreads.h | 63 +++++++++++++++++++++++ src/backend/cpu/cpu.cmake | 2 + src/backend/cpu/interfaces/ICpuInfo.h | 2 +- src/backend/cpu/platform/BasicCpuInfo.cpp | 14 ++--- src/backend/cpu/platform/HwlocCpuInfo.cpp | 6 +-- 12 files changed, 160 insertions(+), 58 deletions(-) create mode 100644 src/backend/cpu/CpuThreads.cpp create mode 100644 src/backend/cpu/CpuThreads.h diff --git a/src/backend/common/Threads.cpp b/src/backend/common/Threads.cpp index 894c404b..323352d1 100644 --- a/src/backend/common/Threads.cpp +++ b/src/backend/common/Threads.cpp @@ -24,7 +24,7 @@ #include "backend/common/Threads.h" -#include "backend/cpu/CpuThread.h" +#include "backend/cpu/CpuThreads.h" #include "rapidjson/document.h" @@ -38,9 +38,9 @@ static const char *kAsterisk = "*"; template -const std::vector &xmrig::Threads::get(const String &profileName) const +const T &xmrig::Threads::get(const String &profileName) const { - static std::vector empty; + static T empty; if (profileName.isNull() || !has(profileName)) { return empty; } @@ -56,16 +56,9 @@ size_t xmrig::Threads::read(const rapidjson::Value &value) for (auto &member : value.GetObject()) { if (member.value.IsArray()) { - std::vector threads; + T threads(member.value); - for (auto &v : member.value.GetArray()) { - T thread(v); - if (thread.isValid()) { - threads.push_back(std::move(thread)); - } - } - - if (!threads.empty()) { + if (!threads.isEmpty()) { move(member.name.GetString(), std::move(threads)); } @@ -138,13 +131,7 @@ void xmrig::Threads::toJSON(rapidjson::Value &out, rapidjson::Document &doc) auto &allocator = doc.GetAllocator(); for (const auto &kv : m_profiles) { - Value arr(kArrayType); - - for (const T 
&thread : kv.second) { - arr.PushBack(thread.toJSON(doc), allocator); - } - - out.AddMember(kv.first.toJSON(), arr, allocator); + out.AddMember(kv.first.toJSON(), kv.second.toJSON(doc), allocator); } for (const Algorithm &algo : m_disabled) { @@ -159,6 +146,6 @@ void xmrig::Threads::toJSON(rapidjson::Value &out, rapidjson::Document &doc) namespace xmrig { -template class Threads; +template class Threads; } // namespace xmrig diff --git a/src/backend/common/Threads.h b/src/backend/common/Threads.h index bc9e36fd..2cb333d6 100644 --- a/src/backend/common/Threads.h +++ b/src/backend/common/Threads.h @@ -45,18 +45,18 @@ public: inline bool has(const char *profile) const { return m_profiles.count(profile) > 0; } inline bool isDisabled(const Algorithm &algo) const { return m_disabled.count(algo) > 0; } inline bool isExist(const Algorithm &algo) const { return isDisabled(algo) || m_aliases.count(algo) > 0 || has(algo.shortName()); } - inline const std::vector &get(const Algorithm &algo, bool strict = false) const { return get(profileName(algo, strict)); } + inline const T &get(const Algorithm &algo, bool strict = false) const { return get(profileName(algo, strict)); } inline void disable(const Algorithm &algo) { m_disabled.insert(algo); } - inline void move(const char *profile, std::vector &&threads) { m_profiles.insert({ profile, threads }); } + inline void move(const char *profile, T &&threads) { m_profiles.insert({ profile, threads }); } - const std::vector &get(const String &profileName) const; + const T &get(const String &profileName) const; size_t read(const rapidjson::Value &value); String profileName(const Algorithm &algorithm, bool strict = false) const; void toJSON(rapidjson::Value &out, rapidjson::Document &doc) const; private: std::map m_aliases; - std::map > m_profiles; + std::map m_profiles; std::set m_disabled; }; diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 60ca8cf3..7f208c38 100644 --- a/src/backend/cpu/CpuBackend.cpp 
+++ b/src/backend/cpu/CpuBackend.cpp @@ -46,7 +46,7 @@ namespace xmrig { -extern template class Threads; +extern template class Threads; static const char *tag = CYAN_BG_BOLD(" cpu "); @@ -150,7 +150,7 @@ bool xmrig::CpuBackend::isEnabled() const bool xmrig::CpuBackend::isEnabled(const Algorithm &algorithm) const { - return !d_ptr->controller->config()->cpu().threads().get(algorithm).empty(); + return !d_ptr->controller->config()->cpu().threads().get(algorithm).isEmpty(); } diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index 582649ef..883ef506 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -62,7 +62,7 @@ static const char *kRx = "rx"; static const char *kRxWOW = "rx/wow"; #endif -extern template class Threads; +extern template class Threads; } @@ -103,15 +103,15 @@ rapidjson::Value xmrig::CpuConfig::toJSON(rapidjson::Document &doc) const std::vector xmrig::CpuConfig::get(const Miner *miner, const Algorithm &algorithm) const { std::vector out; - const std::vector &threads = m_threads.get(algorithm); + const CpuThreads &threads = m_threads.get(algorithm); - if (threads.empty()) { + if (threads.isEmpty()) { return out; } - out.reserve(threads.size()); + out.reserve(threads.count()); - for (const CpuThread &thread : threads) { + for (const CpuThread &thread : threads.data()) { out.push_back(CpuLaunchData(miner, algorithm, *this, thread)); } diff --git a/src/backend/cpu/CpuConfig.h b/src/backend/cpu/CpuConfig.h index 5b2f3f86..5aca5188 100644 --- a/src/backend/cpu/CpuConfig.h +++ b/src/backend/cpu/CpuConfig.h @@ -28,7 +28,7 @@ #include "backend/common/Threads.h" #include "backend/cpu/CpuLaunchData.h" -#include "backend/cpu/CpuThread.h" +#include "backend/cpu/CpuThreads.h" #include "crypto/common/Assembly.h" @@ -51,12 +51,12 @@ public: std::vector get(const Miner *miner, const Algorithm &algorithm) const; void read(const rapidjson::Value &value); - inline bool isEnabled() const { return m_enabled; } - inline 
bool isHugePages() const { return m_hugePages; } - inline bool isShouldSave() const { return m_shouldSave; } - inline const Assembly &assembly() const { return m_assembly; } - inline const Threads &threads() const { return m_threads; } - inline int priority() const { return m_priority; } + inline bool isEnabled() const { return m_enabled; } + inline bool isHugePages() const { return m_hugePages; } + inline bool isShouldSave() const { return m_shouldSave; } + inline const Assembly &assembly() const { return m_assembly; } + inline const Threads &threads() const { return m_threads; } + inline int priority() const { return m_priority; } private: void generate(); @@ -70,7 +70,7 @@ private: bool m_hugePages = true; bool m_shouldSave = false; int m_priority = -1; - Threads m_threads; + Threads m_threads; }; diff --git a/src/backend/cpu/CpuThread.h b/src/backend/cpu/CpuThread.h index adaffa68..7c7ce4be 100644 --- a/src/backend/cpu/CpuThread.h +++ b/src/backend/cpu/CpuThread.h @@ -22,11 +22,8 @@ * along with this program. If not, see . 
*/ -#ifndef XMRIG_CPUTHREADCONFIG_H -#define XMRIG_CPUTHREADCONFIG_H - - -#include +#ifndef XMRIG_CPUTHREAD_H +#define XMRIG_CPUTHREAD_H #include "rapidjson/fwd.h" @@ -58,10 +55,7 @@ private: }; -typedef std::vector CpuThreads; - - } /* namespace xmrig */ -#endif /* XMRIG_CPUTHREADCONFIG_H */ +#endif /* XMRIG_CPUTHREAD_H */ diff --git a/src/backend/cpu/CpuThreads.cpp b/src/backend/cpu/CpuThreads.cpp new file mode 100644 index 00000000..f877c9d3 --- /dev/null +++ b/src/backend/cpu/CpuThreads.cpp @@ -0,0 +1,54 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + + +#include "backend/cpu/CpuThreads.h" +#include "rapidjson/document.h" + + +xmrig::CpuThreads::CpuThreads(const rapidjson::Value &value) +{ + if (value.IsArray()) { + for (auto &v : value.GetArray()) { + CpuThread thread(v); + if (thread.isValid()) { + add(std::move(thread)); + } + } + } +} + + +rapidjson::Value xmrig::CpuThreads::toJSON(rapidjson::Document &doc) const +{ + using namespace rapidjson; + auto &allocator = doc.GetAllocator(); + + Value array(kArrayType); + for (const CpuThread &thread : m_data) { + array.PushBack(thread.toJSON(doc), allocator); + } + + return array; +} diff --git a/src/backend/cpu/CpuThreads.h b/src/backend/cpu/CpuThreads.h new file mode 100644 index 00000000..3951c202 --- /dev/null +++ b/src/backend/cpu/CpuThreads.h @@ -0,0 +1,63 @@ +/* XMRig + * Copyright 2010 Jeff Garzik + * Copyright 2012-2014 pooler + * Copyright 2014 Lucas Jones + * Copyright 2014-2016 Wolf9466 + * Copyright 2016 Jay D Dee + * Copyright 2017-2018 XMR-Stak , + * Copyright 2018-2019 SChernykh + * Copyright 2016-2019 XMRig , + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef XMRIG_CPUTHREADS_H +#define XMRIG_CPUTHREADS_H + + +#include + + +#include "backend/cpu/CpuThread.h" + + +namespace xmrig { + + +class CpuThreads +{ +public: + inline CpuThreads() {} + inline CpuThreads(size_t count) : m_data(count) {} + + CpuThreads(const rapidjson::Value &value); + + inline bool isEmpty() const { return m_data.empty(); } + inline const std::vector &data() const { return m_data; } + inline size_t count() const { return m_data.size(); } + inline void add(CpuThread &&thread) { m_data.push_back(thread); } + inline void add(int64_t affinity, int intensity = 1) { add(CpuThread(intensity, affinity)); } + inline void reserve(size_t capacity) { m_data.reserve(capacity); } + + rapidjson::Value toJSON(rapidjson::Document &doc) const; + +private: + std::vector m_data; +}; + + +} /* namespace xmrig */ + + +#endif /* XMRIG_CPUTHREADS_H */ diff --git a/src/backend/cpu/cpu.cmake b/src/backend/cpu/cpu.cmake index 2ae73db7..b6c8915b 100644 --- a/src/backend/cpu/cpu.cmake +++ b/src/backend/cpu/cpu.cmake @@ -4,6 +4,7 @@ set(HEADERS_BACKEND_CPU src/backend/cpu/CpuConfig.h src/backend/cpu/CpuLaunchData.cpp src/backend/cpu/CpuThread.h + src/backend/cpu/CpuThreads.h src/backend/cpu/CpuWorker.h src/backend/cpu/interfaces/ICpuInfo.h ) @@ -14,6 +15,7 @@ set(SOURCES_BACKEND_CPU src/backend/cpu/CpuConfig.cpp src/backend/cpu/CpuLaunchData.h src/backend/cpu/CpuThread.cpp + src/backend/cpu/CpuThreads.cpp src/backend/cpu/CpuWorker.cpp ) diff --git a/src/backend/cpu/interfaces/ICpuInfo.h b/src/backend/cpu/interfaces/ICpuInfo.h index daaa39c3..9bc3b11a 100644 --- a/src/backend/cpu/interfaces/ICpuInfo.h +++ b/src/backend/cpu/interfaces/ICpuInfo.h @@ -26,7 +26,7 @@ #define XMRIG_CPUINFO_H -#include "backend/cpu/CpuThread.h" +#include "backend/cpu/CpuThreads.h" #include "crypto/common/Assembly.h" #include "crypto/common/Algorithm.h" diff --git a/src/backend/cpu/platform/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp index 2b63edba..49d4b005 100644 --- 
a/src/backend/cpu/platform/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -182,23 +182,25 @@ const char *xmrig::BasicCpuInfo::backend() const xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &algorithm) const { - if (threads() == 1) { - return CpuThreads(1); + const size_t count = std::thread::hardware_concurrency(); + + if (count == 1) { + return 1; } # ifdef XMRIG_ALGO_CN_GPU if (algorithm == Algorithm::CN_GPU) { - return CpuThreads(threads()); + return count; } # endif if (algorithm.family() == Algorithm::CN_LITE || algorithm.family() == Algorithm::CN_PICO) { - return CpuThreads(threads()); + return count; } if (algorithm.family() == Algorithm::CN_HEAVY) { - return CpuThreads(std::max(threads() / 4, 1)); + return std::max(count / 4, 1); } - return CpuThreads(std::max(threads() / 2, 1)); + return std::max(count / 2, 1); } diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index b2aa47d7..8a9c75d3 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -222,7 +222,7 @@ xmrig::CpuThreads xmrig::HwlocCpuInfo::threads(const Algorithm &algorithm) const processTopLevelCache(cache, algorithm, threads); } - if (threads.empty()) { + if (threads.isEmpty()) { LOG_WARN("hwloc auto configuration for algorithm \"%s\" failed.", algorithm.shortName()); return BasicCpuInfo::threads(algorithm); @@ -286,7 +286,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith for (hwloc_obj_t core : cores) { const std::vector units = findByType(core, HWLOC_OBJ_PU); for (hwloc_obj_t pu : units) { - threads.push_back(CpuThread(1, pu->os_index)); + threads.add(pu->os_index); } } @@ -307,7 +307,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith PUs--; allocated_pu = true; - threads.push_back(CpuThread(1, units[pu_id]->os_index)); + threads.add(units[pu_id]->os_index); if (cacheHashes == 0) { break; 
From 42dc914eecb8c4b17efda3a6203bb06d723bf767 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 7 Aug 2019 18:12:39 +0700 Subject: [PATCH 134/172] Added alternative object format for CPU threads. --- doc/CPU.md | 6 ++ src/backend/common/Threads.cpp | 2 +- src/backend/cpu/CpuThreads.cpp | 90 +++++++++++++++++++++++++++-- src/backend/cpu/CpuThreads.h | 7 +++ src/core/config/ConfigTransform.cpp | 47 ++------------- 5 files changed, 106 insertions(+), 46 deletions(-) diff --git a/doc/CPU.md b/doc/CPU.md index f5d07745..15c3bf79 100644 --- a/doc/CPU.md +++ b/doc/CPU.md @@ -9,6 +9,7 @@ Example below demonstrate all primary ideas of flexible profiles configuration: * `"rx/wow"` Exact match to algorithm `rx/wow`, defined 4 threads without CPU affinity. * `"cn"` Default failback profile for all `cn/*` algorithms, defined 2 threads with CPU affinity, another failback profiles is `cn-lite`, `cn-heavy` and `rx`. * `"cn-lite"` Default failback profile for all `cn-lite/*` algorithms, defined 2 double threads with CPU affinity. +* `"cn-pico"` Alternative short object format, since 2.99.5. * `"custom-profile"` Custom user defined profile. * `"*"` Failback profile for all unhandled by other profiles algorithms. * `"cn/r"` Exact match, alias to profile `custom-profile`. 
@@ -34,6 +35,11 @@ Example below demonstrate all primary ideas of flexible profiles configuration: "affinity": 2 } ], + "cn-pico": { + "intensity": 2, + "threads": 8, + "affinity": -1 + }, "custom-profile": [0, 2], "*": [-1], "cn/r": "custom-profile", diff --git a/src/backend/common/Threads.cpp b/src/backend/common/Threads.cpp index 323352d1..17fc2951 100644 --- a/src/backend/common/Threads.cpp +++ b/src/backend/common/Threads.cpp @@ -55,7 +55,7 @@ size_t xmrig::Threads::read(const rapidjson::Value &value) using namespace rapidjson; for (auto &member : value.GetObject()) { - if (member.value.IsArray()) { + if (member.value.IsArray() || member.value.IsObject()) { T threads(member.value); if (!threads.isEmpty()) { diff --git a/src/backend/cpu/CpuThreads.cpp b/src/backend/cpu/CpuThreads.cpp index f877c9d3..2e8b9e1f 100644 --- a/src/backend/cpu/CpuThreads.cpp +++ b/src/backend/cpu/CpuThreads.cpp @@ -23,10 +23,66 @@ */ +#include + + #include "backend/cpu/CpuThreads.h" +#include "base/io/json/Json.h" #include "rapidjson/document.h" +namespace xmrig { + + +static const char *kAffinity = "affinity"; +static const char *kIntensity = "intensity"; +static const char *kThreads = "threads"; + + +static inline int64_t getAffinityMask(const rapidjson::Value &value) +{ + if (value.IsInt64()) { + return value.GetInt64(); + } + + if (value.IsString()) { + const char *arg = value.GetString(); + const char *p = strstr(arg, "0x"); + + return p ? 
strtoll(p, nullptr, 16) : strtoll(arg, nullptr, 10); + } + + return -1L; +} + + +static inline int64_t getAffinity(uint64_t index, int64_t affinity) +{ + if (affinity == -1L) { + return -1L; + } + + size_t idx = 0; + + for (size_t i = 0; i < 64; i++) { + if (!(static_cast(affinity) & (1ULL << i))) { + continue; + } + + if (idx == index) { + return static_cast(i); + } + + idx++; + } + + return -1L; +} + + +} + + xmrig::CpuThreads::CpuThreads(const rapidjson::Value &value) { if (value.IsArray()) { @@ -37,6 +93,20 @@ xmrig::CpuThreads::CpuThreads(const rapidjson::Value &value) } } } + else if (value.IsObject()) { + int intensity = Json::getInt(value, kIntensity, 1); + const size_t threads = std::min(Json::getUint(value, kThreads), 1024); + m_affinity = getAffinityMask(Json::getValue(value, kAffinity)); + m_format = ObjectFormat; + + if (intensity < 1 || intensity > 5) { + intensity = 1; + } + + for (size_t i = 0; i < threads; ++i) { + add(getAffinity(i, m_affinity), intensity); + } + } } @@ -45,10 +115,22 @@ rapidjson::Value xmrig::CpuThreads::toJSON(rapidjson::Document &doc) const using namespace rapidjson; auto &allocator = doc.GetAllocator(); - Value array(kArrayType); - for (const CpuThread &thread : m_data) { - array.PushBack(thread.toJSON(doc), allocator); + Value out; + + if (m_format == ArrayFormat) { + out.SetArray(); + + for (const CpuThread &thread : m_data) { + out.PushBack(thread.toJSON(doc), allocator); + } + } + else { + out.SetObject(); + + out.AddMember(StringRef(kIntensity), m_data.empty() ? 
1 : m_data.front().intensity(), allocator); + out.AddMember(StringRef(kThreads), static_cast(m_data.size()), allocator); + out.AddMember(StringRef(kAffinity), m_affinity, allocator); } - return array; + return out; } diff --git a/src/backend/cpu/CpuThreads.h b/src/backend/cpu/CpuThreads.h index 3951c202..9d03e78b 100644 --- a/src/backend/cpu/CpuThreads.h +++ b/src/backend/cpu/CpuThreads.h @@ -53,6 +53,13 @@ public: rapidjson::Value toJSON(rapidjson::Document &doc) const; private: + enum Format { + ArrayFormat, + ObjectFormat + }; + + Format m_format = ArrayFormat; + int64_t m_affinity = -1; std::vector m_data; }; diff --git a/src/core/config/ConfigTransform.cpp b/src/core/config/ConfigTransform.cpp index ed315fb4..622855af 100644 --- a/src/core/config/ConfigTransform.cpp +++ b/src/core/config/ConfigTransform.cpp @@ -36,6 +36,7 @@ static const char *kAffinity = "affinity"; static const char *kAsterisk = "*"; static const char *kCpu = "cpu"; static const char *kIntensity = "intensity"; +static const char *kThreads = "threads"; #ifdef XMRIG_ALGO_RANDOMX static const char *kRandomX = "randomx"; @@ -79,30 +80,6 @@ static inline bool isHwAes(uint64_t av) } -static inline int64_t affinity(uint64_t index, int64_t affinity) -{ - if (affinity == -1L) { - return -1L; - } - - size_t idx = 0; - - for (size_t i = 0; i < 64; i++) { - if (!(static_cast(affinity) & (1ULL << i))) { - continue; - } - - if (idx == index) { - return static_cast(i); - } - - idx++; - } - - return -1L; -} - - } @@ -123,24 +100,12 @@ void xmrig::ConfigTransform::finalize(rapidjson::Document &doc) doc.AddMember(StringRef(kCpu), Value(kObjectType), allocator); } - Value threads(kArrayType); + Value profile(kObjectType); + profile.AddMember(StringRef(kIntensity), m_intensity, allocator); + profile.AddMember(StringRef(kThreads), m_threads, allocator); + profile.AddMember(StringRef(kAffinity), m_affinity, allocator); - if (m_intensity > 1) { - for (uint64_t i = 0; i < m_threads; ++i) { - Value 
thread(kObjectType); - thread.AddMember(StringRef(kIntensity), m_intensity, allocator); - thread.AddMember(StringRef(kAffinity), affinity(i, m_affinity), allocator); - - threads.PushBack(thread, doc.GetAllocator()); - } - } - else { - for (uint64_t i = 0; i < m_threads; ++i) { - threads.PushBack(affinity(i, m_affinity), doc.GetAllocator()); - } - } - - doc[kCpu].AddMember(StringRef(kAsterisk), threads, doc.GetAllocator()); + doc[kCpu].AddMember(StringRef(kAsterisk), profile, doc.GetAllocator()); } } From fd9039928b1fb144eb2f0d7b4a6fb93d6ba97685 Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 7 Aug 2019 18:51:32 +0700 Subject: [PATCH 135/172] Added maxIntensity method to Algorithm. --- src/backend/common/Workers.cpp | 28 ++++------------------------ src/crypto/common/Algorithm.cpp | 18 ++++++++++++++++++ src/crypto/common/Algorithm.h | 1 + 3 files changed, 23 insertions(+), 24 deletions(-) diff --git a/src/backend/common/Workers.cpp b/src/backend/common/Workers.cpp index 6a369c1b..036bc8b6 100644 --- a/src/backend/common/Workers.cpp +++ b/src/backend/common/Workers.cpp @@ -163,36 +163,16 @@ void xmrig::Workers::onReady(void *arg) namespace xmrig { -#if defined (XMRIG_ALGO_RANDOMX) || defined (XMRIG_ALGO_CN_GPU) -static void printIntensityWarning(Thread *handle) -{ - LOG_WARN("CPU thread %zu warning: \"intensity %d not supported for %s algorithm\".", handle->index(), handle->config().intensity, handle->config().algorithm.shortName()); -} -#endif - - template<> xmrig::IWorker *xmrig::Workers::create(Thread *handle) { const int intensity = handle->config().intensity; -# if defined (XMRIG_ALGO_RANDOMX) || defined (XMRIG_ALGO_CN_GPU) - if (intensity > 1) { -# ifdef XMRIG_ALGO_RANDOMX - if (handle->config().algorithm.family() == Algorithm::RANDOM_X) { - printIntensityWarning(handle); +# if defined(XMRIG_ALGO_RANDOMX) || defined(XMRIG_ALGO_CN_GPU) + if (intensity > handle->config().algorithm.maxIntensity()) { + LOG_WARN("CPU thread %zu warning: \"intensity %d not supported 
for %s algorithm\".", handle->index(), handle->config().intensity, handle->config().algorithm.shortName()); - return new CpuWorker<1>(handle->index(), handle->config()); - } -# endif - -# ifdef XMRIG_ALGO_CN_GPU - if (handle->config().algorithm == Algorithm::CN_GPU) { - printIntensityWarning(handle); - - return new CpuWorker<1>(handle->index(), handle->config()); - } -# endif + return new CpuWorker<1>(handle->index(), handle->config()); } # endif diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index c7990052..f0670b26 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -120,6 +120,24 @@ static AlgoName const algorithm_names[] = { } /* namespace xmrig */ +int xmrig::Algorithm::maxIntensity() const +{ +# ifdef XMRIG_ALGO_RANDOMX + if (family() == RANDOM_X) { + return 1; + } +# endif + +# ifdef XMRIG_ALGO_CN_GPU + if (m_id == CN_GPU) { + return 1; + } +# endif + + return 5; +} + + rapidjson::Value xmrig::Algorithm::toJSON() const { using namespace rapidjson; diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index b30a946b..0b817bde 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -102,6 +102,7 @@ public: inline bool operator==(const Algorithm &other) const { return isEqual(other); } inline operator Algorithm::Id() const { return m_id; } + int maxIntensity() const; rapidjson::Value toJSON() const; size_t memory() const; From 97192f224d79a301c68b3b03818f502acd4cb1fd Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 7 Aug 2019 21:47:42 +0700 Subject: [PATCH 136/172] Changed CPU threads format. 
--- doc/CPU.md | 50 +++++++++++++++++------ src/backend/cpu/CpuConfig.cpp | 17 ++++---- src/backend/cpu/CpuThread.cpp | 39 ++++++------------ src/backend/cpu/CpuThread.h | 7 ++-- src/backend/cpu/CpuThreads.h | 2 +- src/backend/cpu/platform/HwlocCpuInfo.cpp | 5 ++- 6 files changed, 68 insertions(+), 52 deletions(-) diff --git a/doc/CPU.md b/doc/CPU.md index 15c3bf79..756d3a0c 100644 --- a/doc/CPU.md +++ b/doc/CPU.md @@ -1,5 +1,7 @@ # CPU backend +**Information in this document actual to version 2.99.5+** + All CPU related settings contains in one `cpu` object in config file, CPU backend allow specify multiple profiles and allow switch between them without restrictions by pool request or config change. Default auto-configuration create reasonable minimum of profiles which cover all supported algorithms. ### Example @@ -9,7 +11,7 @@ Example below demonstrate all primary ideas of flexible profiles configuration: * `"rx/wow"` Exact match to algorithm `rx/wow`, defined 4 threads without CPU affinity. * `"cn"` Default failback profile for all `cn/*` algorithms, defined 2 threads with CPU affinity, another failback profiles is `cn-lite`, `cn-heavy` and `rx`. * `"cn-lite"` Default failback profile for all `cn-lite/*` algorithms, defined 2 double threads with CPU affinity. -* `"cn-pico"` Alternative short object format, since 2.99.5. +* `"cn-pico"` Alternative short object format. * `"custom-profile"` Custom user defined profile. * `"*"` Failback profile for all unhandled by other profiles algorithms. * `"cn/r"` Exact match, alias to profile `custom-profile`. 
@@ -24,16 +26,13 @@ Example below demonstrate all primary ideas of flexible profiles configuration: "priority": null, "asm": true, "rx/wow": [-1, -1, -1, -1], - "cn": [0, 2], + "cn": [ + [1, 0], + [1, 2] + ], "cn-lite": [ - { - "intensity": 2, - "affinity": 0 - }, - { - "intensity": 2, - "affinity": 2 - } + [2, 0], + [2, 2] ], "cn-pico": { "intensity": 2, @@ -48,8 +47,35 @@ Example below demonstrate all primary ideas of flexible profiles configuration: } ``` -### Intensity -This option was known as `low_power_mode`, possible values is range from 1 to 5, for convinient if value 1 used, possible omit this option and specify CPU thread config by only one number: CPU affinity, instead of object. +## Threads definition +Threads can be defined in 3 formats. + +#### Array format +```json +[ + [1, 0], + [1, 2], + [1, -1], + [2, -1] +] +``` +Each line represent one thread, first element is intensity, this option was known as `low_power_mode`, possible values is range from 1 to 5, second element is CPU affinity, special value `-1` means no affinity. + +#### Short array format +```json +[-1, -1, -1, -1] +``` +Each number represent one thread and means CPU affinity, this is default format for algorithm with maximum intensity 1, currently it all RandomX variants and cryptonight-gpu. + +#### Short object format +```json +{ + "intensity": 2, + "threads": 8, + "affinity": -1 +} +``` +Internal format, but can be user defined. 
## Shared options diff --git a/src/backend/cpu/CpuConfig.cpp b/src/backend/cpu/CpuConfig.cpp index 883ef506..5905b7b4 100644 --- a/src/backend/cpu/CpuConfig.cpp +++ b/src/backend/cpu/CpuConfig.cpp @@ -147,31 +147,32 @@ void xmrig::CpuConfig::read(const rapidjson::Value &value) void xmrig::CpuConfig::generate() { - m_shouldSave = true; + m_shouldSave = true; + ICpuInfo *cpu = Cpu::info(); m_threads.disable(Algorithm::CN_0); - m_threads.move(kCn, Cpu::info()->threads(Algorithm::CN_0)); + m_threads.move(kCn, cpu->threads(Algorithm::CN_0)); # ifdef XMRIG_ALGO_CN_GPU - m_threads.move(kCnGPU, Cpu::info()->threads(Algorithm::CN_GPU)); + m_threads.move(kCnGPU, cpu->threads(Algorithm::CN_GPU)); # endif # ifdef XMRIG_ALGO_CN_LITE m_threads.disable(Algorithm::CN_LITE_0); - m_threads.move(kCnLite, Cpu::info()->threads(Algorithm::CN_LITE_1)); + m_threads.move(kCnLite, cpu->threads(Algorithm::CN_LITE_1)); # endif # ifdef XMRIG_ALGO_CN_HEAVY - m_threads.move(kCnHeavy, Cpu::info()->threads(Algorithm::CN_HEAVY_0)); + m_threads.move(kCnHeavy, cpu->threads(Algorithm::CN_HEAVY_0)); # endif # ifdef XMRIG_ALGO_CN_PICO - m_threads.move(kCnPico, Cpu::info()->threads(Algorithm::CN_PICO_0)); + m_threads.move(kCnPico, cpu->threads(Algorithm::CN_PICO_0)); # endif # ifdef XMRIG_ALGO_RANDOMX - m_threads.move(kRx, Cpu::info()->threads(Algorithm::RX_0)); - m_threads.move(kRxWOW, Cpu::info()->threads(Algorithm::RX_WOW)); + m_threads.move(kRx, cpu->threads(Algorithm::RX_0)); + m_threads.move(kRxWOW, cpu->threads(Algorithm::RX_WOW)); # endif } diff --git a/src/backend/cpu/CpuThread.cpp b/src/backend/cpu/CpuThread.cpp index e7132cfa..7d7a9e85 100644 --- a/src/backend/cpu/CpuThread.cpp +++ b/src/backend/cpu/CpuThread.cpp @@ -28,25 +28,14 @@ #include "rapidjson/document.h" -namespace xmrig { - - -static const char *kAffinity = "affinity"; -static const char *kIntensity = "intensity"; - - -} - - - xmrig::CpuThread::CpuThread(const rapidjson::Value &value) { - if (value.IsObject()) { - m_intensity = 
Json::getInt(value, kIntensity, -1); - m_affinity = Json::getInt(value, kAffinity, -1); + if (value.IsArray() && value.Size() >= 2) { + m_intensity = value[0].GetInt(); + m_affinity = value[1].GetInt(); } else if (value.IsInt()) { - m_intensity = 1; + m_intensity = -1; m_affinity = value.GetInt(); } } @@ -55,17 +44,15 @@ xmrig::CpuThread::CpuThread(const rapidjson::Value &value) rapidjson::Value xmrig::CpuThread::toJSON(rapidjson::Document &doc) const { using namespace rapidjson; - - if (intensity() > 1) { - auto &allocator = doc.GetAllocator(); - - Value obj(kObjectType); - - obj.AddMember(StringRef(kIntensity), m_intensity, allocator); - obj.AddMember(StringRef(kAffinity), m_affinity, allocator); - - return obj; + if (m_intensity == -1) { + return Value(m_affinity); } - return Value(m_affinity); + auto &allocator = doc.GetAllocator(); + + Value out(kArrayType); + out.PushBack(m_intensity, allocator); + out.PushBack(m_affinity, allocator); + + return out; } diff --git a/src/backend/cpu/CpuThread.h b/src/backend/cpu/CpuThread.h index 7c7ce4be..691ee114 100644 --- a/src/backend/cpu/CpuThread.h +++ b/src/backend/cpu/CpuThread.h @@ -35,13 +35,14 @@ namespace xmrig { class CpuThread { public: - inline constexpr CpuThread(int intensity = 1, int64_t affinity = -1) : m_intensity(intensity), m_affinity(affinity) {} + inline constexpr CpuThread() {} + inline constexpr CpuThread(int64_t affinity, int intensity) : m_intensity(intensity), m_affinity(affinity) {} CpuThread(const rapidjson::Value &value); inline bool isEqual(const CpuThread &other) const { return other.m_affinity == m_affinity && other.m_intensity == m_intensity; } - inline bool isValid() const { return m_intensity >= 1 && m_intensity <= 5; } - inline int intensity() const { return m_intensity; } + inline bool isValid() const { return m_intensity == -1 || (m_intensity >= 1 && m_intensity <= 5); } + inline int intensity() const { return m_intensity == -1 ? 
1 : m_intensity; } inline int64_t affinity() const { return m_affinity; } inline bool operator!=(const CpuThread &other) const { return !isEqual(other); } diff --git a/src/backend/cpu/CpuThreads.h b/src/backend/cpu/CpuThreads.h index 9d03e78b..461cc1f2 100644 --- a/src/backend/cpu/CpuThreads.h +++ b/src/backend/cpu/CpuThreads.h @@ -47,7 +47,7 @@ public: inline const std::vector &data() const { return m_data; } inline size_t count() const { return m_data.size(); } inline void add(CpuThread &&thread) { m_data.push_back(thread); } - inline void add(int64_t affinity, int intensity = 1) { add(CpuThread(intensity, affinity)); } + inline void add(int64_t affinity, int intensity) { add(CpuThread(affinity, intensity)); } inline void reserve(size_t capacity) { m_data.reserve(capacity); } rapidjson::Value toJSON(rapidjson::Document &doc) const; diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 8a9c75d3..c441457e 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -250,6 +250,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith int L2_associativity = 0; size_t extra = 0; const size_t scratchpad = algorithm.memory(); + int intensity = algorithm.maxIntensity() == 1 ? 
-1 : 1; if (cache->attr->cache.depth == 3 && isCacheExclusive(cache)) { for (size_t i = 0; i < cache->arity; ++i) { @@ -286,7 +287,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith for (hwloc_obj_t core : cores) { const std::vector units = findByType(core, HWLOC_OBJ_PU); for (hwloc_obj_t pu : units) { - threads.add(pu->os_index); + threads.add(pu->os_index, intensity); } } @@ -307,7 +308,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith PUs--; allocated_pu = true; - threads.add(units[pu_id]->os_index); + threads.add(units[pu_id]->os_index, intensity); if (cacheHashes == 0) { break; From 9a842a593b2d7a0148a6bdfb77e584c3af2064b4 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 8 Aug 2019 00:21:01 +0700 Subject: [PATCH 137/172] Use intensity=2 for cn-pico. --- .../AMD_Ryzen_7_3700X_windows_2_0_4.xml | 104 ++++++++++++++++++ src/backend/cpu/platform/HwlocCpuInfo.cpp | 6 + 2 files changed, 110 insertions(+) create mode 100644 doc/topology/AMD_Ryzen_7_3700X_windows_2_0_4.xml diff --git a/doc/topology/AMD_Ryzen_7_3700X_windows_2_0_4.xml b/doc/topology/AMD_Ryzen_7_3700X_windows_2_0_4.xml new file mode 100644 index 00000000..1f2d0ee4 --- /dev/null +++ b/doc/topology/AMD_Ryzen_7_3700X_windows_2_0_4.xml @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index c441457e..d88b4207 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -277,6 +277,12 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith size_t cacheHashes = ((L3 + extra) + (scratchpad / 2)) / scratchpad; +# ifdef XMRIG_ALGO_CN_PICO + if (algorithm == Algorithm::CN_PICO_0 && (cacheHashes / PUs) 
>= 2) { + intensity = 2; + } +# endif + # ifdef XMRIG_ALGO_CN_GPU if (algorithm == Algorithm::CN_GPU) { cacheHashes = PUs; From 5896b27bf3852177739fefe10e35c8d6a9ca68ac Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 8 Aug 2019 14:03:52 +0700 Subject: [PATCH 138/172] Added L2 information to Algorithm. --- ...ntel_Xeon_Gold_6146_x2_UMA_linux_2_0_4.xml | 246 ++++++++++++++++++ src/backend/cpu/CpuBackend.cpp | 6 +- src/backend/cpu/CpuLaunchData.cpp | 2 +- src/backend/cpu/CpuWorker.cpp | 6 +- src/backend/cpu/platform/HwlocCpuInfo.cpp | 2 +- src/crypto/common/Algorithm.cpp | 22 +- src/crypto/common/Algorithm.h | 3 +- 7 files changed, 277 insertions(+), 10 deletions(-) create mode 100644 doc/topology/Intel_Xeon_Gold_6146_x2_UMA_linux_2_0_4.xml diff --git a/doc/topology/Intel_Xeon_Gold_6146_x2_UMA_linux_2_0_4.xml b/doc/topology/Intel_Xeon_Gold_6146_x2_UMA_linux_2_0_4.xml new file mode 100644 index 00000000..fe94194c --- /dev/null +++ b/doc/topology/Intel_Xeon_Gold_6146_x2_UMA_linux_2_0_4.xml @@ -0,0 +1,246 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 7f208c38..e03046dd 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -99,13 +99,13 @@ public: tag, profileName.data(), threads.size(), - algo.memory() / 1024 + algo.l3() / 1024 ); workers.stop(); status.reset(); - status.memory = algo.memory(); + status.memory = algo.l3(); status.threads = threads.size(); for (const CpuLaunchData &data : threads) { @@ -317,7 +317,7 @@ 
rapidjson::Value xmrig::CpuBackend::toJSON(rapidjson::Document &doc) const hugepages.PushBack(pages[1], allocator); out.AddMember("hugepages", hugepages, allocator); - out.AddMember("memory", static_cast(d_ptr->algo.isValid() ? (ways * d_ptr->algo.memory()) : 0), allocator); + out.AddMember("memory", static_cast(d_ptr->algo.isValid() ? (ways * d_ptr->algo.l3()) : 0), allocator); if (d_ptr->threads.empty() || !hashrate()) { return out; diff --git a/src/backend/cpu/CpuLaunchData.cpp b/src/backend/cpu/CpuLaunchData.cpp index 6fa458aa..a01f22a6 100644 --- a/src/backend/cpu/CpuLaunchData.cpp +++ b/src/backend/cpu/CpuLaunchData.cpp @@ -43,7 +43,7 @@ xmrig::CpuLaunchData::CpuLaunchData(const Miner *miner, const Algorithm &algorit bool xmrig::CpuLaunchData::isEqual(const CpuLaunchData &other) const { - return (algorithm.memory() == other.algorithm.memory() + return (algorithm.l3() == other.algorithm.l3() && assembly == other.assembly && hugePages == other.hugePages && hwAES == other.hwAES diff --git a/src/backend/cpu/CpuWorker.cpp b/src/backend/cpu/CpuWorker.cpp index 000d7061..14ef1797 100644 --- a/src/backend/cpu/CpuWorker.cpp +++ b/src/backend/cpu/CpuWorker.cpp @@ -62,7 +62,7 @@ xmrig::CpuWorker::CpuWorker(size_t index, const CpuLaunchData &data) : m_miner(data.miner), m_ctx() { - m_memory = new VirtualMemory(m_algorithm.memory() * N, data.hugePages); + m_memory = new VirtualMemory(m_algorithm.l3() * N, data.hugePages); } @@ -184,7 +184,7 @@ void xmrig::CpuWorker::start() const Job &job = m_job.currentJob(); - if (job.algorithm().memory() != m_algorithm.memory()) { + if (job.algorithm().l3() != m_algorithm.l3()) { break; } @@ -283,7 +283,7 @@ template void xmrig::CpuWorker::allocateCnCtx() { if (m_ctx[0] == nullptr) { - CnCtx::create(m_ctx, m_memory->scratchpad(), m_algorithm.memory(), N); + CnCtx::create(m_ctx, m_memory->scratchpad(), m_algorithm.l3(), N); } } diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 
d88b4207..047c97e8 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -249,7 +249,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith size_t L2 = 0; int L2_associativity = 0; size_t extra = 0; - const size_t scratchpad = algorithm.memory(); + const size_t scratchpad = algorithm.l3(); int intensity = algorithm.maxIntensity() == 1 ? -1 : 1; if (cache->attr->cache.depth == 3 && isCacheExclusive(cache)) { diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index f0670b26..30a7ad9c 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -146,7 +146,27 @@ rapidjson::Value xmrig::Algorithm::toJSON() const } -size_t xmrig::Algorithm::memory() const +size_t xmrig::Algorithm::l2() const +{ +# ifdef XMRIG_ALGO_RANDOMX + switch (m_id) { + case RX_0: + case RX_LOKI: + return 0x40000; + + case RX_WOW: + return 0x20000; + + default: + break; + } +# endif + + return 0; +} + + +size_t xmrig::Algorithm::l3() const { const Family f = family(); assert(f != UNKNOWN); diff --git a/src/crypto/common/Algorithm.h b/src/crypto/common/Algorithm.h index 0b817bde..92fcc61e 100644 --- a/src/crypto/common/Algorithm.h +++ b/src/crypto/common/Algorithm.h @@ -104,7 +104,8 @@ public: int maxIntensity() const; rapidjson::Value toJSON() const; - size_t memory() const; + size_t l2() const; + size_t l3() const; static Family family(Id id); static Id parse(const char *name); From b82181b9c59e9fd746eb5c4720b04b7abfad71a8 Mon Sep 17 00:00:00 2001 From: XMRig Date: Thu, 8 Aug 2019 21:03:29 +0700 Subject: [PATCH 139/172] #1103 Added L2 checks for RandomX auto configuration. 
--- src/backend/cpu/platform/HwlocCpuInfo.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 047c97e8..491305ec 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -289,6 +289,12 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith } # endif +# ifdef XMRIG_ALGO_RANDOMX + if (extra == 0 && algorithm.l2() > 0) { + cacheHashes = std::min(std::max(L2 / algorithm.l2(), cores.size()), cacheHashes); + } +# endif + if (cacheHashes >= PUs) { for (hwloc_obj_t core : cores) { const std::vector units = findByType(core, HWLOC_OBJ_PU); From ce370bf7212b054955f0deb6352ed590fcf4a1cb Mon Sep 17 00:00:00 2001 From: xmrig Date: Thu, 8 Aug 2019 22:28:27 +0700 Subject: [PATCH 140/172] Update CHANGELOG.md --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 87cd1d4a..f2e3b081 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,10 @@ # v2.99.5-beta - [#1066](https://github.com/xmrig/xmrig/issues/1066#issuecomment-518080529) Fixed crash and added error message if pool not ready for RandomX. - [#1092](https://github.com/xmrig/xmrig/issues/1092) Fixed crash if wrong CPU affinity used. -- JSON arrays in config and API now more compact, single line if possible. +- [#1103](https://github.com/xmrig/xmrig/issues/1103) Improved auto configuration for RandomX for CPUs where L2 cache is limiting factor. +- [#1105](https://github.com/xmrig/xmrig/issues/1105) Improved auto configuration for `cn-pico` algorithm. +- Added alternative short format for CPU threads. +- Changed format for CPU threads with intensity above 1. # v2.99.4-beta - [#1062](https://github.com/xmrig/xmrig/issues/1062) Fixed 32 bit support. **32 bit is slow and deprecated**. 
From 647cbef43cdc9da428cb8e942461f30ce7ea87ea Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 9 Aug 2019 01:00:21 +0700 Subject: [PATCH 141/172] uv_mutex replaced to std::mutex. --- src/backend/cpu/CpuBackend.cpp | 19 ++++------ src/crypto/common/Nonce.cpp | 14 +++----- src/net/JobResults.cpp | 65 +++++++++++++++------------------- 3 files changed, 39 insertions(+), 59 deletions(-) diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index e03046dd..863fa075 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -23,7 +23,7 @@ */ -#include +#include #include "backend/common/Hashrate.h" @@ -83,13 +83,6 @@ public: inline CpuBackendPrivate(Controller *controller) : controller(controller) { - uv_mutex_init(&mutex); - } - - - inline ~CpuBackendPrivate() - { - uv_mutex_destroy(&mutex); } @@ -119,9 +112,9 @@ public: Algorithm algo; Controller *controller; LaunchStatus status; + std::mutex mutex; std::vector threads; String profileName; - uv_mutex_t mutex; Workers workers; }; @@ -233,7 +226,7 @@ void xmrig::CpuBackend::setJob(const Job &job) void xmrig::CpuBackend::start(IWorker *worker) { - uv_mutex_lock(&d_ptr->mutex); + d_ptr->mutex.lock(); const auto pages = worker->memory()->hugePages(); @@ -254,7 +247,7 @@ void xmrig::CpuBackend::start(IWorker *worker) ); } - uv_mutex_unlock(&d_ptr->mutex); + d_ptr->mutex.unlock(); worker->start(); } @@ -299,10 +292,10 @@ rapidjson::Value xmrig::CpuBackend::toJSON(rapidjson::Document &doc) const out.AddMember("asm", false, allocator); # endif - uv_mutex_lock(&d_ptr->mutex); + d_ptr->mutex.lock(); uint64_t pages[2] = { d_ptr->status.hugePages, d_ptr->status.pages }; const size_t ways = d_ptr->status.ways; - uv_mutex_unlock(&d_ptr->mutex); + d_ptr->mutex.unlock(); # ifdef XMRIG_ALGO_RANDOMX if (d_ptr->algo.family() == Algorithm::RANDOM_X) { diff --git a/src/crypto/common/Nonce.cpp b/src/crypto/common/Nonce.cpp index 45c7001a..151819e0 100644 --- a/src/crypto/common/Nonce.cpp +++ 
b/src/crypto/common/Nonce.cpp @@ -23,7 +23,7 @@ */ -#include +#include #include "crypto/common/Nonce.h" @@ -37,7 +37,7 @@ std::atomic Nonce::m_sequence[Nonce::MAX]; uint32_t Nonce::m_nonces[2] = { 0, 0 }; -static uv_mutex_t mutex; +static std::mutex mutex; static Nonce nonce; @@ -51,8 +51,6 @@ xmrig::Nonce::Nonce() for (int i = 0; i < MAX; ++i) { m_sequence[i] = 1; } - - uv_mutex_init(&mutex); } @@ -60,7 +58,7 @@ uint32_t xmrig::Nonce::next(uint8_t index, uint32_t nonce, uint32_t reserveCount { uint32_t next; - uv_mutex_lock(&mutex); + std::lock_guard lock(mutex); if (nicehash) { next = (nonce & 0xFF000000) | m_nonces[index]; @@ -71,20 +69,16 @@ uint32_t xmrig::Nonce::next(uint8_t index, uint32_t nonce, uint32_t reserveCount m_nonces[index] += reserveCount; - uv_mutex_unlock(&mutex); - return next; } void xmrig::Nonce::reset(uint8_t index) { - uv_mutex_lock(&mutex); + std::lock_guard lock(mutex); m_nonces[index] = 0; touch(); - - uv_mutex_unlock(&mutex); } diff --git a/src/net/JobResults.cpp b/src/net/JobResults.cpp index bf0b5e86..40c2e50b 100644 --- a/src/net/JobResults.cpp +++ b/src/net/JobResults.cpp @@ -25,6 +25,7 @@ #include #include +#include #include @@ -40,75 +41,65 @@ namespace xmrig { class JobResultsPrivate { public: - inline JobResultsPrivate() + inline JobResultsPrivate(IJobResultListener *listener) : + listener(listener) { - uv_mutex_init(&m_mutex); + async = new uv_async_t; + async->data = this; - m_async = new uv_async_t; - m_async->data = this; - - uv_async_init(uv_default_loop(), m_async, JobResultsPrivate::onResult); + uv_async_init(uv_default_loop(), async, JobResultsPrivate::onResult); } inline ~JobResultsPrivate() { - Handle::close(m_async); - - uv_mutex_destroy(&m_mutex); - } - - - void setListener(IJobResultListener *listener) - { - m_listener = listener; + Handle::close(async); } void submit(const JobResult &result) { - uv_mutex_lock(&m_mutex); - m_queue.push_back(result); - uv_mutex_unlock(&m_mutex); + mutex.lock(); + 
queue.push_back(result); + mutex.unlock(); - uv_async_send(m_async); + uv_async_send(async); } private: - static void onResult(uv_async_t *handle) - { - static_cast(handle->data)->submit(); - } + static void onResult(uv_async_t *handle) { static_cast(handle->data)->submit(); } inline void submit() { std::list results; - uv_mutex_lock(&m_mutex); - while (!m_queue.empty()) { - results.push_back(std::move(m_queue.front())); - m_queue.pop_front(); + mutex.lock(); + + while (!queue.empty()) { + results.push_back(std::move(queue.front())); + queue.pop_front(); } - uv_mutex_unlock(&m_mutex); + + mutex.unlock(); for (auto result : results) { - m_listener->onJobResult(result); + listener->onJobResult(result); } results.clear(); } - IJobResultListener *m_listener = nullptr; - std::list m_queue; - uv_async_t *m_async; - uv_mutex_t m_mutex; + IJobResultListener *listener; + std::list queue; + std::mutex mutex; + uv_async_t *async; }; -static JobResultsPrivate *handler = new JobResultsPrivate(); +static JobResultsPrivate *handler = nullptr; } // namespace xmrig @@ -117,14 +108,16 @@ static JobResultsPrivate *handler = new JobResultsPrivate(); void xmrig::JobResults::setListener(IJobResultListener *listener) { - assert(handler != nullptr && listener != nullptr); + assert(handler == nullptr); - handler->setListener(listener); + handler = new JobResultsPrivate(listener); } void xmrig::JobResults::stop() { + assert(handler != nullptr); + delete handler; handler = nullptr; From 7a6790d0f6ae62a6e18e6ce637d8fd0bd7bf04cb Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 9 Aug 2019 01:50:31 +0700 Subject: [PATCH 142/172] #1106 Fixed hugepages field in API. 
--- src/backend/cpu/CpuBackend.cpp | 46 +++++++++++++++++++++++----------- src/backend/cpu/CpuBackend.h | 5 ++++ src/core/Miner.cpp | 18 +++++++++++-- 3 files changed, 52 insertions(+), 17 deletions(-) diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 863fa075..24600270 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -109,6 +109,14 @@ public: } + size_t ways() + { + std::lock_guard lock(mutex); + + return status.ways; + } + + Algorithm algo; Controller *controller; LaunchStatus status; @@ -135,6 +143,25 @@ xmrig::CpuBackend::~CpuBackend() } +std::pair xmrig::CpuBackend::hugePages() const +{ + std::pair pages(0, 0); + +# ifdef XMRIG_ALGO_RANDOMX + if (d_ptr->algo.family() == Algorithm::RANDOM_X) { + pages = Rx::hugePages(); + } +# endif + + std::lock_guard lock(d_ptr->mutex); + + pages.first += d_ptr->status.hugePages; + pages.second += d_ptr->status.pages; + + return pages; +} + + bool xmrig::CpuBackend::isEnabled() const { return d_ptr->controller->config()->cpu().isEnabled(); @@ -292,25 +319,14 @@ rapidjson::Value xmrig::CpuBackend::toJSON(rapidjson::Document &doc) const out.AddMember("asm", false, allocator); # endif - d_ptr->mutex.lock(); - uint64_t pages[2] = { d_ptr->status.hugePages, d_ptr->status.pages }; - const size_t ways = d_ptr->status.ways; - d_ptr->mutex.unlock(); - -# ifdef XMRIG_ALGO_RANDOMX - if (d_ptr->algo.family() == Algorithm::RANDOM_X) { - const auto rxPages = Rx::hugePages(); - pages[0] += rxPages.first; - pages[1] += rxPages.second; - } -# endif + const auto pages = hugePages(); rapidjson::Value hugepages(rapidjson::kArrayType); - hugepages.PushBack(pages[0], allocator); - hugepages.PushBack(pages[1], allocator); + hugepages.PushBack(pages.first, allocator); + hugepages.PushBack(pages.second, allocator); out.AddMember("hugepages", hugepages, allocator); - out.AddMember("memory", static_cast(d_ptr->algo.isValid() ? 
(ways * d_ptr->algo.l3()) : 0), allocator); + out.AddMember("memory", static_cast(d_ptr->algo.isValid() ? (d_ptr->ways() * d_ptr->algo.l3()) : 0), allocator); if (d_ptr->threads.empty() || !hashrate()) { return out; diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h index 9e71c247..bedef5ca 100644 --- a/src/backend/cpu/CpuBackend.h +++ b/src/backend/cpu/CpuBackend.h @@ -26,6 +26,9 @@ #define XMRIG_CPUBACKEND_H +#include + + #include "backend/common/interfaces/IBackend.h" @@ -43,6 +46,8 @@ public: CpuBackend(Controller *controller); ~CpuBackend() override; + std::pair hugePages() const; + protected: bool isEnabled() const override; bool isEnabled(const Algorithm &algorithm) const override; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index a1c65ed2..4406ce52 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -145,10 +145,24 @@ public: reply.AddMember("ua", StringRef(Platform::userAgent()), allocator); reply.AddMember("cpu", Cpu::toJSON(doc), allocator); - if (version == 1) { - reply.AddMember("hugepages", false, allocator); + Value hugepages; + + if (!backends.empty() && backends.front()->type() == "cpu") { + const auto pages = static_cast(backends.front())->hugePages(); + if (version > 1) { + hugepages.SetArray(); + hugepages.PushBack(pages.first, allocator); + hugepages.PushBack(pages.second, allocator); + } + else { + hugepages = pages.first == pages.second; + } + } + else { + hugepages = false; } + reply.AddMember("hugepages", hugepages, allocator); reply.AddMember("donate_level", controller->config()->pools().donateLevel(), allocator); Value algo(kArrayType); From be251a2ec19f7aeb78c33a3fea28b72f621e9207 Mon Sep 17 00:00:00 2001 From: xmrig Date: Fri, 9 Aug 2019 02:09:20 +0700 Subject: [PATCH 143/172] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2e3b081..f2ad4ed1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ - 
[#1092](https://github.com/xmrig/xmrig/issues/1092) Fixed crash if wrong CPU affinity used. - [#1103](https://github.com/xmrig/xmrig/issues/1103) Improved auto configuration for RandomX for CPUs where L2 cache is limiting factor. - [#1105](https://github.com/xmrig/xmrig/issues/1105) Improved auto configuration for `cn-pico` algorithm. +- [#1106](https://github.com/xmrig/xmrig/issues/1106) Fixed `hugepages` field in summary API. - Added alternative short format for CPU threads. - Changed format for CPU threads with intensity above 1. From 4583d979db793eabdaac399dd7fcae3f2cbd7c5a Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 9 Aug 2019 12:51:27 +0700 Subject: [PATCH 144/172] Fixed auto configuration without hwloc. --- src/backend/cpu/CpuThreads.cpp | 10 +++++++ src/backend/cpu/CpuThreads.h | 1 + src/backend/cpu/platform/AdvancedCpuInfo.cpp | 14 +++++++--- src/backend/cpu/platform/BasicCpuInfo.cpp | 28 +++++++++++++++++--- 4 files changed, 46 insertions(+), 7 deletions(-) diff --git a/src/backend/cpu/CpuThreads.cpp b/src/backend/cpu/CpuThreads.cpp index 2e8b9e1f..07e8ca33 100644 --- a/src/backend/cpu/CpuThreads.cpp +++ b/src/backend/cpu/CpuThreads.cpp @@ -110,6 +110,16 @@ xmrig::CpuThreads::CpuThreads(const rapidjson::Value &value) } +xmrig::CpuThreads::CpuThreads(size_t count, int intensity) +{ + m_data.reserve(count); + + for (size_t i = 0; i < count; ++i) { + add(-1, intensity); + } +} + + rapidjson::Value xmrig::CpuThreads::toJSON(rapidjson::Document &doc) const { using namespace rapidjson; diff --git a/src/backend/cpu/CpuThreads.h b/src/backend/cpu/CpuThreads.h index 461cc1f2..f8ad6430 100644 --- a/src/backend/cpu/CpuThreads.h +++ b/src/backend/cpu/CpuThreads.h @@ -42,6 +42,7 @@ public: inline CpuThreads(size_t count) : m_data(count) {} CpuThreads(const rapidjson::Value &value); + CpuThreads(size_t count, int intensity); inline bool isEmpty() const { return m_data.empty(); } inline const std::vector &data() const { return m_data; } diff --git 
a/src/backend/cpu/platform/AdvancedCpuInfo.cpp b/src/backend/cpu/platform/AdvancedCpuInfo.cpp index f8871d3c..de8ff272 100644 --- a/src/backend/cpu/platform/AdvancedCpuInfo.cpp +++ b/src/backend/cpu/platform/AdvancedCpuInfo.cpp @@ -112,7 +112,7 @@ xmrig::AdvancedCpuInfo::AdvancedCpuInfo() : xmrig::CpuThreads xmrig::AdvancedCpuInfo::threads(const Algorithm &algorithm) const { if (threads() == 1) { - return CpuThreads(1); + return 1; } # ifdef XMRIG_ALGO_CN_GPU @@ -132,7 +132,7 @@ xmrig::CpuThreads xmrig::AdvancedCpuInfo::threads(const Algorithm &algorithm) co } if (cache) { - const size_t memory = algorithm.memory(); + const size_t memory = algorithm.l3(); assert(memory > 0); count = cache / memory; @@ -145,5 +145,13 @@ xmrig::CpuThreads xmrig::AdvancedCpuInfo::threads(const Algorithm &algorithm) co count = threads() / 2; } - return CpuThreads(std::max(std::min(count, threads()), 1)); + int intensity = algorithm.maxIntensity() == 1 ? -1 : 1; + +# ifdef XMRIG_ALGO_CN_PICO + if (algorithm == Algorithm::CN_PICO_0 && (count / cores()) >= 2) { + intensity = 2; + } +# endif + + return CpuThreads(std::max(std::min(count, threads()), 1), intensity); } diff --git a/src/backend/cpu/platform/BasicCpuInfo.cpp b/src/backend/cpu/platform/BasicCpuInfo.cpp index 49d4b005..2f3177d2 100644 --- a/src/backend/cpu/platform/BasicCpuInfo.cpp +++ b/src/backend/cpu/platform/BasicCpuInfo.cpp @@ -194,13 +194,33 @@ xmrig::CpuThreads xmrig::BasicCpuInfo::threads(const Algorithm &algorithm) const } # endif - if (algorithm.family() == Algorithm::CN_LITE || algorithm.family() == Algorithm::CN_PICO) { - return count; +# ifdef XMRIG_ALGO_CN_LITE + if (algorithm.family() == Algorithm::CN_LITE) { + return CpuThreads(count, 1); } +# endif +# ifdef XMRIG_ALGO_CN_PICO + if (algorithm.family() == Algorithm::CN_PICO) { + return CpuThreads(count, 2); + } +# endif + +# ifdef XMRIG_ALGO_CN_HEAVY if (algorithm.family() == Algorithm::CN_HEAVY) { - return std::max(count / 4, 1); + return 
CpuThreads(std::max(count / 4, 1), 1); } +# endif - return std::max(count / 2, 1); +# ifdef XMRIG_ALGO_RANDOMX + if (algorithm.family() == Algorithm::RANDOM_X) { + if (algorithm == Algorithm::RX_WOW) { + return count; + } + + return std::max(count / 2, 1); + } +# endif + + return CpuThreads(std::max(count / 2, 1), 1); } From 5cb3ef90680e95fe96108d6a4d5e964b5ee87301 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 9 Aug 2019 14:26:00 +0700 Subject: [PATCH 145/172] Name for reference RandomX configuration changed to rx/text to avoid potential conflicts in future. --- CHANGELOG.md | 1 + doc/ALGORITHMS.md | 1 + src/base/net/stratum/Job.cpp | 9 ++++++++- src/crypto/common/Algorithm.cpp | 2 ++ 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f2ad4ed1..b67934bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - [#1106](https://github.com/xmrig/xmrig/issues/1106) Fixed `hugepages` field in summary API. - Added alternative short format for CPU threads. - Changed format for CPU threads with intensity above 1. +- Name for reference RandomX configuration changed to `rx/text` to avoid potential conflicts in future. # v2.99.4-beta - [#1062](https://github.com/xmrig/xmrig/issues/1062) Fixed 32 bit support. **32 bit is slow and deprecated**. diff --git a/doc/ALGORITHMS.md b/doc/ALGORITHMS.md index 07f92271..076eb2ff 100644 --- a/doc/ALGORITHMS.md +++ b/doc/ALGORITHMS.md @@ -44,6 +44,7 @@ Since version 3 mining [algorithm](#algorithm-names) should specified for each p | `cn-heavy/xhv` | 4 MB | CryptoNight-Heavy (modified, TUBE only). | | `cn-heavy/tube` | 4 MB | CryptoNight-Heavy (modified, Haven Protocol only). | | `cn-pico` | 256 KB | TurtleCoin (TRTL) | +| `rx/test` | 2 MB | RandomX (reference configuration). | | `rx/0` | 2 MB | RandomX (reference configuration), reserved for future use. | | `rx/wow` | 1 MB | RandomWOW (Wownero). | | `rx/loki` | 2 MB | RandomXL (Loki). 
| diff --git a/src/base/net/stratum/Job.cpp b/src/base/net/stratum/Job.cpp index 512b686e..04bd82d8 100644 --- a/src/base/net/stratum/Job.cpp +++ b/src/base/net/stratum/Job.cpp @@ -174,6 +174,13 @@ void xmrig::Job::copy(const Job &other) m_target = other.m_target; m_index = other.m_index; - memcpy(m_blob, other.m_blob, sizeof (m_blob)); + memcpy(m_blob, other.m_blob, sizeof(m_blob)); memcpy(m_seedHash, other.m_seedHash, sizeof(m_seedHash)); + +# ifdef XMRIG_PROXY_PROJECT + m_rawSeedHash = other.m_rawSeedHash; + + memcpy(m_rawBlob, other.m_rawBlob, sizeof(m_rawBlob)); + memcpy(m_rawTarget, other.m_rawTarget, sizeof(m_rawTarget)); +# endif } diff --git a/src/crypto/common/Algorithm.cpp b/src/crypto/common/Algorithm.cpp index 30a7ad9c..db6cb234 100644 --- a/src/crypto/common/Algorithm.cpp +++ b/src/crypto/common/Algorithm.cpp @@ -107,6 +107,8 @@ static AlgoName const algorithm_names[] = { { "cryptonight_turtle", "cn_turtle", Algorithm::CN_PICO_0 }, # endif # ifdef XMRIG_ALGO_RANDOMX + { "randomx/test", "rx/test", Algorithm::RX_0 }, + { "randomx/0", "rx/0", Algorithm::RX_0 }, { "randomx/0", "rx/0", Algorithm::RX_0 }, { "RandomX", "rx", Algorithm::RX_0 }, { "randomx/wow", "rx/wow", Algorithm::RX_WOW }, From feda02bf5021fde035d58ee062f15cb76e0dbc83 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 9 Aug 2019 15:36:34 +0700 Subject: [PATCH 146/172] Disable HW AES for RandomX on ARM. 
--- src/crypto/rx/RxVm.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/crypto/rx/RxVm.cpp b/src/crypto/rx/RxVm.cpp index 6426443a..275f9558 100644 --- a/src/crypto/rx/RxVm.cpp +++ b/src/crypto/rx/RxVm.cpp @@ -33,9 +33,11 @@ xmrig::RxVm::RxVm(RxDataset *dataset, uint8_t *scratchpad, bool softAes) { +# ifndef XMRIG_ARM if (!softAes) { m_flags |= RANDOMX_FLAG_HARD_AES; } +# endif if (dataset->get()) { m_flags |= RANDOMX_FLAG_FULL_MEM; From 24c293e58ec6c2e5021114ee90f550413a0b68d7 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 9 Aug 2019 15:46:10 +0700 Subject: [PATCH 147/172] v2.99.5-beta --- src/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.h b/src/version.h index e2c62435..de875f2d 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.5-evo" +#define APP_VERSION "2.99.5-beta" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" From 4b3409958674d01c68d75bcf3858fd7ae6463474 Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 9 Aug 2019 17:20:20 +0700 Subject: [PATCH 148/172] v2.99.6-evo --- src/version.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/version.h b/src/version.h index de875f2d..59577d2a 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.5-beta" +#define APP_VERSION "2.99.6-evo" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" @@ -36,7 +36,7 @@ #define APP_VER_MAJOR 2 #define APP_VER_MINOR 99 -#define APP_VER_PATCH 5 +#define APP_VER_PATCH 6 #ifdef _MSC_VER # if (_MSC_VER >= 1920) From b43d8ca845929eba800e4fc9f78cea7534d25e5f Mon Sep 17 00:00:00 2001 From: XMRig Date: Fri, 9 Aug 2019 19:41:33 +0700 
Subject: [PATCH 149/172] Added screenshot.png --- doc/screenshot.png | Bin 0 -> 33480 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 doc/screenshot.png diff --git a/doc/screenshot.png b/doc/screenshot.png new file mode 100644 index 0000000000000000000000000000000000000000..28e1e3a1c642c90527bd210829a0565c54082eef GIT binary patch literal 33480 zcmb@tcUY6l*EWhsFH%GSr3eIR0s<->La)*hP>P6vC=iMe5J*7jQlv;xN~9MBX#x@m zy%!OXPNYT%9TG}Na>CyGx8L_W-}&C_T<1FTPm+0_#qPBYO?i(^RtvnAyr!toZW1 zI#P%C&jmh~DlKYeDZ8(l8ol$B3X4zgJw_?kz1+{Pgd08P`L=hN%-;Uv1F_&TRBgUStL~u*PMWJb3gAv8Anh zn<4Z8OT6YC&!Y$fJcxfo?$N-Ny1^D#oK*LY>?@Iy{aQK2(o1;1@jM(+~f-!-kIj$hYy6~BVfH`~HI`x+Il+j68y9FjwrG0~@EcN%Y9ULpeMD=AESl*vPBHwt0?mjQ+*k|%pRR)07k*aj2b2Cof& zMJBNEpL@)WDB{{OCg3hSN?pO8VfKx#{<~TV|1zab<-J9^G_D5QT*n6zCPpJW5enzy z*p;PY8yzco_}mh7-D$d}{G(&1?zM%)`>^o#%GWiGkw3zKfnj)4Sg*R49}O5fhLPjI$-S8aFI$8^vvkj~EaeR$Hzk46 zeg2VNO7GV2Y9Y29W?@P^HR;iz2n*PFo}KHD7aN*(JnfDg;yCvgt#9_91m|9e1fFPy z<*~)Tq+y!9-dbW_dwrn-DDPv!=9jf9RF8`blzrO2n={^remve!tt6Z)XcFAruE$e3!kl4JPA|#)>ACkiSFLZB##iJpc`Q3aT{%whyc%HTC;Kt z!UO(ZQ9Jh_BUGs#G=a*(PwCzTBr9Fs00ab!(W=41)$`q%fZ{RQ0#A!IVVgWxMx;iW zC9$#L0aYbOno+6KdZ#EXr=O-P)99;syJtzEJqgC)0i2W2OMBJCp^Rahs+>gK6LeQK zT*%w8_1DBPD~19c%{4;NXRdoa=l=Pu$XD-|nA^uRZhqzGD}b^*?g282%Vb$Xd=}@j z1PNN30}Lri6o`lyB7SGYS+foodA+hB=SZ+|>j$R1&YV!yoZF0e-(LAk{>@9AFl4Ih(gtO@XqTdIqG?w4-g0=^O{fb^~mqaO{hbZ7@|_od(XT7Y5(HK z-+vRS_lC(omzYUV_jd~;2XbzXItN_XmIM+>+vm87ypT0y^wMh*?G3)Ax;^i@UDKwQT-(j;WDD{vsqolOS6P^+ zNp-k=`Vy~c6^9ttF5J0^3D)YD$stxE5Ic%FU(h-R1On=ySj7R1p4{QXopegileP>y zO=|4FDDsm@w?CS3Xs%JwR>nhQcD36(j-DpfI7=Qs7*Cdu>Rna7DB*SH7OVxf9Vtm4 z$Xjl#jhB$Hi2Ou!&+W;n%tA?`PKaeV-1hR@o&ok(_gZR+mA?=Wrm1@^974l;D%7NX z(*DM*>Vv}FpCu2=Bsubp>Dhd*KH`nCsj24_is7{V#6~OeEVuKHlI=Fb{)34JiSlg~ z6g94M#{36LO*l952nSg*s0^tDX0FEuJTwkm9yIH*HPnDe9 zZmr{!{y@S$MgQa+(wIe9Jpc!ug^hg%Ln*RwJUrk8Y6Ds{p^-<<>~#UyZ1F9QD0QJ% z@5yVD2OzM9@xLM=*_Vw{JbBnsXN;WRI@4Ea#@mq}afb{ZCAPu}Lyz@4XcRe_6Zm|} zCRt$9?Y}*!*n~<`FR7U(KXr_LZv}gL%|vRn0dFS^9+Y7%V!6egpW>bT6C0Y5muyf! 
zG=A_NLfBR!BGh2al&7*c9)#epKnSTi?|7V24YZPFnzHuML>nHqBIP&$cM{1z{Jb@Cu;4$t(3mUtlJfr1&eh2J7>2c#sDhjkI3RFw4%#+d zAK0?}izvOIym1Fh8W#ytcI7tB)1?S8XPbmJyw*8TkUT!w8NaugN;%VCND@Z=52E~w zE&mdkf4R;7icO*5js_*{HK}`yl%%h=*upIGino7WYDUGcHB>cK*IASix>^ptHO8c~ z4$d4i43w#8yv!dQ2~GES@@#ANw|pR2po9H#L2AQOi>(fi>o2r}_TT!K69&=99EYHM zeZBsJ%|^vEhh~IB&v#Ck43=}bGXhX}hRPK+o*-@^bBDE))!!$CI9)bfp370n`GPlI znSE)N=untIxa`x+O`7%rW9svy-JP5S2PP6$)A=bt~=L`2lG4cqtd~Et^O8u_ihFsC*E1Ps= zDh4nMcww4VIj|H9MO|-=rI4}zK8Wx2PR_|rs!x|oZ(&#H>|3rWh~X|==CvbU+DNP) zS)C58O>nZ;u;KL|l>;*vOH8E4q>LaSb_lmSQW9|T)$C<7{`2*#K5C+X8x3)}&|0N@ z0_c>WG+%F_8o=PwxMe#sb-FX@+tY;VD737|wQ3?3@UdpdO&)!ty=FWwkM~8L41EmV zEV+EwvW-zu?75|}IH**TRXCEvM0}#UI4xU9C1|X~0s7*8+_lw*x^+x-4(4E1rz>@3 zkBARU%eB#G&)tm2zv<6veb0W5m))c#QyC)Rt6v(h&LVwNVbDjVcbobb;TaGXoDH0I zv3;+kuGGsW(9KY=6)M#eDzEV;b5Zj4wrzoMii_?}k`q z4kwV3sUN%tyZx!Q&2^6H2rR`E`|c;L{1TwB;fBd@U9nZ7bzY75dK*c8l7xF-i6g|;Gq{A96)oHmZae&YF|xjD=DvTHBGJ@?7OQ=25yxX z7rj=#(2AlZLA`=@%EWlgDI>47P~?Z2U1}Qg)iFx_I|ckVO~`q!>T}MteP3B$_PEYr zB&V~u={)CUf;up=b7;MBx96o15psf2q#bMFTq!4C#XJE_c-y7H2Dnva<6ImrJrGV` ziv{*o5I?r4)-U@-&&0R5yk5yR`O&&}Lf*ArjA^_TMA$a+>HR8aa5&O@WYv7_n7~|~ zGfQVrCaZzqwpTntufHrZ$ulIS$K`w;UVvn5aLNlSh5=j zuBT^$nbx0fbm^ykjEG-|JfFP{`Kmz$R5_uhASpx#=B}<8(C%8@Jp#HSjQouc1bJV~ zh6-x%nHRASp>2lba%juv>^jwhAX!J2F+lm4lyaV6WJv#3E}GANrevZ0+e?X3ahjL> z$4P&-47d=YChdBE{4`-)t9SjzXW#C#WovxLPx72jm?NTI%fG#NTW5E33w@>gGXSze z@)*7p{f@OfdaY~J9hWX(o5M#2+@Ek0geac|WIDG1e21PK$-2v> zC+3RD3>Amtrw$wsJ<60UiSLjqR$u zyVs|Lh8FGyzM<9AU*8vNdf=c4ng8))e$I0?^FD2mK<97mR4}eS@B0=Bw8y|kjLvZmhycou#^2<;y^*DCk)R`|0s9EHRPk;aW_j z+o!D8Z)z8f2N$$qIy0_3c6hmg@mLhKd{Pv7*YMQ=Xfw=UY%P}7Qyj|=yCmX zTHB!WbkS3AfX%5>O2V@4|E(3+WXX`j9NtjqDEkSa)zcuLuiAt=*-ux0AiAYZ+F zI*ENSMdc-arEd6mN=s8_RGKN%FREFn&UW^xYZ$8s(V4C=F?8~#lY8AfpR_)u{-jwG zZE{&*OL-!+X(V`_lHwuZ_~p)DG%JjJoAXEcp1Iv!6Qw5pAWH2vhSM+0vdcl3cCXTS z=gCn2C-3_GUk`5}pEK51xz^Z96R>)WtysKkuzyp_x7ASet+2efOw< zb%U9!k-SSr1w@X=h5>m*WuUmPj!!nj%unT)GO!JuxY6i(%K-}7t z2!@`|kcQ3V84%4G;=5WwBKF$qGhznWu7eSlEfE(QyXzRf+<5PI_J-=tG9ZTMvhXi{ 
zic0KKiV#Ydai!t=e9*DxS05 z9)`lcvI+|KGG@U6H8b@PP%Z`SRT1#U$(C>Qm zJg|-~^*IsjFJN2oq@7uOW2)-IKUyTeS&^>kuN}X z{Ct4YFPBE73_okXZ0+?&ALb0aO*y+BL81h$Z~hLET5^Fb%JF4|4>02@H%z`Evf=DJ zPJsupn+r)PKu18Tt+GU1!D2|?6=A5;GpldFz~*Qv?JW9avm`T3&7ZS9f14>d7Feof zfI(|XJ}?*BkYo}5c-yf8xEKlUqSJd#y-FLFhxy1aC`j8DBNIvew>^SGlFzt1j!2<) zk|cjb2t#@f0Nf`@lZptwt*IpZt+-E;?0oByL?DtZ=Pfp^Af!jo4MqXpbH;Wl^Vun^ zyhSn#FM$lH66x5Cl7FczPF`Hyd*1es4Ioj}|8aW%<5Kwh=g)WM-?RQNKMI8X({sb*f#|5Y6Qwu&LBL?1F7Z?4MXuftcL2Pz7TtDER_se_9F9)*i&_hG?_4lxP4Oy-|{9hsJA31wHI%1+XeO6+xl3hyTR#>umMcn?EFLel)H>VAHa&>16aLm$ z3hqmyT4uW`QP0r19VeZ)F8m=F??*=&(i#)LPK>RsH$_wDvo;f&HcOBY=UA3sJWyFe zjZ)a`)WXa=am)VueccA~u9#(on50zTba;$KDKbPSVm7mr4@n0gR^JnByoR3SId{Y2lVLDxELecFtFUbXCbowpSgZEk7%5sV$b@ z&Qm@VTT>gQs`aoartJSvxqn%sDPvuY&ul|2tojIA`djR>aGFjy_=H^WwXoY$3&W8r3AQlHgvZnr02{qtX(gezs+WM22uwMByHcT z@_);q&_v)~)BzXO)->>2{ES7T1FS(_NQ*pT{e2ICF07qGMDU1N^4nT|1GzKWp?gw` z+6W(V$aFcGtq-3A)sD7$2;=$`^ZrjsvA}18sPKt@j0=4OG%bc-B7vpWYqtyhUpNHD zbmP;h(>ZVI&FTNPpRFg{j0u`ojJ&`jT+iC39J7>6%u;)d{gnK=!Ni~E(y|UsMsjCF zE$-PP>(+WhU;kt!CYsa_-*stzr2!B^E!3(@pz!*gt;WzvbA%zho0hm zU;L(y{7XbXfB!#F&wsTVvj0%ef6<$aCRJ8o{5!Zmah8$i{AWmFsR=C0e{=ZWnept6 z7^qbzcW-9Xxydu{gt^0zmzVc&Z%=Jt{CD6vFhE`dO9KqVD<6cJ zZ4o7;317I|F~du_;<`8@@AJddZNUYDEYIdS~C()LfxIIE^4Lv=kr(bT0-?63g<_pKn+A$41$gSjs z<2&LGqzMC8EaZmiR65+Y;PR-G`(x9%8g zd;)SNET?Biy7|M!8SfwX1URO}MDjCzfOV*S)}`}o7Q&!=Ay!*Ed^dFi9UQ(@vZiTG*9LC-rF<3?=c>GP_@h`znRUYxcoAge1TVdu!9(BLO6z*#Ss2TUfq4C*`MgUK;dFHMa_73Xm7`n01ND8 z7aptrapyA>bnU}cMKR>%gKw&vUnJt{BWsqH zNv`a7F@5U8&oxD#v4bWk8a5&7h#LGdec1j4`RXkNaK3`{@_cpNJ~%v}kxE5InL_~6 zkGVZ=6zCdwVT?R4&&ZEO?J9_frqo{{+#D`lAy6T?_ct#{&+5OQMddfkYb}>ZDGT$~ z-l8Ng%zS23WIo(aXmZJORtb6|_x5v(&fx)>+Fm(T6_tG`WxYC(J-ExyNUsqnKl4RXN7-+h??EGjMoOoZbF~iZVz88d|bn%J2$(Y5>U6q&JCz+G!<_JFto({PQ z!dv+eF@<~roKV`3y#lc>$xEO~ox)@4K|wH|qo%3tsQipit1_t$-%>>=*@?91;BQ|W&;O-0FCD?z?* zkGpS+iZm{)pVaC(JrTGgqKAJM&t+2{t<7(PC?(XX3ifoI7blRB@vsVC0hO*!h?#rp1*rZd5fEDdM~V8_navDZ<;L#nqUcmH9(ea{NIm4%>Knw3T1=J4g3p@ 
zwuXn#y5YYlX7`qE11C>6p*sC`0+|EzkDky9^Ccb_(x%p_jG^;7`MyG~eYONVQhN@_ zONxHgGlhD6i8_g2@^O(Z@enRV@&0OvEbyX zp!YT>FQ^4_ZlGYRf0Jh7EW$i$%+iO=Js#w)c-i!a&l+9>tXraD`;^@cMQ1~&;i64o zK@HdxbZkp>4ERLV0QN&T#_FS5nQq;;Fn9YA37#)O0W^2I3$N%G$Nvzxm0&QUj7pA9 z#TH0;yyWrdo;40M2~Yjj)-R`bk`-les9AQoQ`8BL!5$l6a{ZKtWl}bfUuas*zdu|it-8~7nS6VlDBjy^jkaB z4{dcb{=DCk^2XiqCae=;4tIoXbHRpen4S1NN0qnTGWEO%Hk0S#Lg-E=07{$I+tW+8 zx3rk`#-fqs-cl!7Hf1blf(7%EUu4Re8kLzX%qzJ-*DEdEygegleK?!Xm@XBL?ciZE z+8=r%C1LUtl{=x@MhGEdv?%r6aD~ePf+i~H({n`IeM=E5SrFx&tiLMMhh!B#WBXYb z5$v4^oOTV$*W1r3ut7M5-aPPxBm-^ODW=p@r#=`vUQ1OMNiMpu9q`$cHZ=}g?<=QS z1klo%O5}kmt*`q=pauBsIZ>chnLWQz>U>EvJZ>BPmOk|*PJUS+0IwI89Ed+(5urf& zC?7pBy&360q(J4-;&p9~G}!`ynz@Jm)=APD#`p4j8AkObWIj{I*4Dg)rXGd=TJf|2 zHB#xA)n@j(OR#uIY~4y#a!?J*PME(c@)b90Lp77KlJguY`$}K{=quFS&(;~0z6Ec8 z^o_c34G`p%6jt{8YzP1njG=dn@3AJf5Kra$#7~f|_89R6$o|=zNy&t&sA$hJ z|8u|uNd>x{qo&HAWv5T2QqVg;;K7^D2jf|y&x>Ecd|rKvnwmswb-?TJ zY6D6p<^g^gWwzd@razUwGYUhvxN6K1H+Z9vD*9_$3eqe^B6=E z6mm67f&ZGIGC|1lznShok&5ZXC1s~YGyheZs|EfQO_TcfjsFpbUco3z2gYD74Uj6sST$XuAEj95T zyh=T(jOtPLPeT2pC=oi*g{!ow8qxmg6#C__HjZC$mH&dW)1^``Alg(sgEr!p%f1y? 
zd_!~N(#kw)=18W7?BbHWAt6Dw&@C=#J%psC(J|baY~^z4S^h~8_b20mVgt`d5gfnS zRud-uT3#V0JqMZq>x8D9q2FjHV9p zy*Cx9B)~VZ81XJPl~qT6`6*jU@@s1s-1k5&bzL(ZO2CxZTnp=PQGDq>nnaj21w@|5 zb8eXG^;kQN>-!Dvi!E{tlB{P3NGewk*D;umH+2@MU4Ysa93mG^_h|NR^`y5pHVQ3qF4tNd3-b@weRk zD|df~P`yf{F8t*9u$p1(3`_kXT`TH_xXh4I^w`!Zh-9ZQ#M|Tn^1@GUdg0d1da$`I z;!4jCMyWrFW#uCWq|~2(SarmBMkM^+=81#?N4jR@1+ju!O4?;WVXvn{iRJih4X+X> zRKTZHacd5t(n~CWVD#I#Q)ARnp_69u>`vBRy7<53yQn1HmYA@IKMFjOy=5Z$DS`0h z0+U|BnH=i=sn#^uPiMFa!!>y%j>r5*9)g3qCfK1*G`j20hBOa+?wODx3NfM*T_{oMUlSz&2{P_sP)0_aT<8E9nqZlA-}wxXw?pO-~=<7w8Gw%^n4CTGso9hyBK#$cCM zx1(RJzT#*wheRhSU1%#Ym*($X+JZI%W9E>Z^6%%6UvJ9=g${l*Tg9F{0}Z>rGBG-e z0VjY9NZh~sm}cD13idTk0n@rk=sE&@R+)YC)AUg`f<8a#;!nFnv!@{rd8mh`vk}U| z_2*C__zu9MOW;{!QtT|f-8rDTY|pxM1w;hgreoaOL!`3r4<3zw~rPLVl< z{U^H=wr37EP24X=(~iB#YrX>B7NY1^q@*iOV5`eoT}{2M{a!A}#H{<2@G<2GFi${A znD}fANFX7;id>@s70~ypA~L{o zB;3JL9J7EG3kP4={R|wH*&YR;T&nwzf@4^jVr!Ph?xb?gRbeqF_u2|b8y%@aBoP8R zih+kKzbBq08dinEKfX!cAd=EPbhqVYd_AB>W?ZV%Z>l-B<6b02yYRzL|x%`ur z(sqOSE1H~=&)zMJm5GJ!+L)n9>gdoX2e2r&cOU`uoG{iezDjgsOtEB}lMz3}nz(tDfA zdUUC=FR`&W$b?w5joxX5yzmGd+OY%O@+*0*cThPwz#;aktMVH}85^R4LW5#wnI>~pREmk{6>7jH&!i_t>g%A>e0F*um$Hm)ddkrd0DSJj1^ej* zN#!`ZI?t7n%?D%6&9%eAHpvf_;?+Jvm;CHrsc- z>H!DJQgbzc%9D@wbY&4g{~zL>mV1BVzeU{H7(wU8&gyO7rN1?bz9L+OLxYg_LVc50 z0?lAqSGjt{UAOtE;e#o^B?R4LL(Mby#57cyl~ke+8ka2BqLAwlu`5B=;&Mfj3?iWFcZO4kw_I*v zzOftBiYKWryZ&KI$P$)NM!3q8|aN2hFZ71!vB#BPR(*K8G{J-KH z{{}-a3s`tMC(ko`6>Rl%)1za!w}_gDw{ei6Pk;pXG7bzqQ+ z#4N(}PKVB8_G97r3aBcgPXTd;t;``d6F|5FZHHe8ofB22lYgcS%gdgZ%_f{e7&f1t z4N7;^RoP6+<;bp!5gERY#nBg#vgvHYrJ0SHx9bq#pzUB?r>Pvga@AzJg(P9 z$Q~P-n@?U}Zr{r9Jf83Io;O4CqD(V;jy$Hp&|sm%RcM?mSI|x)Qfvh6P|l>Zd_0UZB*ngWty^ zyod`>L}UeV8z;B`UEe_k<&x&uj0^5+aa;ZaF;<#H`uDhe%2xblzPh(}LLeAqJ3I*% zoI%J#+{2b+eiS)>3bZCJ+(9Gakqk8^cSILd&2J1fV2+C9-s?b@B)!=R)J5%i%#hJwa{}zkw-qW^*a^6GlzJqv>N` z;;)sbowfw!CLIrW@9c>?&EQ+VVGS&=>wmGc??F##0_O`=9zfd0Ctlzz>j*f@JCO2k z`4wME%i4K=)^C|LUy}=YwDqLR9X*aPf)@3gj~MTtjHn+4J#uXkqr)NoG@FBq_xy{M 
z%Av+oI=DHE^C763FpiTk%YhTVkG+2OqN%L3;-JUso26YzNhp zOMFUP2x*BGf)7t3>vf2~hL3gM7k8tYaFTwIzP+HFOi^qY@0dl;tL zFjI!p7lkKU9zjd_9zKMwpP*>M9qngnFk9s|&15o+rRYY0iS)(@903Nqt8&&j+%h@5 z_4dsHIpYv~OXlai)ElXsI6o(<{GAd{#iok>tVOwzPdbzi)2lrPFQrK#+40BWUmtnk00qg77J0)M@shXr8qrqIXE#s+?huS zO%OlsersuCX)_a|aJQ3FLC!-9Q9C)t*kkxT2pwZBN#m;02!~U_M}Q;yuse4Q4bnbk zBMHER>Oaf{&PCI--rwzrLkA;LyI@lv8`2HpG0f$%w5d&aqJ9j^WG`bgaA8}`EhW7dx_@0(~U=d&u=jZx$$Bn?-PwIMXU{Y!c6s) z;$@}XafnW~tmcgeIY(#VIZzKSMXJV8cFS4bo`Cqe{%cG^Ad5Vr`AF_0a3fbN@d$K3 z+pnd(YI7KfY-o8(T#G}F;}g?_y3_A!*qP2_7g0Bys#_(Dk3i3ZZ}vE>ADEs(`T|w* zkt1LgVjwaqbhT*GmBQKVVTY)cben_tg zdqpZ9IgwP*S$c-*W%?>VEHDj`k(W8H*~3w9B<2u9!lVaL2Tks}vLq=N6&cWlBd%@w z_UWu$r%#0$%ve=E+TH_ZskA1&IBNWwk2+kzD2tdpSe6&$DZ{*Z7XtAEe@LoXLinCS zKk5xP8jUbCihmG&K6{|kj#55lEBlps0c}L3Rts+>6U$~~8rp5-%S(%bsYZRJ#HlWy z1T_I;FqJ!vUxxV-jw&P<5H`z43BW!Mhc!U>Rf904%FZJ}bn~<97upS0PtKfGdD)SY zHY2W^*LR0es!nGr`)8V`s%TupWZ?n#8)QE)@MUmIBe8l!^{#Oq<@002>ZJuieJqHuxm9sixWxCzMObf zxN&coSy;{3VB-ufvDvN^S+=Frmv|!8z8#k?$-Y)j(Z6nMglCr|2_r2igv7zK6PH$< z4h>Ekd72|=4lp)iZcZ}s5L5wd^bRTQ`}BD)h*S<3U-_9UY5b^b$7YyC*s@>I2DQRb z3kpO$J#zTH0}3J3Q1oY`gyxA_nvIFN5}A4U)RX(dm~m%Rn@#aOTP*IyMpU$f+2m$J2&*4T9T)R$T1;8cN^$wEypx9ni69KKYoVXOAe zVZ!o-UFB9-lLOBVGK)F!<77HqhjaNT(llMBMO0|6<_if|;YvajBGM`~-42_lI|zsB zoRO!O+M05RaeG0ilfFMt*-it=Y9F}H7r?$%p-^oc;r>4e3xTZfk+?IULedd-cdSL} zOJ5%_ckS_UlBDK(NB^SSjCEROB>kdl6;?LM51hI97%w}bI<1<6%&zpNp^sh*#^3kG zEuN0`pbnwgcVO)`SGT3B34lEd)=A5-rALuIwJy)v`B;lyOxmsex|h5KIGK{JbV6RQ z;U-{>WiLDLcSeR=(a}IS%$Eb`4891s9$>@0>?=w4u3!#+*bNjd{P&G<`pg*iF2q%oJ#TJw zRHec)oe%~XTuo`s@&Wc<3_H=!EC4*#X_N3CUfuyWnMnuw^jdu7VaISdKRrOm9cIhlsSP08Gdh^H=i;#?3O}d17Fo@%`5Et0tz{{5 z<(hFRx+#cIO0bDPbF+DNt?0=`2jt{(245|oV=W$yg4Ppf7g)HqwIeLi)6Ks$=Cx$y z&K}8%Ucq1dD~+_a6hk?vw6VM(a$~`cco{D+UVkcVADUzmOTlh`5%AzVH+dbQ{6_i5 zCW~-=4pJxwB!Qn?C1e`}!CRWn)SS+Wp{fgu$|B$D7rY$Iwu}c1J1$<9f_S^XeLaTD zAQiMkSKYtuwWM_BsWeX_uvhhyR$jF&YnCax=ysigC!~AI)ij%X**J9OJ?)yj$4m3!?7vQ8?2}5myc3!M z_V0(>xn;`5x{s0l>3zE^HoW1HH^(}@8NQglcU`HHXD&0c4IEqm`eh^NIB1fR49FN$ 
zU1LaNO{mHeg#uwTj@h}Es_erIQ-M?q3aX{LB1x8r#kn6SPa@xR3;NUaC8m%POCM}P ztdh$AO&ns$uv_uL|DjgFminOU3ztj0`MIc2xRi{=kr0T+Dwt7D;yqqTA>?p-{}3GL z!>=3Jz7ypQC83y`|F+g2e7a=95N81COwP|qxN@$3|1M5&QCUw_l@q&kXN!fa7+ilB zGQz^+#$*YH=5NV%BN-)V%QAqAi$}i$$zxJ1`1glva4j!`b7ydtf31Se3+D=x5E}!0c&7YV|pX{rpi2Q2)FDFxkcu#RRqIm@8ZI zzn>~G(Y<)@Jbqwg!>{&o*Z{EvQ1lLW_Tza|)bwYfWK|EAr3APlG_A;A%HAnj1NYKa zJRE$)%c-33Q5%|iFLAlHVI=+D*AMuMc>^Eael1r@Q3MsSlov>8V4nOkuopg}tW;&b zRYMY<)B!Oy%Ws?}JK}sNsX9*HfWs-j`e{qlLw4e_Fm3wvyxCQd8frm5XyR>|!VaN~ zmZ;gCgKdyCNdfyW$;pdGckWSMF0EMBuY+6KNKh$C){;;QB%L^I8t_#w_}G2*WeeE& z7P)gZzlpns;wfQ(cIL{FyJt6cg~p(1)e85e7ngyW8}E!eunKqy=MPF-Br&6v2V{Td2=ne}wXDxmK5 zRvp6I2S0Qabs(Wb-aLKSkA0bKx$#I<6X@aD4m}%^;g>U$X1?nS6`4-41){Tshp}CV zS|{8F)~j%XRn$A+!7YZ1jg#aq>~xYYw9${q1vSV{)Zs>xhoL~)c*fkv3%T+&3yup1 zmHO>eMpPwk7CJ6)$^l`hPINl%lT#7(8(oq8!G_FLEcH$C8ytFq0-CL(R9KkwC}xKnMLDTx=ZI9nJvW({{g_9$EUyj|WC)zep=^?eYGR zoA#&MooG)6k23risem=w)d6{31HM1o;%M0Nm!A1%gKJ^6u&4rrhyn(>fwYuN?UJmU zuffd$4-qAyz?18g{R-FnZS0L`ojN-_LKg{gVd*tLeo#O{W9cFT8 zWo3SKU5h%7^R%}KB}O`NC1R4iQvs$;C4aPDOqV*=1ei|e8|{Pd^kwZ0g)0dnku1$f zUDzMaqi4y!GB&GiyKgh7yKMH{uJy;|TjNfHKR*6RLM>1(QGi^K>$S7E&+hYDog!=C z!jPIC2C9|qN?E}*qDzYN>wA#sZ^e5}0VddYk~~+gR6j~A6)bHr;ZovS>UyrW!N_O- zspWoT!2DHVBwwc(4?o+T8>A!4Bvs3Oh?@4MS_Pl|8ntK;l_hQE5n!Sva6S(073LH6 zB(gx)tl0Y4i?|vYuv!};*om$HR_*YP@uag}JdKJ9XwLYW9DNj=Jt8_{qx5(S=`y4v za%u0mJ&57_$X9m*Ce9_$#C-YGgWQ?E@md8bqeU3HEUd#lbZ2be+fgn0*(Eyqn?neD z$z{9zgBS|QfO^fIdTeiZ@?nI6SY_DD~yw<$2%GiGtBhX`=b??co8o zL>{JrY(U^Oy|=LsUnNg)dJE`fztSp`agh;Kd+xB2o&>Mitki*ZCa;j#%?n4N6ZTqF z@f;ii=>(-I#1nJ@eIN{@&c0LMe$p!pK!9O`%_&Ie-^_noGgZa<3{r{1lj9wlFe%j^ zWh$NuaO--^{dd|whsnGQ&+dErK=(ZD?>MKomO1#=TgUu4G!l+=7-y)-)^vE>QP^{IasrWm z3o8Kzcy2$BYun4NqlpQEELAxdA)fxFl_8#F_C5%t=tss&QqcS+kndh$KyD~@K;{o* zS`+a1kH@!tFd3VIJq57)xj{FYUjN$ zJdUD25J_RwqXHv|$VH&{@5aUK7T%(;>(lr01{OHawmx5ykivBn}i} zJJ6*UYSDgG`MrtONE8nDP;^7ESk%PcyD^qCn7vh!S-6zF|G)I9#Z!6`#E`_PPp1Ey zIJM5ybvG>86uO3Qe(|UA^(u}&6&g)ELQR=$j}UbUCmKt+Osvr8Xu`W=B(!!%{s^VY 
zO8nzAO+578cff-P$nT_}mJp~3Ymmw4*gRmnbH|3~Ys6LGVA-OmI`{m~ZWw)|$fzBs z%rB@RQXSgmXPlbDC*mo0wdgqvaf7Bla#oQC2x%2d-eordok*_W}Z2V5B@fQUUsok6e0t>xZm`ckMXA<{cp zTR>l7fk7h9N6SnO<;h*C>thAG&8mG-U(~E3EMVYma)1gq6ShZs6wZaLT{*aGXhhNV z7)q}?Jd%!Aj;pT}`g#+6!b`WnGr0>buzlA-; z+g=THBRiJ=n{_dLoeVu{CaYEwW@H>oc7BGcSN#FMr9k19=_WDfcE{$wt6DhEay6#G zj}-m$1ZZJr^!23)u!R*;2ewNjygQp@W+R>%Boj9IHP86Wi1@@Q)hHMXlw=3*=+yf# zI4P%&*?}g~vQ@VFZ!fQf&2HP&Bplg29ZYb|<9wD={CawFq69QpwGmh2_-pa~0Sr3H zfZHkV3Q?)M@Dnb*phtP6`p2=w5AVj(w#u7)mb(mc=ThGZIaR$aT971}Ye{3_Oln#S8`o47Y-e+b88N9eEyJ&Lj$Hq5@{85sr1WDtdY-lV||UX&Bn#@?DaMZ z>_{5SsN=I{*)v0VNBiLu@?EiW3KrFDeWQT!w*XNK5trbfa7OlVSt*&cYh~VmhFg{X zrC_^%QLx#+DOf~})!!T}I1_l1zLHDSf(j70o~*922s((HY;xTZmp)>GJf^DCgym9b z|B~-&uAAN|Xd++5Vu~p`5r7F|9>Q^nNzd~vo_NG_UdWrgX!Cy+_ts%iz3acQG$;)Q zE!|Q|NGb>t(ukshhysGNzzi`XN-7~CT@unD4H82m(nyHJ&?q(Z0K?3@5BjZr_Wqq~ zpZEN6=AYqW!7#&m*1GTeQxA+U*Jx<}L}L=E`#DT6T>x45yh&B{1Fcod{>ON89*c$o znKtS}$p{&eTM3>*tYwT4CAWsdQct*)++G|x5SHZGC?3LsQ_@hMB&SLA!ck^Jf;4UP z^g6QajCXl1N)rrG=oTq>Tj+9H>73yTX56#~pTDcXHghTkfCfVRJ} zQ3iAo)hR3y%D#yzwG()<5xM$qZ9q=Lg#B7z6)L49j#3Fy73daTSiMTEFBVD|;OpjR zNb$72VSM_lV|geY!%&hM{6TtFTL02^wauOfC-Qx->N{L`H;0BL`r9tDi{NjCG6Pf7Q3Gc_PPd05%(;p|W zl`gxL|5e}Gx%2zj@&s{o6>1cYIR=#MEUk!rT#aM0r9rZhwpN4nBWx$3W|Py}u#PEx%S?LB06?5!#a7ou|tEy@}D0iUc2fFy-Tg zhwL_>g?rXp;lpYfk+=Vqkart)GZi^STN2diDNYtZ61}AD_Q+f}JY?+Ebz-0c^i%25 zdihl>TmQh|s^*?;tA}2kC=n|)+%=j!$mUNnu}?mlg`T$e@F;0+zjZGsqW>rUs-S+$ z?1QtF58xu#CM!KcojuJzKz1Q_rW1I?3V9Bdc|wyJx0Y!_;FY%HQ46&{4Gud6F-Z7f z$o5^rNpiZo*36@&jeG4CJ(urk7H?eejD8rGkzKX7^1M%u=eqb5&*Yt`;|fT}vJ$Bg zXX;Pd0q?Q2xIbQbG&x38)0{ut-lYsLqMbFV6J_(#>uS))WSeK%%JsaSa+a6Yc_7we zQuX|OPE1?&hR2O-PCQ+(^Jmp53$5C5co?c9~E zgMb`!7`D}U!N42!6P~G3TF(NQ$cy@ocA4tVZxE17^?IP1x(gJSn@7feFT_;BmHiq{=mgfo(M^PYcx-qr#~c};OV0N2}g-|skJ-&=Cm)tDTwX~O6DPb zb$jES($}&duAX_LADC?KbgOpPP)CLly9HOG9J4&!GdrBI=sx8f_&#Qn?i-1cR_3aV zbh+K7N)EXamU-^g%rTE)GZNGhH_pfeKT~s%o0r*FJEk{^ zf}+Y3SN0ADO7o5~7mp!9dl?;&kcOaO+ugP37%%*ApZdE-&l~OSWAD^cwt6Fd=Esk=#uKH;bz-kVay8{-SPR8hI| 
zXU~ZI(pL=x`Va26Do86`A5Ou}j7&Y%ILu$qwYSgD|JW4ruCoL0giik^m3nq$2se;s zt#((tK`%Hr)^1$BV_oQ_PHD5*%Db(og_H-%s9Z5d{;!hv>N@Mtd)`bnM7=bpV{Qws z92^-~Kf_j!I_}qG8eSf04gB%%*q45C@*E_Jd)1+sfWpSuye90Go4Q6 z=aEhXk&lp0PeORA>&^PnhRP#sVGmMlme;DZ^?bcqjjH`Lzc+1_0Ou#h%<_Tfe3!Mi zPI{Koh1NC5{lph^Y~nBaInv8$Ao;%4I78Kn3S2koO|^@ZJKxoFoh*7+NXS|2(8O9W zY+R>?v-5-$bq-mvUEUkT7kS&&zuU&n?{5u?>pEA#m2%QAX6Ge6VF!JH^Uvv$3kieMfyW0nyK4Go@(*EtiSy@%Y*@ZU&V zwUj_*kG0nIT-GVaCJkf5PS0PNYueO@i6At@_)rE`{j^e0K~UHUR=3WV|A>1TUyo-bYfT+9>bAoRR$B zt%M7ao_=VPpBPb`(pRo-#Zi8Jt(EEqemYe}9`VUl>}5YgBbo5a`)uym6=+zVPXr15?Zq8iA%@?5E5*yR>*#;B2Lxl#b9!%nsK!dCU&7 zLi}e;C(+Ievc-9;5AWqV=6@>Dlv67wlVMq9S~uuYmd0Tgmx+4IMa2_4q{*rMfYg)K z0X6#pK(PwR_gH$5lxiP8^zwg!CfZ0LSQw5Bebd^#=eBb5!KdmxjA!CS*J-QoJI}lz z4g}GSh^LWdJ%MD{5!rbOX{C)cWcj>?_w|ApKNOVa@nY&^F5bZJ(Qw*VSi4X@R6AvD zO#kDh{%>}CX1C`N2}{1LD2DJnOp@F29L!>m_-6DWOdgI-`hgj?nL&|y7pq7hX=2F& z3`7~jmb`_$O_xticBL6f2s6R3%A!%MhF5NrY`J>cM<$&W4kE@anKy zpPaN3yBs2?LvMA@e2Id5T`>=N8$U)7se(aG1*E2T-fp`128KL?c5M!Nt-Q;=&-={Z;M>-c!vW6VADh8;*2jIWy zCVn`PLMKGE^@$}j#SXD%+2G;}P_9qR?C*S@D)H#EGjY^zz3C&mU5V8!P0HXi!7=^n zUdw#H2~Uq7axzI}R_+4-8Ylfv+7c&W{iyzJm7O37qK&FJUi~!+e`}#Mf)am9zPQwF z4MtDG+V zh*S(WBII_5E8j?uUwksnfRbNWxYp%^C%#@U^j=qvu#Z~ssI}4V$XcqW`lcub;kjy(!k&J z#YK>V?26cTZMffPWZ(+i&d2!b^x!gKk=aTpS^smf#Et%9bGXq@vX5Ub!99cHQlwh` ztuc1$yOyWsL_q?dTlGj=al1Ljylp}jy+8!zt0sQUq^bxILjPNn3yfI$`j)+z5w28ZRB9p6bA|Qt*B`q)T`c|I zPCE0w^YyCn>8X93=TdCWvRWM680y|K#-5K}_=@iTX<-pn%TF;CRvR&e6me0Ib+|mc ziTrv{1BIbGB9;$#72xm0Q|$~Ez4gG2hwTyLJyOLIT=IUmq5x33vhSDH>4cTnsQ$n$ zad7BH^+OQ}P@%B~e(T&ZIUS*G*i5JV2O81YZiqNp_5wB*pnrER$yM|?Te1j>U7t$O zKE*L{vBvU&2K}e?5FK{7)}#Jgw~^1vOlU`@9N`X@I}flKRj6F474(Ef+B+VaCb7kW zSf>2|h-IewCNCtG3HCUli=~@Vxp#BZv+w-eUP>RT;m3_DW`Q=#oo+H@XD9DjADrL$ zCfSM$VxuUpMstUn z4IDjqL#zc7Wo$AfB3uS^q(t9b@(c_5z_hz$)UENndTBuYB~)Fz#@CZOtAOQ7;3~os zCx~7_gl?T*aD?}_*CMHGQ7>SS3D^PB3jHe9f)K~Aw zJbEsDUATElCga&L)n%5tHk+^#cbO%dSjS44yA?=+t(|IVE6g3;r0nc3s=vbs=<=FK z9|5-7a2i=(SMR?gncTrwV5OI`%v8e7lF*Ri-E9lJgP-XH8fn%lL@v`z6gL2bb6#eH 
zRJoOPGH)0|z+rs^FwW4FxS;nlTqh8x!(|yGT&MEt(MIZtqA&8jr%38+lGw(QPKoBQ zrb~{)yWbAT^(z(k)=O)Fo(VU#unG{)wW*&4O2RqZ5 zp<7paIG*(T7`{Ef0UZP&sM{T$>@RLJLt@mE2sh0r_#-GqYFH1L>x<3P#`4YZ>#Sd^ zjU6&R0EwP5`tVaJ9TPZJ1%&N3Jn0vPwm}xpN%f+*ZNnZQ%O%yi;DnoR|dyC2{6&HMw;w6XF0d@^$ zjN{(GbK|jsoKY_g<6$%Q#8j_p7{mbhPT!EcURFh2q4nYT_9uhe@&|*LS&afFDMh#D85R%X`h4bzPsHV;-ohJ;Y7NvI=cR})nz zrqb%St_=7*ja%?rdh+4DOUn(#d6}y)Lo3{nl#6P*z}C~n8>zJtvp;tz2)bVxg2iC= zY$4Dl>}pn_oxJb;Y}F3w!E&f`wRAyc!%i*!iM$%ositZ=f!-|T(Kt%~8Uoh^#1Gbx z%oJ|rrX|^k$C%y)h`#kanr4_o5sv}diB>N;#=NJtiD^_@UeA0UGckHe@8pv$Doh-` zP_Zmp_tXENq6uZ8UnHRUd^3n()NDF_@9GVwkSvf6Fw!r-a>HE-#C|>HGgIQS)EV45 ze@>=$UU;QJP%cLWODh00??B+^VJW6IVt9TNcDv~3EIIxX4`gyv5~FCj)D$|5Hfy`T zb&^U0xs!hRE2|DpsonEDp!34EbSvSK5C4?rj~UGxn36mm&t`i6UO20!{b@J3cqX9XTF`zK$on7)VeSIPTceDP2b zXxL5DzcKKbtnQ?r+<%(*&-_BmB%OYvWfCgMXRANF@7WBgEa|VN;Oj*e0l$zkmwpT9 zo!j)7fvQu%7z>M{GZ69cbLG4Ox+Ld~M1{geV2ZbWOQ zw+?nZVj_AXbuhch@1TZ!U@d#+WR$rTbp`I(*uk01J(OI65Oh@5#}m!BYTemu4>xs; z%2$jw^IU%7?<+pGY5wj<5%G@M?p2p5M#|gt_3JKOf+Z>2=A-HvK6;`u^y8w} zN-6l|L#8i9jG=BtfuN9&{cj?f;c{qRxyDr8C}h6`8`o9QRM&a77$Lo8-W2?@{~Za= zFfP1gGfD)J%wtiB*`DLu7c%d3x;|?<$aCnDvYA`wOR+8bxUgU^gE?gj$K>m9Q4^#U zG{XKVZ5J&C%;+2W`&&LdElo}6w(_1$y;I$kV8thqyoP70pMps#duDcw-t0q^L+5O^ z{by>T+(sT2;zK2-@E`RBA;A4R)9~-c-oL%Rf2yzk7G#|iEeN?t5H(n%$2SOh>w7+y z12MU*?JtX6U5Dx~iydd{qu^&e@irCy6G%^ZW9`2%bZrg@+*2j#=;U=?)13*#ZTKez z-H9|AJ1Ngl#64Ve923r`*lDQ7#oSsJ3oAv9_%JIEF7FIJ3GBVaJmsn4a~zK%ZY;^+vE#5DM!s==a6@CLSEBTPXdMWCpNx?mdcC$yjRJ#hrK!fO}>Ah|5xNlb_>8<_f`Ba5Qp+P5J~OXRC8Ruvo^I@03_$18@3Uaf0v}!Hklm@7+&`X$!X<& z$6?tM8I%E-xt~zK$ufpg-#=>i!+2iO+}0BjYc4m)gqcSmMmBctdFH_3`yWESfllI7 zBMANuCEl@8?&%A)Wf0FGBHg?z9>a9%m(Yo7aOg;kNu0t@yvLs9eWUM{g7XhQaJnnT z5q}}tvtSeuY~D6LeB#$~Z)Kl~!F$EOj=|$b_}r}QQ}^KjNMw^ zS+?~nxqY%#yqQZ_MBn9$+wM2%wmqGtn&G*BSuyY z+(LGBxXnfn#Xa9MVjvUPN7!%faKM|27)d7G{c31HT=Z_BGYkWGnGDtV*o37P+RVo% zQX-9}b<(jvq(qazK5L9+T3-${Y732ESsVk#E|WmN+uHX|4VUGFn^raJ1)b7-S6wE5 zhIE{HI?}}W!pqLRctHPb9oerKYg7tlZ4!W$ua!eZM!Veehc*54;?>;OB9ZxUXKX>ZG9;%TJe&#N-;E1=F* 
zHd$aq*V`K#4s*0+25m(DKkARqN&N}<-_@USqQT3Td%=+YWGu}nHQf&LgK{-xyBmnr zs>rS<5b`cM+>9YpxX8^Gc;6msHM+{!z#H|FkkG>W5oYM;fYcSX|OOv^Pvxqy-7}w%W>w= z7NKL6h4d8&7h>>{Y;*5~^`H3}S1Qo}JpefWWne0u9^^ThO8>NbjRL!u(jRuO!++Sl zw64RheCIY2xmYrmHU`U{Z|)3kf>=SovH&h2lj6=;9fewY4e@^^1C!^(Z9&JNc)& z9_Np^q~)|E9(&wBr#{@?QS_p9i=A3}y;S|GZm=n@mvD3axdz?)G9<6w5j%O`cl~0Y zQy*6uDS$NU4pZ~c)_>O0rJGURJ-_E2y&!h!iqH0=$MqJj8`{T*aMh?|Txgc6T28x> zBgFv(div7OV`cuf*A!0~5926cHitz#hhJ_PmNaxzV&Z7~Z$@#LHXC_jwoD2pR_z*c z)om)2kFM9hpd8Il-1#m!m(&Vt6Z#Vu6rk1Y4aNb`d4PiP>+zp! z%CAFz%|^c(5!Hus&%w86Kv3`Olfaa)FA5#kK(L$0r^zeqiNAbROy^Wxxe+mg^p0>O z_Hao(Z1ztF_j_lCYV1+Y;Wr)Es<`{TPRdPB%N6m($X@Xx-&~nm;h&jTOAV5&ry!yq zBd(iN52Gin?U%~Zm4UjufBA?njAi*r;Qai~F=bEA_K%&zACj$0d|?~%7nNW=p8nID zrG*N_R>?JQvy36pYE`iec{A#7P$FWp^*zqFruHYp8!GMVaahQ+j63yr=ZPLBSc0O7 zP}%Z*2rSnBYI4FT7yn5dBn=J$(Z{p=SX)HGGlLNw?ePx|e~EKzev5PcvQeupJ+z5_ z;ZpiCndK+0eEneiPWstOtXBkR@WMY;DXkoITQA)kF~DZs ztOXYy%N6U0{sF}=5WGU2!MsYjMmkSgdM1YX(Xx%1aIw-MTlWq(IR{lK{PFSYkd_RBN0IGH@(FQ zKV71Txrr475Zh&oO|t~jAnaBNfTW^>vJ1w>bxhNZMctX|j4aLx)Mq;)#Y=N^hxxaR zyQjbFmzI0L^&_-{gj=z5CuIGQM zia%CzUj2M_zM!)bb(`jvhWnHO0#_u>uPj32sMoNA^E*!h3b9!_L}}NYs2T0o`@b5A z08|dWBxM|RB2;4~sAjnzN1~^cg3-W-zG z^l7)v7ZO+)=D2u?g%D&{%#gXNUjVL*diN!M^P8fi#+|2?jS+pz8-X7HN^!vY3s&~+ zQVBj$mv(pB^*El`pgH^$g8t``iGzAe3u0W=D7WHhoA^cT(MY0Hyr_EzrOi~k)j+jX z1j1yv%7%Nn?BC|7&b{hb8S4h?!FybS0$^U*3HLHciKyk$2Yph6+65zLpTPE88yNmh z@8~@p;*}tkT0wo#Qk1@V9dT)vO%rN*{beVWSox}i+e7sgA_i8u>66e25Grk*_^Jz; zbFk4uTT&-6owu(?s`H*R)%OI?73xBNegmo)zkc$n(}$hVou8>OeHHr-`;>Mg`#DVg zpXB>J%ozjdu;(jwHLh}O1M9cf$oj0wNMgjJAV^qjq(zY&+l155 zqT=33-cT}aiE0~QOjD^<#m(^;)%v0N@(};P#YQ-8nzrOT15X&jEl>C5T+w1R*ct$* zu?$he&bG>`0q$7>tm_$cBQK|b070LreVTd%hfjxMy079_Ia+o)d1kj+vmWUK<4){I8 zzVdF$SJkt3tq=Z~AbJVg5eD3+5D95@23Gbw%gAwsEh*@A=lfl?cH*P(j`$7uom|Fx ze(;j!|6c9>QzYG=p5v2+qCgz03YxG}IIiFF$J`z(fX|-zi66|re@eY3xvk4B=o`A#3w-N1+^i<}pJ z?Jm%g#8Xz8e3L}R%*O^w0g7YTt=k&jrqJko$2_Er6}IgOytGIB)Tw%l^aG`jfF4%> z>1T<1$PTB#aV20t;V?yeb|95wLlS#!!DrBtHqov?>Qv?0eaL?HwdzlTTpb4hLn@d*8No{ 
zr|HrtPw?6oK*wnQ%I%{Ku8?W)Z8I}qr%CPq=hEifa6a{SX_F&dRL7^0+yFW!brQ`q zT9F2ubNgi~v!d>pzz{|Emigw$hRXMkY}Rq90ydQf_?rz1*7keqVTzR*nGK{POr2Ga zhwJfU38atz#{SyH7>DNN*A%; zST~j>Re67j2m$%q?Ea@UR-Uk3ve=qgm-FQq2DzVZY+T(S%_q`4WQ2?uMzgu6*b8KR zLH++MWt4Fpo(KP&!k&dKU%5ZH1A7ymHXtuief-ofqYVs~6oLLZ{rto$;YY=aElo$^ zEBEzIptY5^W%=v7-q}N(*-7L>49x%Avi7DgQue#tHjxq`l=?roCRP{qvp&3k3X^RU zUNi{Vhc}*IjBkFHCuJjBFuL{jdDZ*3LI`2~JNDieV+8dLrvufXO+=DIF4&^Qu)8G& zyRkP)XRs z3Dcn_n>Djfs^U6&{05_0l?Yt4Y z@%*4TY_S1+uzwjTtpB->LN36zp&D3a3@cM?Tqsr3W!b~NO2I)EFruU)CmUpAgPz=; z!j+0u$Y@xku@hq(=4h0E}^^R>CRUG&%A*gnU)u`>#5 z!l|~dmJkQif|!Sc`+lxw*KfN?4&*pz*h*472Odw|!!xzQChhblm7o7*Wb5M~WcWh& zo2Sdd8R0+4@d4985Q)~*O54aoNI+7X+4)%HnXP;^C*JrmkN~*);o--q8HWgMUFdoY zx=fmW(c7*z^0Onw>szwpn!6s*dYDtxaeH>4m3ipEL0_%d)-s+-a~mTSMNOA-POo;0 zGV6Lp^i~pk~%)^wb6q45Oa%Aq58ib;R^omM>tT$UN})Ptc;qgpQ^v{TX!b3 zBhvo;H=h+}=aA?ZSgBAEx=K5KQ#|Mn*wU(BM#heE430W>JF-F01SHk0k16+dT1VxC zF@6OMFCP4Bj2$$5K@`{iaGHmojPz3$Y+V})F(LhAXK$6}zV){Z`Yd(L`k4#Ubc4A! z6+RXg98&(EqQXbT2_PCZIT>CD|CsArwXnd~VU}&0HCOsBiP(Vsb1L^D8QU(Uy{Zk# z#rSlbSh~J?j+=m~6wL>YD_@*MCeR%Gm385Lt?j!L-^35c57+0xCK#Um$|K4JpW|Z#K3EE! zyK~gY-bvZ#t2>P(JL0$Z@z0gN7c-nvDQE^fLL>H2lOnvw>LA={b$=^m0e&?D*bPAt zRQItd>WZuTQY6LRZGx?hqqH9!?;A%nwg^LkzO-hO`C_T-lvpS7V4Z2r&pJY-cJFu^ z(imXKbLgcQ3SN-7kiM;|*P4mH?@YMOdnM(S>Z9oHY6Tj^EM|)urCHjr7l?_ngPU11 zv&RQ-?XfXb3CK1?M9(8(GN~H|k9?3YSYw@v=G5k2blS`^f${#!Ta~C$iz6<sbSk|fW;**=SV?hg-s`tGGr})O5a0*E2B{6@qpP9lc zFfoRwITbi0}saNi=S)vvpIS)aK}+<_)co!Xv8XKqXHMP1i{#t_S3Y zYEe|Vr}4s8S1n>mZawvML4@0e*w^WZrZuj(RSo6MQoVQg5Ta}vVelOSF_x2QPd->Crg+nN;TPXnxsj&ET+G|O z;?$oLC3y_fgy;?R>jgH*-ts2y{5jK}S$QQ{55*o?PY3a2Wr)U|Q}=8ZTvyS3pYL)MKF4f? 
zJ_JGYO|POpAvEF>hGrRbkge;#GsNQcHQ=Q$f_MyJ$!@a5JR72e{r+eB)}ZGyQPsmc z_YV4~fv~>Xsfg*Zx(`c9ZD$>qDVvp|ZIhsZ^K>sATj^%MxzcP5r0fT2}=v|aJ>d=(el8B#Z_;NnC)bz_IF$nIqZtQ@>*H_oF z7@^jaJ+2s{=ySB8Sy6xI41Wj>dWm6?rk$6V%X7u}RLJmqvmT}vG?gDL5(ZF2`C)qe ze-cn0Uvl+XOPvP9@#FqYF%rV|F{dR(?FM|=ws%_t`pvB3Y`c!NLKJ|EhHa>P>%t1g*fY@C`kw(*YoJ%gWYhJ&k>TZAYN~#BcC`D&hne=&#X;?n&wrq6xGB+(X~%d%%If+Wy;tu=EcHf-nUR#F+J%Yjj|51-#8Z zxw{*C1r%+q5rDgTrQ}$uUKuaKbN|bOM%I`TAe#DoKH~N1@uJS*C}Rr&YIf?aN0k@% zKw7gtl25hX@k|VgcJAy+xEU>o{&rRNtXg_S9DQ+&R&0mpo#Lx}|1s|h))gPRkhyrdn5fEh{gDZjsUUBAq3bW)%pqzfkd8TFDBoj9YNxLW%Jw} zE~49b2}?G_GSNE`_K*8czeAU=lz=lb-p^t+>7`z-kela%l?L&F_o;x;RUN-X5tA8i zo%r;y=Ho@R%fH+S^YhB{Q}k>|8Ux+_Mqk%9@2Wmp|VM!Tz?B}_OUL`VC3 zxcm{mAww~EJaxz%6#POKI6;$nq}AN+&tcOpA>&T%Q^bs@&QbN}o2_)Zk;mz!$GcjjdrA&;iHPCMcc`Dlv8+}8vDTSutMe{h5z zUYG6KR!L4njS0IQ+Z1Dl6OlcV19O$729~_@&hwaP!L@#fg*l@tGp|i_gvA zULt}8t3ofS;9pk!7CV6k@Gq6qzkvJT0C=3h9lstY9Q5r*FF6~L99+B3nTEzPJfa4x zC9pygONh^#!4Yhd{HB`ej3070J{erx=tMjvZGiUCn|HXAJMhFiJolgYH!tWoPj~%^ z7xYIxrNt3$S|0C>MeZ+$U6=|xw_2?H%I&X!3EvuJ1rQ z4DUmlNL05g-C9xgUM9g_DG$4_mnJQhaz@5byE!*@OH6<)%(?1 zK{ag>wtJNHb0|x_EA*Mo1?zYEu`XFQ&uB6WsC#Ezp;C1Jb0Gj$cZX}iW>>4yZ7hqKLY&f1%sCmo($aO|js7fz1w zN&J}f%MQbGFM^@+y1bOzSVFEp5?K$3--ycEtsISR7gxM9Fs~f;+3*L8+NgsS8_lodCgQ&(20Vh)QL>nv|2skTyo=m^7%oryA&06;&EssZC70LE> z{kC_pM5h#l0cd@W|G33o<$%)vx0s4Cz#waa!`A#9TT5xrANFG#gh3<*B0%r{9U!1usmgItPzyCpzi1$sx926m1|-wt~wx~ z=_4GsEh3xc;!}|vZ5&l&AQQLpz4$u_)xUsG3Mw8~t=!#tJB4V~>3(*>pj$_ECuAM0 zHL1341q8@LyRww{bhKZ?N=INJ@Pk0M>iw9kK9)!AxhC&v6LWxAaj#RYMe?itYP8%; zHo2Er;{3O-i9K*V)%5;QJm%Os4AHTzL%kEEC_Q#{ zpWc`uf^jEU#qSck#RlqO#B0_}~a?;yaZ`%AJ z!{VoG+c|u&uX5R|O73y+gR3Y?BC$z)07qtY^}+!r^3{;x*ohaDt9jG;&db>w*99l6 z%3d?q_8l+Yn_KdQZ|_8r(`?qDVSX8h|@15|j>nG%W@IF9gQX9X<2 zH9C>vCgEQD4b;pBfi8tSBWv%(^bO}r5|Wqa=M|)rEYHqrAJ!ed>2wkm)PCsGjUhW# zBc`ADl$sCjZS(l__4;F}4PKBH3y28aeJG>(Nk9dASP=Sj;o;C6aUA7j3r;y#clH4c zh>{1-U5N;SjB`BXyHBH~md>+tgMKlMOaF8S{OgZ|`w=f9baiDBGMznj6No*SmrDAu z;W+7oFZxz39M`w?gH1o;P$b{dvy?q-pwof3jl2?~%rt$W< 
zdX&%9UIbqkh+f^EpKq_>dE&!k#Ay!lS4;J=TifU$NOlK);lh<(@ibSpx#=@(Fo?)- zI*H28)Ai00xLMyD9Up}sVCx+EGkk2-Uqo!YI=XJiY8r9jyiNN-P4CW{VHm{%X07_YZM*b z!)uoIP_Fh{WsB1vFO=77`PU@dI77cRX%DBp%fgj1@4bpFY}|v|EsB(fL(T9F-=Lhx z3STLB^Mel_Qm#hjsG_K0Cakh`5cy)5vdrZ1k?Pp#fKi732#M)L-mJitL4)0CiC!YSOeEd4hVjTDBSd?;<<21OMk?QF__ix1z z&2(-zhA{^u%!b!IJ}KodB>$c9*z_7)y%B;R(EMHOU+A3`cz^xRzMzSJ8y?-?zL^dQ RQaCxTc|%vN`1-?_{~wQR>$3m= literal 0 HcmV?d00001 From 761f12e01a96bdbfe0c5f153670e929c61e0dcb8 Mon Sep 17 00:00:00 2001 From: xmrig Date: Fri, 9 Aug 2019 19:44:33 +0700 Subject: [PATCH 150/172] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5b6dc7a0..f6c58e55 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Originally based on cpuminer-multi with heavy optimizations/rewrites and removin * This is the **CPU-mining** version, there is also a [NVIDIA GPU version](https://github.com/xmrig/xmrig-nvidia) and [AMD GPU version]( https://github.com/xmrig/xmrig-amd). * [Roadmap](https://github.com/xmrig/xmrig/issues/106) for next releases. - + #### Table of contents * [Features](#features) From adbf66669e56fc20746f3c17dea94e43596d54b3 Mon Sep 17 00:00:00 2001 From: xmrig Date: Fri, 9 Aug 2019 20:09:00 +0700 Subject: [PATCH 151/172] Update README.md --- README.md | 64 ++----------------------------------------------------- 1 file changed, 2 insertions(+), 62 deletions(-) diff --git a/README.md b/README.md index f6c58e55..5d5af97b 100644 --- a/README.md +++ b/README.md @@ -7,39 +7,19 @@ [![GitHub stars](https://img.shields.io/github/stars/xmrig/xmrig.svg)](https://github.com/xmrig/xmrig/stargazers) [![GitHub forks](https://img.shields.io/github/forks/xmrig/xmrig.svg)](https://github.com/xmrig/xmrig/network) -XMRig is a high performance Monero (XMR) CPU miner, with official support for Windows. 
-Originally based on cpuminer-multi with heavy optimizations/rewrites and removing a lot of legacy code, since version 1.0.0 completely rewritten from scratch on C++. +XMRig is a high performance RandomX and CryptoNight CPU miner, with official support for Windows. * This is the **CPU-mining** version, there is also a [NVIDIA GPU version](https://github.com/xmrig/xmrig-nvidia) and [AMD GPU version]( https://github.com/xmrig/xmrig-amd). -* [Roadmap](https://github.com/xmrig/xmrig/issues/106) for next releases. #### Table of contents -* [Features](#features) * [Download](#download) * [Usage](#usage) -* [Algorithm variations](#algorithm-variations) * [Build](https://github.com/xmrig/xmrig/wiki/Build) -* [Common Issues](#common-issues) -* [Other information](#other-information) * [Donations](#donations) -* [Release checksums](#release-checksums) * [Contacts](#contacts) -## Features -* High performance. -* Official Windows support. -* Small Windows executable, without dependencies. -* x86/x64 support. -* Support for backup (failover) mining server. -* keepalived support. -* Command line options compatible with cpuminer. -* CryptoNight-Lite support for AEON. -* Smart automatic [CPU configuration](https://github.com/xmrig/xmrig/wiki/Threads). -* Nicehash support -* It's open source software. - ## Download * Binary releases: https://github.com/xmrig/xmrig/releases * Git tree: https://github.com/xmrig/xmrig.git @@ -95,48 +75,8 @@ Use [config.xmrig.com](https://config.xmrig.com/xmrig) to generate, edit or shar Also you can use configuration via config file, default name **config.json**. Some options available only via config file: [`autosave`](https://github.com/xmrig/xmrig/issues/767), [`hw-aes`](https://github.com/xmrig/xmrig/issues/563). `watch` option currently not implemented in miners only in proxy. -## Algorithm variations - -- `av` option used for automatic and simple threads mode (when you specify only threads count). 
-- For [advanced threads mode](https://github.com/xmrig/xmrig/issues/563) each thread configured individually and `av` option not used. - -| av | Hashes per round | Hardware AES | -|----|------------------|--------------| -| 1 | 1 (Single) | yes | -| 2 | 2 (Double) | yes | -| 3 | 1 (Single) | no | -| 4 | 2 (Double) | no | -| 5 | 3 (Triple) | yes | -| 6 | 4 (Quard) | yes | -| 7 | 5 (Penta) | yes | -| 8 | 3 (Triple) | no | -| 9 | 4 (Quard) | no | -| 10 | 5 (Penta) | no | - -## Common Issues -### HUGE PAGES unavailable -* Run XMRig as Administrator. -* Since version 0.8.0 XMRig automatically enables SeLockMemoryPrivilege for current user, but reboot or sign out still required. [Manual instruction](https://msdn.microsoft.com/en-gb/library/ms190730.aspx). - -## Other information -* No HTTP support, only stratum protocol support. -* Default donation 5% (5 minutes in 100 minutes) can be reduced to 1% via option `donate-level`. - - -### CPU mining performance -* **Intel i7-7700** - 307 H/s (4 threads) -* **AMD Ryzen 7 1700X** - 560 H/s (8 threads) - -Please note performance is highly dependent on system load. The numbers above are obtained on an idle system. Tasks heavily using a processor cache, such as video playback, can greatly degrade hashrate. Optimal number of threads depends on the size of the L3 cache of a processor, 1 thread requires 2 MB of cache. - -### Maximum performance checklist -* Idle operating system. -* Do not exceed optimal thread count. -* Use modern CPUs with AES-NI instruction set. -* Try setup optimal cpu affinity. -* Enable fast memory (Large/Huge pages). - ## Donations +* Default donation 5% (5 minutes in 100 minutes) can be reduced to 1% via option `donate-level` or disabled in source code. 
* XMR: `48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD` * BTC: `1P7ujsXeX7GxQwHNnJsRMgAdNkFZmNVqJT` From c0d2eeea2aa454c3f70fa28b6ad252c9d741387c Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 10 Aug 2019 00:54:45 +0700 Subject: [PATCH 152/172] Added algorithms.json. --- doc/data/algorithms.json | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 doc/data/algorithms.json diff --git a/doc/data/algorithms.json b/doc/data/algorithms.json new file mode 100644 index 00000000..b42fa82f --- /dev/null +++ b/doc/data/algorithms.json @@ -0,0 +1,32 @@ +{ + "current": [ + ["rx/test", "2 MB", "2.99.5+", "RandomX (reference configuration)."], + ["rx/0", "2 MB", "2.99.0+", "RandomX (reference configuration), reserved for future use."], + ["rx/wow", "1 MB", "2.99.0+", "RandomWOW."], + ["rx/loki", "2 MB", "2.99.0+", "RandomXL."], + ["cn/fast", "2 MB", "2.99.0+", "CryptoNight variant 1 with half iterations."], + ["cn/rwz", "2 MB", "2.14.0+", "CryptoNight variant 2 with 3/4 iterations and reversed shuffle operation."], + ["cn/zls", "2 MB", "2.14.0+", "CryptoNight variant 2 with 3/4 iterations."], + ["cn/double", "2 MB", "2.14.0+", "CryptoNight variant 2 with double iterations."], + ["cn/r", "2 MB", "2.13.0+", "CryptoNightR (Monero's variant 4)."], + ["cn/wow", "2 MB", "2.12.0+", "CryptoNightR (Wownero)."], + ["cn/gpu", "2 MB", "2.11.0+", "CryptoNight-GPU."], + ["cn-pico", "256 KB", "2.10.0+", "CryptoNight-Pico."], + ["cn/half", "2 MB", "2.9.0+", "CryptoNight variant 2 with half iterations."], + ["cn/2", "2 MB", "2.8.0+", "CryptoNight variant 2."], + ["cn/xao", "2 MB", "2.6.4+", "CryptoNight variant 0 (modified)."], + ["cn/rto", "2 MB", "2.6.4+", "CryptoNight variant 1 (modified)."], + ["cn-heavy/tube", "4 MB", "2.6.4+", "CryptoNight-Heavy (modified)."], + ["cn-heavy/xhv", "4 MB", "2.6.3+", "CryptoNight-Heavy (modified)."], + ["cn-heavy/0", "4 MB", "2.6.0+", "CryptoNight-Heavy."], + ["cn/1", "2 
MB", "2.5.0+", "CryptoNight variant 1."], + ["cn-lite/1", "1 MB", "2.5.0+", "CryptoNight-Lite variant 1."], + ["cn-lite/0", "1 MB", "0.8.0+", "CryptoNight-Lite variant 0."], + ["cn/0", "2 MB", "0.5.0+", "CryptoNight (original)."] + ], + "removed": [ + ["cn/msr", "2 MB", "2.6.3+", "Renamed to cn/fast, still supported as alias."], + ["cn/xtl", "2 MB", "2.6.1-2.16.0", "Coin forked to cn/half."], + ["cn-lite/ipbc", "1 MB", "2.6.1-2.6.3", "Coin forked to cn-heavy/tube."] + ] +} \ No newline at end of file From fdbae116af3ee8cb76ff16e45772b7158b107fe6 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 10 Aug 2019 00:57:26 +0700 Subject: [PATCH 153/172] Update ALGORITHMS.md --- doc/ALGORITHMS.md | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/doc/ALGORITHMS.md b/doc/ALGORITHMS.md index 076eb2ff..ab55bb74 100644 --- a/doc/ALGORITHMS.md +++ b/doc/ALGORITHMS.md @@ -23,28 +23,4 @@ Since version 3 mining [algorithm](#algorithm-names) should specified for each p ## Algorithm names -| Name | Memory | Notes | -|-----------------|--------|--------------------------------------------------------------------------------------| -| `cn/0` | 2 MB | CryptoNight (original) | -| `cn/1` | 2 MB | CryptoNight variant 1 also known as `Monero7` and `CryptoNightV7`. | -| `cn/2` | 2 MB | CryptoNight variant 2. | -| `cn/r` | 2 MB | CryptoNightR (Monero's variant 4). | -| `cn/wow` | 2 MB | CryptoNightR (Wownero). | -| `cn/fast` | 2 MB | CryptoNight variant 1 with half iterations. | -| `cn/half` | 2 MB | CryptoNight variant 2 with half iterations (Masari/Torque) | -| `cn/xao` | 2 MB | CryptoNight variant 0 (modified, Alloy only) | -| `cn/rto` | 2 MB | CryptoNight variant 1 (modified, Arto only) | -| `cn/rwz` | 2 MB | CryptoNight variant 2 with 3/4 iterations and reversed shuffle operation (Graft). | -| `cn/zls` | 2 MB | CryptoNight variant 2 with 3/4 iterations (Zelerius). | -| `cn/double` | 2 MB | CryptoNight variant 2 with double iterations (X-CASH). 
| -| `cn/gpu` | 2 MB | CryptoNight-GPU (RYO). | -| `cn-lite/0` | 1 MB | CryptoNight-Lite variant 0. | -| `cn-lite/1` | 1 MB | CryptoNight-Lite variant 1. | -| `cn-heavy/0` | 4 MB | CryptoNight-Heavy . | -| `cn-heavy/xhv` | 4 MB | CryptoNight-Heavy (modified, TUBE only). | -| `cn-heavy/tube` | 4 MB | CryptoNight-Heavy (modified, Haven Protocol only). | -| `cn-pico` | 256 KB | TurtleCoin (TRTL) | -| `rx/test` | 2 MB | RandomX (reference configuration). | -| `rx/0` | 2 MB | RandomX (reference configuration), reserved for future use. | -| `rx/wow` | 1 MB | RandomWOW (Wownero). | -| `rx/loki` | 2 MB | RandomXL (Loki). | +* https://xmrig.com/docs/algorithms From 843951266fd7a66f43929ea7f3b4b1b1e105468a Mon Sep 17 00:00:00 2001 From: Jethro Grassie Date: Fri, 9 Aug 2019 19:45:03 -0400 Subject: [PATCH 154/172] fix #1108 --- src/backend/cpu/CpuBackend.cpp | 4 ++-- src/backend/cpu/CpuBackend.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/backend/cpu/CpuBackend.cpp b/src/backend/cpu/CpuBackend.cpp index 24600270..77baeaf6 100644 --- a/src/backend/cpu/CpuBackend.cpp +++ b/src/backend/cpu/CpuBackend.cpp @@ -143,9 +143,9 @@ xmrig::CpuBackend::~CpuBackend() } -std::pair xmrig::CpuBackend::hugePages() const +std::pair xmrig::CpuBackend::hugePages() const { - std::pair pages(0, 0); + std::pair pages(0, 0); # ifdef XMRIG_ALGO_RANDOMX if (d_ptr->algo.family() == Algorithm::RANDOM_X) { diff --git a/src/backend/cpu/CpuBackend.h b/src/backend/cpu/CpuBackend.h index bedef5ca..2b907840 100644 --- a/src/backend/cpu/CpuBackend.h +++ b/src/backend/cpu/CpuBackend.h @@ -46,7 +46,7 @@ public: CpuBackend(Controller *controller); ~CpuBackend() override; - std::pair hugePages() const; + std::pair hugePages() const; protected: bool isEnabled() const override; From b41fd120d289980324ca8e9840a025ab87896268 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 10 Aug 2019 13:14:20 +0700 Subject: [PATCH 155/172] Uniform signature for hugePages method. 
--- src/crypto/rx/Rx.cpp | 4 ++-- src/crypto/rx/Rx.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/crypto/rx/Rx.cpp b/src/crypto/rx/Rx.cpp index 6df708be..8e757ddf 100644 --- a/src/crypto/rx/Rx.cpp +++ b/src/crypto/rx/Rx.cpp @@ -250,9 +250,9 @@ xmrig::RxDataset *xmrig::Rx::dataset(const Job &job, uint32_t nodeId) } -std::pair xmrig::Rx::hugePages() +std::pair xmrig::Rx::hugePages() { - std::pair pages(0, 0); + std::pair pages(0, 0); std::lock_guard lock(d_ptr->mutex); for (auto const &item : d_ptr->datasets) { diff --git a/src/crypto/rx/Rx.h b/src/crypto/rx/Rx.h index 7cb94b4e..1a383055 100644 --- a/src/crypto/rx/Rx.h +++ b/src/crypto/rx/Rx.h @@ -46,7 +46,7 @@ class Rx public: static bool isReady(const Job &job); static RxDataset *dataset(const Job &job, uint32_t nodeId); - static std::pair hugePages(); + static std::pair hugePages(); static void destroy(); static void init(); static void init(const Job &job, int initThreads, bool hugePages, bool numa); From 01e1c9f54d945c9eddf64d6fc892efc0f0e62db5 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 10 Aug 2019 14:48:43 +0700 Subject: [PATCH 156/172] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b67934bb..15ae35a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ - [#1106](https://github.com/xmrig/xmrig/issues/1106) Fixed `hugepages` field in summary API. - Added alternative short format for CPU threads. - Changed format for CPU threads with intensity above 1. -- Name for reference RandomX configuration changed to `rx/text` to avoid potential conflicts in future. +- Name for reference RandomX configuration changed to `rx/test` to avoid potential conflicts in future. # v2.99.4-beta - [#1062](https://github.com/xmrig/xmrig/issues/1062) Fixed 32 bit support. **32 bit is slow and deprecated**. 
From f346a772b3e8db35d8b8b70a83d305fab4b78c22 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 10 Aug 2019 14:57:02 +0700 Subject: [PATCH 157/172] Update README.md --- README.md | 91 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 47 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index 5d5af97b..ee499d47 100644 --- a/README.md +++ b/README.md @@ -26,55 +26,58 @@ XMRig is a high performance RandomX and CryptoNight CPU miner, with official sup * Clone with `git clone https://github.com/xmrig/xmrig.git` :hammer: [Build instructions](https://github.com/xmrig/xmrig/wiki/Build). ## Usage -Use [config.xmrig.com](https://config.xmrig.com/xmrig) to generate, edit or share configurations. +Preferend way to configure miner is [JSON config file](src/config.json) as more flexible and human frendly, command line interface not cover all features, for example mining profiles for different algorithms. Most impotant options can be changed in runtime without miner restart by editing config or via API. 
### Options ``` - -a, --algo=ALGO specify the algorithm to use - cryptonight - cryptonight-lite - cryptonight-heavy - -o, --url=URL URL of mining server - -O, --userpass=U:P username:password pair for mining server - -u, --user=USERNAME username for mining server - -p, --pass=PASSWORD password for mining server - --rig-id=ID rig identifier for pool-side statistics (needs pool support) - -t, --threads=N number of miner threads - -v, --av=N algorithm variation, 0 auto select - -k, --keepalive send keepalived packet for prevent timeout (needs pool support) - --nicehash enable nicehash.com support - --tls enable SSL/TLS support (needs pool support) - --tls-fingerprint=F pool TLS certificate fingerprint, if set enable strict certificate pinning - -r, --retries=N number of times to retry before switch to backup server (default: 5) - -R, --retry-pause=N time to pause between retries (default: 5) - --cpu-affinity set process affinity to CPU core(s), mask 0x3 for cores 0 and 1 - --cpu-priority set process priority (0 idle, 2 normal to 5 highest) - --no-huge-pages disable huge pages support - --no-color disable colored output - --variant algorithm PoW variant - --donate-level=N donate level, default 5% (5 minutes in 100 minutes) - --user-agent set custom user-agent string for pool - -B, --background run the miner in the background - -c, --config=FILE load a JSON-format configuration file - -l, --log-file=FILE log all output to a file - -S, --syslog use system log for output messages - --max-cpu-usage=N maximum CPU usage for automatic threads mode (default 75) - --safe safe adjust threads and av settings for current CPU - --asm=ASM ASM code for cn/2, possible values: auto, none, intel, ryzen. 
- --print-time=N print hashrate report every N seconds - --api-port=N port for the miner API - --api-access-token=T access token for API - --api-worker-id=ID custom worker-id for API - --api-id=ID custom instance ID for API - --api-ipv6 enable IPv6 support for API - --api-no-restricted enable full remote access (only if API token set) - --dry-run test configuration and exit - -h, --help display this help and exit - -V, --version output version information and exit + -a, --algo=ALGO specify the algorithm to use + cn/r, cn/2, cn/1, cn/0, cn/double, cn/half, cn/fast, + cn/rwz, cn/zls, cn/xao, cn/rto, cn/gpu, + cn-lite/1, + cn-heavy/xhv, cn-heavy/tube, cn-heavy/0, + cn-pico, + rx/wow, rx/loki + -o, --url=URL URL of mining server + -O, --userpass=U:P username:password pair for mining server + -u, --user=USERNAME username for mining server + -p, --pass=PASSWORD password for mining server + --rig-id=ID rig identifier for pool-side statistics (needs pool support) + -t, --threads=N number of miner threads + -v, --av=N algorithm variation, 0 auto select + -k, --keepalive send keepalived packet for prevent timeout (needs pool support) + --nicehash enable nicehash.com support + --tls enable SSL/TLS support (needs pool support) + --tls-fingerprint=F pool TLS certificate fingerprint, if set enable strict certificate pinning + --daemon use daemon RPC instead of pool for solo mining + --daemon-poll-interval=N daemon poll interval in milliseconds (default: 1000) + -r, --retries=N number of times to retry before switch to backup server (default: 5) + -R, --retry-pause=N time to pause between retries (default: 5) + --cpu-affinity set process affinity to CPU core(s), mask 0x3 for cores 0 and 1 + --cpu-priority set process priority (0 idle, 2 normal to 5 highest) + --no-huge-pages disable huge pages support + --no-color disable colored output + --donate-level=N donate level, default 5% (5 minutes in 100 minutes) + --user-agent set custom user-agent string for pool + -B, --background 
run the miner in the background + -c, --config=FILE load a JSON-format configuration file + -l, --log-file=FILE log all output to a file + --asm=ASM ASM optimizations, possible values: auto, none, intel, ryzen, bulldozer. + --print-time=N print hashrate report every N seconds + --api-worker-id=ID custom worker-id for API + --api-id=ID custom instance ID for API + --http-enabled enable HTTP API + --http-host=HOST bind host for HTTP API (default: 127.0.0.1) + --http-port=N bind port for HTTP API + --http-access-token=T access token for HTTP API + --http-no-restricted enable full remote access to HTTP API (only if access token set) + --randomx-init=N threads count to initialize RandomX dataset + --randomx-no-numa disable NUMA support for RandomX + --export-topology export hwloc topology to a XML file and exit + --dry-run test configuration and exit + -h, --help display this help and exit + -V, --version output version information and exit ``` -Also you can use configuration via config file, default name **config.json**. Some options available only via config file: [`autosave`](https://github.com/xmrig/xmrig/issues/767), [`hw-aes`](https://github.com/xmrig/xmrig/issues/563). `watch` option currently not implemented in miners only in proxy. - ## Donations * Default donation 5% (5 minutes in 100 minutes) can be reduced to 1% via option `donate-level` or disabled in source code. 
* XMR: `48edfHu7V9Z84YzzMa6fUueoELZ9ZRXq9VetWzYGzKt52XU5xvqgzYnDK9URnRoJMk1j8nLwEVsaSWJ4fhdUyZijBGUicoD` From 10072d545eb81ec5d3d7c5796d8a107df2fc3d62 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 10 Aug 2019 17:50:29 +0700 Subject: [PATCH 158/172] Added CHANGELOG_OLD.md --- doc/CHANGELOG_OLD.md | 343 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 343 insertions(+) create mode 100644 doc/CHANGELOG_OLD.md diff --git a/doc/CHANGELOG_OLD.md b/doc/CHANGELOG_OLD.md new file mode 100644 index 00000000..15ae35a9 --- /dev/null +++ b/doc/CHANGELOG_OLD.md @@ -0,0 +1,343 @@ +# v2.99.5-beta +- [#1066](https://github.com/xmrig/xmrig/issues/1066#issuecomment-518080529) Fixed crash and added error message if pool not ready for RandomX. +- [#1092](https://github.com/xmrig/xmrig/issues/1092) Fixed crash if wrong CPU affinity used. +- [#1103](https://github.com/xmrig/xmrig/issues/1103) Improved auto configuration for RandomX for CPUs where L2 cache is limiting factor. +- [#1105](https://github.com/xmrig/xmrig/issues/1105) Improved auto configuration for `cn-pico` algorithm. +- [#1106](https://github.com/xmrig/xmrig/issues/1106) Fixed `hugepages` field in summary API. +- Added alternative short format for CPU threads. +- Changed format for CPU threads with intensity above 1. +- Name for reference RandomX configuration changed to `rx/test` to avoid potential conflicts in future. + +# v2.99.4-beta +- [#1062](https://github.com/xmrig/xmrig/issues/1062) Fixed 32 bit support. **32 bit is slow and deprecated**. +- [#1088](https://github.com/xmrig/xmrig/pull/1088) Fixed macOS compilation. +- [#1095](https://github.com/xmrig/xmrig/pull/1095) Fixed compatibility with hwloc 1.10.x. +- Optimized RandomX initialization and switching, fixed rare crash when re-initialize dataset. +- Fixed ARM build with hwloc. + +# v2.99.3-beta +- [#1082](https://github.com/xmrig/xmrig/issues/1082) Fixed hwloc auto configuration on AMD FX CPUs. 
+- Added command line option `--export-topology` for export hwloc topology to a XML file. + +# v2.99.2-beta +- [#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via **hwloc**. +- Fixed miner freeze when switch between RandomX variants. +- Fixed dataset initialization speed on Linux if thread affinity was used. + +# v2.99.1-beta +- [#1072](https://github.com/xmrig/xmrig/issues/1072) Fixed RandomX `seed_hash` re-initialization. + +# v2.99.0-beta +- [#1050](https://github.com/xmrig/xmrig/pull/1050) Added RandomXL algorithm for [Loki](https://loki.network/), algorithm name used by miner is `randomx/loki` or `rx/loki`. +- Added [flexible](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) multi algorithm configuration. +- Added unlimited switching between incompatible algorithms, all mining options can be changed in runtime. +- Breaked backward compatibility with previous configs and command line, `variant` option replaced to `algo`, global option `algo` removed, all CPU related settings moved to `cpu` object. +- Options `av`, `safe` and `max-cpu-usage` removed. +- Algorithm `cn/msr` renamed to `cn/fast`. +- Algorithm `cn/xtl` removed. +- API endpoint `GET /1/threads` replaced to `GET /2/backends`. + +# v2.16.0-beta +- [#1036](https://github.com/xmrig/xmrig/pull/1036) Added RandomWOW (RandomX with different preferences) algorithm support for [Wownero](http://wownero.org/). + - Algorithm name used by miner is `randomx/wow` or `rx/wow`. + - Currently runtime algorithm switching NOT supported with other algorithms. + +# v2.15.4-beta +- Added global uptime and extended connection information in API. +- API now return current algorithm instead of global algorithm specified in config. +- This version also include all changes from stable version v2.14.4. + +# v2.15.3-beta +- [#1014](https://github.com/xmrig/xmrig/issues/1014) Fixed regression, default value for `algo` option was not applied. 
+ +# v2.15.2-beta +- [#1010](https://github.com/xmrig/xmrig/pull/1010#issuecomment-482632107) Added daemon support (solo mining). +- [#1012](https://github.com/xmrig/xmrig/pull/1012) Fixed compatibility with clang 9. +- Config subsystem was rewritten, internally JSON is primary format now. +- Fixed regression, big HTTP responses was truncated. + +# v2.15.1-beta +- [#1007](https://github.com/xmrig/xmrig/issues/1007) Old HTTP API backend based on libmicrohttpd, replaced to custom HTTP server (libuv + http_parser). +- [#257](https://github.com/xmrig/xmrig-nvidia/pull/257) New logging subsystem, file and syslog now always without colors. + +# v2.15.0-beta +- [#314](https://github.com/xmrig/xmrig-proxy/issues/314) Added donate over proxy feature. + - Added new option `donate-over-proxy`. + - Added real graceful exit. + +# v2.14.4 +- [#992](https://github.com/xmrig/xmrig/pull/992) Fixed compilation with Clang 3.5. +- [#1012](https://github.com/xmrig/xmrig/pull/1012) Fixed compilation with Clang 9.0. +- In HTTP API for unknown hashrate now used `null` instead of `0.0`. +- Fixed MSVC 2019 version detection. +- Removed obsolete automatic variants. + +# v2.14.1 +* [#975](https://github.com/xmrig/xmrig/issues/975) Fixed crash on Linux if double thread mode used. + +# v2.14.0 +- **[#969](https://github.com/xmrig/xmrig/pull/969) Added new algorithm `cryptonight/rwz`, short alias `cn/rwz` (also known as CryptoNight ReverseWaltz), for upcoming [Graft](https://www.graft.network/) fork.** +- **[#931](https://github.com/xmrig/xmrig/issues/931) Added new algorithm `cryptonight/zls`, short alias `cn/zls` for [Zelerius Network](https://zelerius.org) fork.** +- **[#940](https://github.com/xmrig/xmrig/issues/940) Added new algorithm `cryptonight/double`, short alias `cn/double` (also known as CryptoNight HeavyX), for [X-CASH](https://x-cash.org/).** +- [#951](https://github.com/xmrig/xmrig/issues/951#issuecomment-469581529) Fixed crash if AVX was disabled on OS level. 
+- [#952](https://github.com/xmrig/xmrig/issues/952) Fixed compile error on some Linux. +- [#957](https://github.com/xmrig/xmrig/issues/957#issuecomment-468890667) Added support for embedded config. +- [#958](https://github.com/xmrig/xmrig/pull/958) Fixed incorrect user agent on ARM platforms. +- [#968](https://github.com/xmrig/xmrig/pull/968) Optimized `cn/r` algorithm performance. + +# v2.13.1 +- [#946](https://github.com/xmrig/xmrig/pull/946) Optimized software AES implementations for CPUs without hardware AES support. `cn/r`, `cn/wow` up to 2.6 times faster, 4-9% improvements for other algorithms. + +# v2.13.0 +- **[#938](https://github.com/xmrig/xmrig/issues/938) Added support for new algorithm `cryptonight/r`, short alias `cn/r` (also known as CryptoNightR or CryptoNight variant 4), for upcoming [Monero](https://www.getmonero.org/) fork on March 9, thanks [@SChernykh](https://github.com/SChernykh).** +- [#939](https://github.com/xmrig/xmrig/issues/939) Added support for dynamic (runtime) pools reload. +- [#932](https://github.com/xmrig/xmrig/issues/932) Fixed `cn-pico` hashrate drop, regression since v2.11.0. + +# v2.12.0 +- [#929](https://github.com/xmrig/xmrig/pull/929) Added support for new algorithm `cryptonight/wow`, short alias `cn/wow` (also known as CryptonightR), for upcoming [Wownero](http://wownero.org) fork on February 14. + +# v2.11.0 +- [#928](https://github.com/xmrig/xmrig/issues/928) Added support for new algorithm `cryptonight/gpu`, short alias `cn/gpu` (original name `cryptonight-gpu`), for upcoming [Ryo currency](https://ryo-currency.com) fork on February 14. +- [#749](https://github.com/xmrig/xmrig/issues/749) Added support for detect hardware AES in runtime on ARMv8 platforms. +- [#292](https://github.com/xmrig/xmrig/issues/292) Fixed build on ARMv8 platforms if compiler not support hardware AES. 
+ +# v2.10.0 +- [#904](https://github.com/xmrig/xmrig/issues/904) Added new algorithm `cn-pico/trtl` (aliases `cryptonight-turtle`, `cn-trtl`) for upcoming TurtleCoin (TRTL) fork. +- Default value for option `max-cpu-usage` changed to `100` also this option now deprecated. + +# v2.9.4 +- [#913](https://github.com/xmrig/xmrig/issues/913) Fixed Masari (MSR) support (this update required for upcoming fork). +- [#915](https://github.com/xmrig/xmrig/pull/915) Improved security, JIT memory now read-only after patching. + +# v2.9.3 +- [#909](https://github.com/xmrig/xmrig/issues/909) Fixed compile errors on FreeBSD. +- [#912](https://github.com/xmrig/xmrig/pull/912) Fixed, C++ implementation of `cn/half` was produce up to 13% of invalid hashes. + +# v2.9.2 +- [#907](https://github.com/xmrig/xmrig/pull/907) Fixed crash on Linux. + +# v2.9.1 +- Restored compatibility with https://stellite.hashvault.pro. + +# v2.9.0 +- [#899](https://github.com/xmrig/xmrig/issues/899) Added support for new algorithm `cn/half` for Masari and Stellite forks. +- [#834](https://github.com/xmrig/xmrig/pull/834) Added ASM optimized code for AMD Bulldozer. +- [#839](https://github.com/xmrig/xmrig/issues/839) Fixed FreeBSD compile. +- [#857](https://github.com/xmrig/xmrig/pull/857) Fixed impossible to build for macOS without clang. + +# v2.8.3 +- [#813](https://github.com/xmrig/xmrig/issues/813) Fixed critical bug with Minergate pool and variant 2. + +# v2.8.1 +- [#768](https://github.com/xmrig/xmrig/issues/768) Fixed build with Visual Studio 2015. +- [#769](https://github.com/xmrig/xmrig/issues/769) Fixed regression, some ANSI escape sequences was in log with disabled colors. +- [#777](https://github.com/xmrig/xmrig/issues/777) Better report about pool connection issues. +- Simplified checks for ASM auto detection, only AES support necessary. +- Added missing options to `--help` output. 
+
+# v2.8.0
+- **[#753](https://github.com/xmrig/xmrig/issues/753) Added new algorithm [CryptoNight variant 2](https://github.com/xmrig/xmrig/issues/753) for Monero fork, thanks [@SChernykh](https://github.com/SChernykh).**
+ - Added global and per thread option `"asm"` and command line equivalent.
+- **[#758](https://github.com/xmrig/xmrig/issues/758) Added SSL/TLS support for secure connections to pools.**
+ - Added per pool options `"tls"` and `"tls-fingerprint"` and command line equivalents.
+- [#767](https://github.com/xmrig/xmrig/issues/767) Added config autosave feature, same as GPU miners.
+- [#245](https://github.com/xmrig/xmrig-proxy/issues/245) Fixed API ID collision when running multiple miners on the same machine.
+- [#757](https://github.com/xmrig/xmrig/issues/757) Fixed send buffer overflow.
+
+# v2.6.4
+- [#700](https://github.com/xmrig/xmrig/issues/700) `cryptonight-lite/ipbc` replaced with `cryptonight-heavy/tube` for **Bittube (TUBE)**.
+- Added `cryptonight/rto` (cryptonight variant 1 with IPBC/TUBE mod) variant for **Arto (RTO)** coin.
+- Added `cryptonight/xao` (original cryptonight with bigger iteration count) variant for **Alloy (XAO)** coin.
+- Better variant detection for **nicehash.com** and **minergate.com**.
+- [#692](https://github.com/xmrig/xmrig/issues/692) Added support for specifying both algorithm and variant via a single `algo` option.
+
+# v2.6.3
+- **Added support for new cryptonight-heavy variant xhv** (`cn-heavy/xhv`) for upcoming Haven Protocol fork.
+- **Added support for new cryptonight variant msr** (`cn/msr`) also known as `cryptonight-fast` for upcoming Masari fork.
+- Added new detailed hashrate report.
+- [#446](https://github.com/xmrig/xmrig/issues/446) Likely fixed SIGBUS error on 32 bit ARM CPUs.
+- [#551](https://github.com/xmrig/xmrig/issues/551) Fixed `cn-heavy` algorithm on ARMv8.
+- [#614](https://github.com/xmrig/xmrig/issues/614) Fixed display issue with huge pages percentage when colors disabled.
+- [#615](https://github.com/xmrig/xmrig/issues/615) Fixed build without libcpuid.
+- [#629](https://github.com/xmrig/xmrig/pull/629) Fixed file logging with non-seekable files.
+- [#672](https://github.com/xmrig/xmrig/pull/672) Reverted `cryptonight-light` and exit if no valid algorithm specified.
+
+# v2.6.2
+ - [#607](https://github.com/xmrig/xmrig/issues/607) Fixed donation bug.
+ - [#610](https://github.com/xmrig/xmrig/issues/610) Fixed ARM build.
+
+# v2.6.1
+ - [#168](https://github.com/xmrig/xmrig-proxy/issues/168) Added support for [mining algorithm negotiation](https://github.com/xmrig/xmrig-proxy/blob/dev/doc/STRATUM_EXT.md#1-mining-algorithm-negotiation).
+ - Added IPBC coin support, base algorithm `cn-lite` variant `ipbc`.
+ - [#581](https://github.com/xmrig/xmrig/issues/581) Added support for upcoming Stellite (XTL) fork, base algorithm `cn` variant `xtl`, the variant can be set now, no need to do it after the fork.
+ - Added support for **rig-id** stratum protocol extensions, compatible with xmr-stak.
+ - Changed behavior for option `variant=-1` for `cryptonight`, now variant is `1` by default, if you mine old coins you need to change `variant` to `0`.
+ - A lot of small fixes and better unification with proxy code.
+
+# v2.6.0-beta3
+- [#563](https://github.com/xmrig/xmrig/issues/563) **Added [advanced threads mode](https://github.com/xmrig/xmrig/issues/563), it is now possible to configure each thread individually.**
+- [#255](https://github.com/xmrig/xmrig/issues/255) Low power mode extended to **triple**, **quad** and **penta** modes.
+- [#519](https://github.com/xmrig/xmrig/issues/519) Fixed high donation levels, improved donation start time randomization.
+- [#554](https://github.com/xmrig/xmrig/issues/554) Fixed regression with `print-time` option.
+
+# v2.6.0-beta2
+- Improved performance for `cryptonight v7` especially in double hash mode.
+- [#499](https://github.com/xmrig/xmrig/issues/499) IPv6 disabled for internal HTTP API by default, was causing issues on some systems.
+- Added short aliases for algorithm names: `cn`, `cn-lite` and `cn-heavy`.
+- Fixed regressions (v2.6.0-beta1 affected)
+ - [#494](https://github.com/xmrig/xmrig/issues/494) Command line option `--donate-level` was broken.
+ - [#502](https://github.com/xmrig/xmrig/issues/502) Build without libmicrohttpd was broken.
+ - Fixed nonce calculation for `--av 4` (software AES, double hash), which was causing reduction of effective hashrate and rejected shares on nicehash.
+
+# v2.6.0-beta1
+ - [#476](https://github.com/xmrig/xmrig/issues/476) **Added Cryptonight-Heavy support for Sumokoin ASIC resistance fork.**
+ - HTTP server now runs in main loop, making it possible to easily extend the API without worrying about thread synchronization.
+ - Added initial graceful reload support, the miner will reload the configuration if the config file changes, disabled by default until fully implemented and tested.
+ - Added API endpoint `PUT /1/config` to update current config.
+ - Added API endpoint `GET /1/config` to get current active config.
+ - Added API endpoint `GET /1/threads` to get current active threads configuration.
+ - API endpoint `GET /` now deprecated, use `GET /1/summary` instead.
+ - Added `--api-no-ipv6` and similar config option to disable IPv6 support for HTTP API.
+ - Added `--api-no-restricted` to enable full access to the API, this option has no effect if `--api-access-token` not specified.
+
+# v2.5.3
+- Fixed critical bug, in some cases the miner couldn't recover the connection and switch to a failover pool, version 2.5.2 affected. If you use v2.6.0-beta3 this issue doesn't concern you.
+- [#499](https://github.com/xmrig/xmrig/issues/499) IPv6 support disabled for internal HTTP API.
+- Added workaround for nicehash.com: if you use `cryptonightv7..nicehash.com`, option `variant=1` will be set automatically.
+
+# v2.5.2
+- [#478](https://github.com/xmrig/xmrig/issues/478) Fixed broken reconnect.
+
+# v2.5.1
+- [#454](https://github.com/xmrig/xmrig/issues/454) Fixed build with libmicrohttpd version below v0.9.35.
+- [#456](https://github.com/xmrig/xmrig/issues/456) Verbose errors related to donation pool were not fully silenced.
+- [#459](https://github.com/xmrig/xmrig/issues/459) Fixed regression (version 2.5.0 affected) with connection to **xmr.f2pool.com**.
+
+# v2.5.0
+- [#434](https://github.com/xmrig/xmrig/issues/434) **Added support for Monero v7 PoW, scheduled on April 6.**
+- Added full IPv6 support.
+- Added protocol extension, when using the miner with xmrig-proxy 2.5+ there is no more need to manually specify the `nicehash` option.
+- [#123](https://github.com/xmrig/xmrig-proxy/issues/123) Fixed regression (all versions since 2.4 affected), fragmented responses from pool/proxy were parsed incorrectly.
+- [#428](https://github.com/xmrig/xmrig/issues/428) Fixed regression (version 2.4.5 affected) with CPU cache size detection.
+
+# v2.4.5
+- [#324](https://github.com/xmrig/xmrig/pull/324) Fixed build without libmicrohttpd (CMake cache issue).
+- [#341](https://github.com/xmrig/xmrig/issues/341) Fixed wrong exit code and added command line option `--dry-run`.
+- [#385](https://github.com/xmrig/xmrig/pull/385) Up to 20% performance increase for non-AES CPUs and fixed Intel Core 2 cache detection.
+
+# v2.4.4
+ - Added libmicrohttpd version to --version output.
+ - Fixed bug in signal handler, in some cases the miner wasn't shut down properly.
+ - Fixed recent MSVC 2017 version detection.
+ - [#279](https://github.com/xmrig/xmrig/pull/279) Fixed build on some macOS versions.
+
+# v2.4.3
+ - [#94](https://github.com/xmrig/xmrig/issues/94#issuecomment-342019257) [#216](https://github.com/xmrig/xmrig/issues/216) Added **ARMv8** and **ARMv7** support. Hardware AES supported, thanks [Imran Yusuff](https://github.com/imranyusuff).
+ - [#157](https://github.com/xmrig/xmrig/issues/157) [#196](https://github.com/xmrig/xmrig/issues/196) Fixed Linux compile issues.
+ - [#184](https://github.com/xmrig/xmrig/issues/184) Fixed cache size detection for CPUs with disabled Hyper-Threading.
+ - [#200](https://github.com/xmrig/xmrig/issues/200) In some cases the miner didn't write the log to stdout.
+
+# v2.4.2
+ - [#60](https://github.com/xmrig/xmrig/issues/60) Added FreeBSD support, thanks [vcambur](https://github.com/vcambur).
+ - [#153](https://github.com/xmrig/xmrig/issues/153) Fixed issues with dwarfpool.com.
+
+# v2.4.1
+ - [#147](https://github.com/xmrig/xmrig/issues/147) Fixed compatibility with monero-stratum.
+
+# v2.4.0
+ - Added [HTTP API](https://github.com/xmrig/xmrig/wiki/API).
+ - Added comments support in config file.
+ - libjansson replaced with rapidjson.
+ - [#98](https://github.com/xmrig/xmrig/issues/98) Ignore `keepalive` option with minergate.com and nicehash.com.
+ - [#101](https://github.com/xmrig/xmrig/issues/101) Fixed MSVC 2017 (15.3) compile time version detection.
+ - [#108](https://github.com/xmrig/xmrig/issues/108) Silently ignore invalid values for `donate-level` option.
+ - [#111](https://github.com/xmrig/xmrig/issues/111) Fixed build without AEON support.
+
+# v2.3.1
+- [#68](https://github.com/xmrig/xmrig/issues/68) Fixed compatibility with Docker containers, nothing was printed to the console.
+
+# v2.3.0
+- Added `--cpu-priority` option (0 idle, 2 normal to 5 highest).
+- Added `--user-agent` option, to set custom user-agent string for pool. For example `cpuminer-multi/0.1`.
+- Added `--no-huge-pages` option, to disable huge pages support.
+- [#62](https://github.com/xmrig/xmrig/issues/62) Don't send the login to the dev pool.
+- Force reconnect if the pool blocks the miner's IP address, helps switch to a backup pool.
+- Fixed: failed to open the default config file if the path contains non-English characters.
+- Fixed: an error occurred when trying to use unavailable stdin or stdout, regression since version 2.2.0.
+- Fixed: message about huge pages support successfully enabled on Windows was not shown in release builds.
+
+# v2.2.1
+- Fixed [terminal issues](https://github.com/xmrig/xmrig-proxy/issues/2#issuecomment-319914085) after exit on Linux and OS X.
+
+# v2.2.0
+- [#46](https://github.com/xmrig/xmrig/issues/46) Restored config file support. It is now possible to use multiple config files and combine them with command line options; also added support for a default config.
+- Improved color support on Windows, now uses uv_tty, legacy code removed.
+- QuickEdit Mode now disabled on Windows.
+- Added interactive commands in console window: **h**ashrate, **p**ause, **r**esume.
+- Fixed autoconf mode for AMD FX CPUs.
+
+# v2.1.0
+- [#40](https://github.com/xmrig/xmrig/issues/40) Improved miner shutdown, fixed crash on exit for Linux and OS X.
+- Fixed, the login request contained malformed JSON if the username or password had special characters, for example `\`.
+- [#220](https://github.com/fireice-uk/xmr-stak-cpu/pull/220) Better support for Round Robin DNS, IP address now always chosen randomly instead of being stuck on the first one.
+- Changed donation address, new [xmrig-proxy](https://github.com/xmrig/xmrig-proxy) is coming soon.
+
+# v2.0.2
+- Better deal with possible duplicate jobs from pool, show warning and ignore duplicates.
+- For Windows builds libuv updated to version 1.13.1 and gcc to 7.1.0.
+
+# v2.0.1
+ - [#27](https://github.com/xmrig/xmrig/issues/27) Fixed possible crash on 32-bit systems.
+
+# v2.0.0
+ - Option `--backup-url` removed, instead it is now possible to specify multiple pools, for example: `-o example1.com:3333 -u user1 -p password1 -k -o example2.com:5555 -u user2 -o example3.com:4444 -u user3`
+ - [#15](https://github.com/xmrig/xmrig/issues/15) Added option `-l, --log-file=FILE` to write log to file.
+ - [#15](https://github.com/xmrig/xmrig/issues/15) Added option `-S, --syslog` to use syslog for logging, Linux only.
+ - [#18](https://github.com/xmrig/xmrig/issues/18) Added nice messages for accepted/rejected shares with diff and network latency.
+ - [#20](https://github.com/xmrig/xmrig/issues/20) Fixed `--cpu-affinity` for more than 32 threads.
+ - Fixed Windows XP support.
+ - Fixed regression, option `--no-color` did not fully disable colored output.
+ - Show resolved pool IP address in miner output.
+
+# v1.0.1
+- Fixed broken software AES implementation, the app crashed if the CPU does not support AES-NI, only version 1.0.0 affected.
+
+# v1.0.0
+- Miner completely rewritten in C++ with libuv.
+- This version should be fully compatible (except config file) with previous versions, many new nice features will come in next versions.
+- This is still beta. If you find regression, stability or performance issues, or have an idea for a new feature, please feel free to open a new [issue](https://github.com/xmrig/xmrig/issues/new).
+- Added new option `--print-time=N`, print hashrate report every N seconds.
+- New hashrate reports, by default every 60 seconds.
+- Added Microsoft Visual C++ 2015 and 2017 support.
+- Removed dependency on libcurl.
+- To compile this version from source please switch to the [dev](https://github.com/xmrig/xmrig/tree/dev) branch.
+
+# v0.8.2
+- Fixed L2 cache size detection for AMD CPUs (Bulldozer/Piledriver/Steamroller/Excavator architecture).
+
+# v0.8.2
+- Fixed L2 cache size detection for AMD CPUs (Bulldozer/Piledriver/Steamroller/Excavator architecture).
+- Fixed gcc 7.1 support.
+
+# v0.8.1
+- Added nicehash support, detected automatically by pool URL, for example `cryptonight.eu.nicehash.com:3355`, or manually via option `--nicehash`.
+
+# v0.8.0
+- Added double hash mode, also known as lower power mode. `--av=2` and `--av=4`.
+- Added smart automatic CPU configuration. The default thread count now depends on the size of the CPU's L3 cache.
+- Added CryptoNight-Lite support for AEON `-a cryptonight-lite`. +- Added `--max-cpu-usage` option for auto CPU configuration mode. +- Added `--safe` option for adjust threads and algorithm variations to current CPU. +- No more manual steps to enable huge pages on Windows. XMRig will do it automatically. +- Removed BMI2 algorithm variation. +- Removed default pool URL. + +# v0.6.0 +- Added automatic cryptonight self test. +- New software AES algorithm variation. Will be automatically selected if cpu not support AES-NI. +- Added 32 bit builds. +- Documented [algorithm variations](https://github.com/xmrig/xmrig#algorithm-variations). + +# v0.5.0 +- Initial public release. From 1aa2d44d0d51e32d5d681442c143373712b99836 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 10 Aug 2019 18:14:48 +0700 Subject: [PATCH 159/172] Update CHANGELOG.md --- CHANGELOG.md | 347 +++------------------------------------------------ 1 file changed, 14 insertions(+), 333 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15ae35a9..691fa18f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,34 +1,15 @@ -# v2.99.5-beta -- [#1066](https://github.com/xmrig/xmrig/issues/1066#issuecomment-518080529) Fixed crash and added error message if pool not ready for RandomX. -- [#1092](https://github.com/xmrig/xmrig/issues/1092) Fixed crash if wrong CPU affinity used. -- [#1103](https://github.com/xmrig/xmrig/issues/1103) Improved auto configuration for RandomX for CPUs where L2 cache is limiting factor. -- [#1105](https://github.com/xmrig/xmrig/issues/1105) Improved auto configuration for `cn-pico` algorithm. -- [#1106](https://github.com/xmrig/xmrig/issues/1106) Fixed `hugepages` field in summary API. -- Added alternative short format for CPU threads. -- Changed format for CPU threads with intensity above 1. -- Name for reference RandomX configuration changed to `rx/test` to avoid potential conflicts in future. 
- -# v2.99.4-beta -- [#1062](https://github.com/xmrig/xmrig/issues/1062) Fixed 32 bit support. **32 bit is slow and deprecated**. -- [#1088](https://github.com/xmrig/xmrig/pull/1088) Fixed macOS compilation. -- [#1095](https://github.com/xmrig/xmrig/pull/1095) Fixed compatibility with hwloc 1.10.x. -- Optimized RandomX initialization and switching, fixed rare crash when re-initialize dataset. -- Fixed ARM build with hwloc. - -# v2.99.3-beta -- [#1082](https://github.com/xmrig/xmrig/issues/1082) Fixed hwloc auto configuration on AMD FX CPUs. -- Added command line option `--export-topology` for export hwloc topology to a XML file. - -# v2.99.2-beta +# v3.0.0 +- **Added RandomX (`rx/test`) algorithm for testing and benchmarking.** +- **[#1036](https://github.com/xmrig/xmrig/pull/1036) Added RandomWOW (`rx/wow`) algorithm for [Wownero](http://wownero.org/).** +- **[#1050](https://github.com/xmrig/xmrig/pull/1050) Added RandomXL (`rx/loki`) algorithm for [Loki](https://loki.network/).** +- [#257](https://github.com/xmrig/xmrig-nvidia/pull/257) New logging subsystem, file and syslog now always without colors. +- [#314](https://github.com/xmrig/xmrig-proxy/issues/314) Added donate over proxy feature. +- [#1007](https://github.com/xmrig/xmrig/issues/1007) Old HTTP API backend based on libmicrohttpd, replaced to custom HTTP server (libuv + http_parser). +- [#1010](https://github.com/xmrig/xmrig/pull/1010#issuecomment-482632107) Added daemon support (solo mining). +- [#1066](https://github.com/xmrig/xmrig/issues/1066#issuecomment-518080529) Added error message if pool not ready for RandomX. - [#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via **hwloc**. -- Fixed miner freeze when switch between RandomX variants. -- Fixed dataset initialization speed on Linux if thread affinity was used. - -# v2.99.1-beta -- [#1072](https://github.com/xmrig/xmrig/issues/1072) Fixed RandomX `seed_hash` re-initialization. 
- -# v2.99.0-beta -- [#1050](https://github.com/xmrig/xmrig/pull/1050) Added RandomXL algorithm for [Loki](https://loki.network/), algorithm name used by miner is `randomx/loki` or `rx/loki`. +- [#1105](https://github.com/xmrig/xmrig/issues/1105) Improved auto configuration for `cn-pico` algorithm. +- Added command line option `--export-topology` for export hwloc topology to a XML file. - Added [flexible](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) multi algorithm configuration. - Added unlimited switching between incompatible algorithms, all mining options can be changed in runtime. - Breaked backward compatibility with previous configs and command line, `variant` option replaced to `algo`, global option `algo` removed, all CPU related settings moved to `cpu` object. @@ -36,308 +17,8 @@ - Algorithm `cn/msr` renamed to `cn/fast`. - Algorithm `cn/xtl` removed. - API endpoint `GET /1/threads` replaced to `GET /2/backends`. - -# v2.16.0-beta -- [#1036](https://github.com/xmrig/xmrig/pull/1036) Added RandomWOW (RandomX with different preferences) algorithm support for [Wownero](http://wownero.org/). - - Algorithm name used by miner is `randomx/wow` or `rx/wow`. - - Currently runtime algorithm switching NOT supported with other algorithms. - -# v2.15.4-beta - Added global uptime and extended connection information in API. -- API now return current algorithm instead of global algorithm specified in config. -- This version also include all changes from stable version v2.14.4. +- API now return current algorithm. -# v2.15.3-beta -- [#1014](https://github.com/xmrig/xmrig/issues/1014) Fixed regression, default value for `algo` option was not applied. - -# v2.15.2-beta -- [#1010](https://github.com/xmrig/xmrig/pull/1010#issuecomment-482632107) Added daemon support (solo mining). -- [#1012](https://github.com/xmrig/xmrig/pull/1012) Fixed compatibility with clang 9. -- Config subsystem was rewritten, internally JSON is primary format now. 
-- Fixed regression, big HTTP responses was truncated. - -# v2.15.1-beta -- [#1007](https://github.com/xmrig/xmrig/issues/1007) Old HTTP API backend based on libmicrohttpd, replaced to custom HTTP server (libuv + http_parser). -- [#257](https://github.com/xmrig/xmrig-nvidia/pull/257) New logging subsystem, file and syslog now always without colors. - -# v2.15.0-beta -- [#314](https://github.com/xmrig/xmrig-proxy/issues/314) Added donate over proxy feature. - - Added new option `donate-over-proxy`. - - Added real graceful exit. - -# v2.14.4 -- [#992](https://github.com/xmrig/xmrig/pull/992) Fixed compilation with Clang 3.5. -- [#1012](https://github.com/xmrig/xmrig/pull/1012) Fixed compilation with Clang 9.0. -- In HTTP API for unknown hashrate now used `null` instead of `0.0`. -- Fixed MSVC 2019 version detection. -- Removed obsolete automatic variants. - -# v2.14.1 -* [#975](https://github.com/xmrig/xmrig/issues/975) Fixed crash on Linux if double thread mode used. - -# v2.14.0 -- **[#969](https://github.com/xmrig/xmrig/pull/969) Added new algorithm `cryptonight/rwz`, short alias `cn/rwz` (also known as CryptoNight ReverseWaltz), for upcoming [Graft](https://www.graft.network/) fork.** -- **[#931](https://github.com/xmrig/xmrig/issues/931) Added new algorithm `cryptonight/zls`, short alias `cn/zls` for [Zelerius Network](https://zelerius.org) fork.** -- **[#940](https://github.com/xmrig/xmrig/issues/940) Added new algorithm `cryptonight/double`, short alias `cn/double` (also known as CryptoNight HeavyX), for [X-CASH](https://x-cash.org/).** -- [#951](https://github.com/xmrig/xmrig/issues/951#issuecomment-469581529) Fixed crash if AVX was disabled on OS level. -- [#952](https://github.com/xmrig/xmrig/issues/952) Fixed compile error on some Linux. -- [#957](https://github.com/xmrig/xmrig/issues/957#issuecomment-468890667) Added support for embedded config. -- [#958](https://github.com/xmrig/xmrig/pull/958) Fixed incorrect user agent on ARM platforms. 
-- [#968](https://github.com/xmrig/xmrig/pull/968) Optimized `cn/r` algorithm performance. - -# v2.13.1 -- [#946](https://github.com/xmrig/xmrig/pull/946) Optimized software AES implementations for CPUs without hardware AES support. `cn/r`, `cn/wow` up to 2.6 times faster, 4-9% improvements for other algorithms. - -# v2.13.0 -- **[#938](https://github.com/xmrig/xmrig/issues/938) Added support for new algorithm `cryptonight/r`, short alias `cn/r` (also known as CryptoNightR or CryptoNight variant 4), for upcoming [Monero](https://www.getmonero.org/) fork on March 9, thanks [@SChernykh](https://github.com/SChernykh).** -- [#939](https://github.com/xmrig/xmrig/issues/939) Added support for dynamic (runtime) pools reload. -- [#932](https://github.com/xmrig/xmrig/issues/932) Fixed `cn-pico` hashrate drop, regression since v2.11.0. - -# v2.12.0 -- [#929](https://github.com/xmrig/xmrig/pull/929) Added support for new algorithm `cryptonight/wow`, short alias `cn/wow` (also known as CryptonightR), for upcoming [Wownero](http://wownero.org) fork on February 14. - -# v2.11.0 -- [#928](https://github.com/xmrig/xmrig/issues/928) Added support for new algorithm `cryptonight/gpu`, short alias `cn/gpu` (original name `cryptonight-gpu`), for upcoming [Ryo currency](https://ryo-currency.com) fork on February 14. -- [#749](https://github.com/xmrig/xmrig/issues/749) Added support for detect hardware AES in runtime on ARMv8 platforms. -- [#292](https://github.com/xmrig/xmrig/issues/292) Fixed build on ARMv8 platforms if compiler not support hardware AES. - -# v2.10.0 -- [#904](https://github.com/xmrig/xmrig/issues/904) Added new algorithm `cn-pico/trtl` (aliases `cryptonight-turtle`, `cn-trtl`) for upcoming TurtleCoin (TRTL) fork. -- Default value for option `max-cpu-usage` changed to `100` also this option now deprecated. - -# v2.9.4 -- [#913](https://github.com/xmrig/xmrig/issues/913) Fixed Masari (MSR) support (this update required for upcoming fork). 
-- [#915](https://github.com/xmrig/xmrig/pull/915) Improved security, JIT memory now read-only after patching. - -# v2.9.3 -- [#909](https://github.com/xmrig/xmrig/issues/909) Fixed compile errors on FreeBSD. -- [#912](https://github.com/xmrig/xmrig/pull/912) Fixed, C++ implementation of `cn/half` was produce up to 13% of invalid hashes. - -# v2.9.2 -- [#907](https://github.com/xmrig/xmrig/pull/907) Fixed crash on Linux. - -# v2.9.1 -- Restored compatibility with https://stellite.hashvault.pro. - -# v2.9.0 -- [#899](https://github.com/xmrig/xmrig/issues/899) Added support for new algorithm `cn/half` for Masari and Stellite forks. -- [#834](https://github.com/xmrig/xmrig/pull/834) Added ASM optimized code for AMD Bulldozer. -- [#839](https://github.com/xmrig/xmrig/issues/839) Fixed FreeBSD compile. -- [#857](https://github.com/xmrig/xmrig/pull/857) Fixed impossible to build for macOS without clang. - -# v2.8.3 -- [#813](https://github.com/xmrig/xmrig/issues/813) Fixed critical bug with Minergate pool and variant 2. - -# v2.8.1 -- [#768](https://github.com/xmrig/xmrig/issues/768) Fixed build with Visual Studio 2015. -- [#769](https://github.com/xmrig/xmrig/issues/769) Fixed regression, some ANSI escape sequences was in log with disabled colors. -- [#777](https://github.com/xmrig/xmrig/issues/777) Better report about pool connection issues. -- Simplified checks for ASM auto detection, only AES support necessary. -- Added missing options to `--help` output. - -# v2.8.0 -- **[#753](https://github.com/xmrig/xmrig/issues/753) Added new algorithm [CryptoNight variant 2](https://github.com/xmrig/xmrig/issues/753) for Monero fork, thanks [@SChernykh](https://github.com/SChernykh).** - - Added global and per thread option `"asm"` and and command line equivalent. -- **[#758](https://github.com/xmrig/xmrig/issues/758) Added SSL/TLS support for secure connections to pools.** - - Added per pool options `"tls"` and `"tls-fingerprint"` and command line equivalents. 
-- [#767](https://github.com/xmrig/xmrig/issues/767) Added config autosave feature, same with GPU miners. -- [#245](https://github.com/xmrig/xmrig-proxy/issues/245) Fixed API ID collision when run multiple miners on same machine. -- [#757](https://github.com/xmrig/xmrig/issues/757) Fixed send buffer overflow. - -# v2.6.4 -- [#700](https://github.com/xmrig/xmrig/issues/700) `cryptonight-lite/ipbc` replaced to `cryptonight-heavy/tube` for **Bittube (TUBE)**. -- Added `cryptonight/rto` (cryptonight variant 1 with IPBC/TUBE mod) variant for **Arto (RTO)** coin. -- Added `cryptonight/xao` (original cryptonight with bigger iteration count) variant for **Alloy (XAO)** coin. -- Better variant detection for **nicehash.com** and **minergate.com**. -- [#692](https://github.com/xmrig/xmrig/issues/692) Added support for specify both algorithm and variant via single `algo` option. - -# v2.6.3 -- **Added support for new cryptonight-heavy variant xhv** (`cn-heavy/xhv`) for upcoming Haven Protocol fork. -- **Added support for new cryptonight variant msr** (`cn/msr`) also known as `cryptonight-fast` for upcoming Masari fork. -- Added new detailed hashrate report. -- [#446](https://github.com/xmrig/xmrig/issues/446) Likely fixed SIGBUS error on 32 bit ARM CPUs. -- [#551](https://github.com/xmrig/xmrig/issues/551) Fixed `cn-heavy` algorithm on ARMv8. -- [#614](https://github.com/xmrig/xmrig/issues/614) Fixed display issue with huge pages percentage when colors disabled. -- [#615](https://github.com/xmrig/xmrig/issues/615) Fixed build without libcpuid. -- [#629](https://github.com/xmrig/xmrig/pull/629) Fixed file logging with non-seekable files. -- [#672](https://github.com/xmrig/xmrig/pull/672) Reverted back `cryptonight-light` and exit if no valid algorithm specified. - -# v2.6.2 - - [#607](https://github.com/xmrig/xmrig/issues/607) Fixed donation bug. - - [#610](https://github.com/xmrig/xmrig/issues/610) Fixed ARM build. 
- -# v2.6.1 - - [#168](https://github.com/xmrig/xmrig-proxy/issues/168) Added support for [mining algorithm negotiation](https://github.com/xmrig/xmrig-proxy/blob/dev/doc/STRATUM_EXT.md#1-mining-algorithm-negotiation). - - Added IPBC coin support, base algorithm `cn-lite` variant `ipbc`. - - [#581](https://github.com/xmrig/xmrig/issues/581) Added support for upcoming Stellite (XTL) fork, base algorithm `cn` variant `xtl`, variant can set now, no need do it after fork. - - Added support for **rig-id** stratum protocol extensions, compatible with xmr-stak. - - Changed behavior for option `variant=-1` for `cryptonight`, now variant is `1` by default, if you mine old coins need change `variant` to `0`. - - A lot of small fixes and better unification with proxy code. - -# v2.6.0-beta3 -- [#563](https://github.com/xmrig/xmrig/issues/563) **Added [advanced threads mode](https://github.com/xmrig/xmrig/issues/563), now possible configure each thread individually.** -- [#255](https://github.com/xmrig/xmrig/issues/563) Low power mode extended to **triple**, **quard** and **penta** modes. -- [#519](https://github.com/xmrig/xmrig/issues/519) Fixed high donation levels, improved donation start time randomization. -- [#554](https://github.com/xmrig/xmrig/issues/554) Fixed regression with `print-time` option. - -# v2.6.0-beta2 -- Improved performance for `cryptonight v7` especially in double hash mode. -- [#499](https://github.com/xmrig/xmrig/issues/499) IPv6 disabled for internal HTTP API by default, was causing issues on some systems. -- Added short aliases for algorithm names: `cn`, `cn-lite` and `cn-heavy`. -- Fixed regressions (v2.6.0-beta1 affected) - - [#494](https://github.com/xmrig/xmrig/issues/494) Command line option `--donate-level` was broken. - - [#502](https://github.com/xmrig/xmrig/issues/502) Build without libmicrohttpd was broken. 
- - Fixed nonce calculation for `--av 4` (software AES, double hash) was causing reduction of effective hashrate and rejected shares on nicehash. - -# v2.6.0-beta1 - - [#476](https://github.com/xmrig/xmrig/issues/476) **Added Cryptonight-Heavy support for Sumokoin ASIC resistance fork.** - - HTTP server now runs in main loop, it make possible easy extend API without worry about thread synchronization. - - Added initial graceful reload support, miner will reload configuration if config file changed, disabled by default until it will be fully implemented and tested. - - Added API endpoint `PUT /1/config` to update current config. - - Added API endpoint `GET /1/config` to get current active config. - - Added API endpoint `GET /1/threads` to get current active threads configuration. - - API endpoint `GET /` now deprecated, use `GET /1/summary` instead. - - Added `--api-no-ipv6` and similar config option to disable IPv6 support for HTTP API. - - Added `--api-no-restricted` to enable full access to api, this option has no effect if `--api-access-token` not specified. - -# v2.5.3 -- Fixed critical bug, in some cases miner was can't recovery connection and switch to failover pool, version 2.5.2 affected. If you use v2.6.0-beta3 this issue doesn't concern you. -- [#499](https://github.com/xmrig/xmrig/issues/499) IPv6 support disabled for internal HTTP API. -- Added workaround for nicehash.com if you use `cryptonightv7..nicehash.com` option `variant=1` will be set automatically. - -# v2.5.2 -- [#448](https://github.com/xmrig/xmrig/issues/478) Fixed broken reconnect. - -# v2.5.1 -- [#454](https://github.com/xmrig/xmrig/issues/454) Fixed build with libmicrohttpd version below v0.9.35. -- [#456](https://github.com/xmrig/xmrig/issues/459) Verbose errors related to donation pool was not fully silenced. -- [#459](https://github.com/xmrig/xmrig/issues/459) Fixed regression (version 2.5.0 affected) with connection to **xmr.f2pool.com**. 
- -# v2.5.0 -- [#434](https://github.com/xmrig/xmrig/issues/434) **Added support for Monero v7 PoW, scheduled on April 6.** -- Added full IPv6 support. -- Added protocol extension, when use the miner with xmrig-proxy 2.5+ no more need manually specify `nicehash` option. -- [#123](https://github.com/xmrig/xmrig-proxy/issues/123) Fixed regression (all versions since 2.4 affected) fragmented responses from pool/proxy was parsed incorrectly. -- [#428](https://github.com/xmrig/xmrig/issues/428) Fixed regression (version 2.4.5 affected) with CPU cache size detection. - -# v2.4.5 -- [#324](https://github.com/xmrig/xmrig/pull/324) Fixed build without libmicrohttpd (CMake cache issue). -- [#341](https://github.com/xmrig/xmrig/issues/341) Fixed wrong exit code and added command line option `--dry-run`. -- [#385](https://github.com/xmrig/xmrig/pull/385) Up to 20% performance increase for non-AES CPU and fixed Intel Core 2 cache detection. - -# v2.4.4 - - Added libmicrohttpd version to --version output. - - Fixed bug in singal handler, in some cases miner wasn't shutdown properly. - - Fixed recent MSVC 2017 version detection. - - [#279](https://github.com/xmrig/xmrig/pull/279) Fixed build on some macOS versions. - -# v2.4.3 - - [#94](https://github.com/xmrig/xmrig/issues/94#issuecomment-342019257) [#216](https://github.com/xmrig/xmrig/issues/216) Added **ARMv8** and **ARMv7** support. Hardware AES supported, thanks [Imran Yusuff](https://github.com/imranyusuff). - - [#157](https://github.com/xmrig/xmrig/issues/157) [#196](https://github.com/xmrig/xmrig/issues/196) Fixed Linux compile issues. - - [#184](https://github.com/xmrig/xmrig/issues/184) Fixed cache size detection for CPUs with disabled Hyper-Threading. - - [#200](https://github.com/xmrig/xmrig/issues/200) In some cases miner was doesn't write log to stdout. - -# v2.4.2 - - [#60](https://github.com/xmrig/xmrig/issues/60) Added FreeBSD support, thanks [vcambur](https://github.com/vcambur). 
- - [#153](https://github.com/xmrig/xmrig/issues/153) Fixed issues with dwarfpool.com. - -# v2.4.1 - - [#147](https://github.com/xmrig/xmrig/issues/147) Fixed comparability with monero-stratum. - -# v2.4.0 - - Added [HTTP API](https://github.com/xmrig/xmrig/wiki/API). - - Added comments support in config file. - - libjansson replaced to rapidjson. - - [#98](https://github.com/xmrig/xmrig/issues/98) Ignore `keepalive` option with minergate.com and nicehash.com. - - [#101](https://github.com/xmrig/xmrig/issues/101) Fixed MSVC 2017 (15.3) compile time version detection. - - [#108](https://github.com/xmrig/xmrig/issues/108) Silently ignore invalid values for `donate-level` option. - - [#111](https://github.com/xmrig/xmrig/issues/111) Fixed build without AEON support. - -# v2.3.1 -- [#68](https://github.com/xmrig/xmrig/issues/68) Fixed compatibility with Docker containers, was nothing print on console. - -# v2.3.0 -- Added `--cpu-priority` option (0 idle, 2 normal to 5 highest). -- Added `--user-agent` option, to set custom user-agent string for pool. For example `cpuminer-multi/0.1`. -- Added `--no-huge-pages` option, to disable huge pages support. -- [#62](https://github.com/xmrig/xmrig/issues/62) Don't send the login to the dev pool. -- Force reconnect if pool block miner IP address. helps switch to backup pool. -- Fixed: failed open default config file if path contains non English characters. -- Fixed: error occurred if try use unavailable stdin or stdout, regression since version 2.2.0. -- Fixed: message about huge pages support successfully enabled on Windows was not shown in release builds. - -# v2.2.1 -- Fixed [terminal issues](https://github.com/xmrig/xmrig-proxy/issues/2#issuecomment-319914085) after exit on Linux and OS X. - -# v2.2.0 -- [#46](https://github.com/xmrig/xmrig/issues/46) Restored config file support. Now possible use multiple config files and combine with command line options also added support for default config. 
-- Improved colors support on Windows, now used uv_tty, legacy code removed. -- QuickEdit Mode now disabled on Windows. -- Added interactive commands in console window:: **h**ashrate, **p**ause, **r**esume. -- Fixed autoconf mode for AMD FX CPUs. - -# v2.1.0 -- [#40](https://github.com/xmrig/xmrig/issues/40) -Improved miner shutdown, fixed crash on exit for Linux and OS X. -- Fixed, login request was contain malformed JSON if username or password has some special characters for example `\`. -- [#220](https://github.com/fireice-uk/xmr-stak-cpu/pull/220) Better support for Round Robin DNS, IP address now always chosen randomly instead of stuck on first one. -- Changed donation address, new [xmrig-proxy](https://github.com/xmrig/xmrig-proxy) is coming soon. - -# v2.0.2 -- Better deal with possible duplicate jobs from pool, show warning and ignore duplicates. -- For Windows builds libuv updated to version 1.13.1 and gcc to 7.1.0. - -# v2.0.1 - - [#27](https://github.com/xmrig/xmrig/issues/27) Fixed possibility crash on 32bit systems. - -# v2.0.0 - - Option `--backup-url` removed, instead now possibility specify multiple pools for example: `-o example1.com:3333 -u user1 -p password1 -k -o example2.com:5555 -u user2 -o example3.com:4444 -u user3` - - [#15](https://github.com/xmrig/xmrig/issues/15) Added option `-l, --log-file=FILE` to write log to file. - - [#15](https://github.com/xmrig/xmrig/issues/15) Added option `-S, --syslog` to use syslog for logging, Linux only. - - [#18](https://github.com/xmrig/xmrig/issues/18) Added nice messages for accepted/rejected shares with diff and network latency. - - [#20](https://github.com/xmrig/xmrig/issues/20) Fixed `--cpu-affinity` for more than 32 threads. - - Fixed Windows XP support. - - Fixed regression, option `--no-color` was not fully disable colored output. - - Show resolved pool IP address in miner output. 
- -# v1.0.1 -- Fix broken software AES implementation, app has crashed if CPU not support AES-NI, only version 1.0.0 affected. - -# v1.0.0 -- Miner complete rewritten in C++ with libuv. -- This version should be fully compatible (except config file) with previos versions, many new nice features will come in next versions. -- This is still beta. If you found regression, stability or perfomance issues or have an idea for new feature please fell free to open new [issue](https://github.com/xmrig/xmrig/issues/new). -- Added new option `--print-time=N`, print hashrate report every N seconds. -- New hashrate reports, by default every 60 secons. -- Added Microsoft Visual C++ 2015 and 2017 support. -- Removed dependency on libcurl. -- To compile this version from source please switch to [dev](https://github.com/xmrig/xmrig/tree/dev) branch. - -# v0.8.2 -- Fixed L2 cache size detection for AMD CPUs (Bulldozer/Piledriver/Steamroller/Excavator architecture). - -# v0.8.2 -- Fixed L2 cache size detection for AMD CPUs (Bulldozer/Piledriver/Steamroller/Excavator architecture). -- Fixed gcc 7.1 support. - -# v0.8.1 -- Added nicehash support, detects automaticaly by pool URL, for example `cryptonight.eu.nicehash.com:3355` or manually via option `--nicehash`. - -# v0.8.0 -- Added double hash mode, also known as lower power mode. `--av=2` and `--av=4`. -- Added smart automatic CPU configuration. Default threads count now depends on size of the L3 cache of CPU. -- Added CryptoNight-Lite support for AEON `-a cryptonight-lite`. -- Added `--max-cpu-usage` option for auto CPU configuration mode. -- Added `--safe` option for adjust threads and algorithm variations to current CPU. -- No more manual steps to enable huge pages on Windows. XMRig will do it automatically. -- Removed BMI2 algorithm variation. -- Removed default pool URL. - -# v0.6.0 -- Added automatic cryptonight self test. -- New software AES algorithm variation. Will be automatically selected if cpu not support AES-NI. 
-- Added 32 bit builds. -- Documented [algorithm variations](https://github.com/xmrig/xmrig#algorithm-variations). - -# v0.5.0 -- Initial public release. +# Previous versions +[doc/CHANGELOG_OLD.md](doc/CHANGELOG_OLD.md) From 52ddd059ac4aedb7c03c07c419ad570febdb2f8a Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 10 Aug 2019 18:16:39 +0700 Subject: [PATCH 160/172] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 691fa18f..5f5888a8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ - [#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via **hwloc**. - [#1105](https://github.com/xmrig/xmrig/issues/1105) Improved auto configuration for `cn-pico` algorithm. - Added command line option `--export-topology` for export hwloc topology to a XML file. -- Added [flexible](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) multi algorithm configuration. +- Added flexible [multi algorithm](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) configuration. - Added unlimited switching between incompatible algorithms, all mining options can be changed in runtime. - Breaked backward compatibility with previous configs and command line, `variant` option replaced to `algo`, global option `algo` removed, all CPU related settings moved to `cpu` object. - Options `av`, `safe` and `max-cpu-usage` removed. 
From 09e8941f381f47484efd67f222a2c9d2546036c8 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 10 Aug 2019 18:18:25 +0700 Subject: [PATCH 161/172] Update CHANGELOG.md --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f5888a8..4424560e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,16 +2,16 @@ - **Added RandomX (`rx/test`) algorithm for testing and benchmarking.** - **[#1036](https://github.com/xmrig/xmrig/pull/1036) Added RandomWOW (`rx/wow`) algorithm for [Wownero](http://wownero.org/).** - **[#1050](https://github.com/xmrig/xmrig/pull/1050) Added RandomXL (`rx/loki`) algorithm for [Loki](https://loki.network/).** +- **[#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via hwloc**. +- **Added flexible [multi algorithm](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) configuration.** +- **Added unlimited switching between incompatible algorithms, all mining options can be changed in runtime.** - [#257](https://github.com/xmrig/xmrig-nvidia/pull/257) New logging subsystem, file and syslog now always without colors. - [#314](https://github.com/xmrig/xmrig-proxy/issues/314) Added donate over proxy feature. - [#1007](https://github.com/xmrig/xmrig/issues/1007) Old HTTP API backend based on libmicrohttpd, replaced to custom HTTP server (libuv + http_parser). - [#1010](https://github.com/xmrig/xmrig/pull/1010#issuecomment-482632107) Added daemon support (solo mining). - [#1066](https://github.com/xmrig/xmrig/issues/1066#issuecomment-518080529) Added error message if pool not ready for RandomX. -- [#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via **hwloc**. - [#1105](https://github.com/xmrig/xmrig/issues/1105) Improved auto configuration for `cn-pico` algorithm. - Added command line option `--export-topology` for export hwloc topology to a XML file. 
-- Added flexible [multi algorithm](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) configuration. -- Added unlimited switching between incompatible algorithms, all mining options can be changed in runtime. - Breaked backward compatibility with previous configs and command line, `variant` option replaced to `algo`, global option `algo` removed, all CPU related settings moved to `cpu` object. - Options `av`, `safe` and `max-cpu-usage` removed. - Algorithm `cn/msr` renamed to `cn/fast`. From bc5e13de5a3e40be0aacf3b49840756b80c9ed1a Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 10 Aug 2019 18:31:40 +0700 Subject: [PATCH 162/172] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4424560e..700942b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ - **[#1036](https://github.com/xmrig/xmrig/pull/1036) Added RandomWOW (`rx/wow`) algorithm for [Wownero](http://wownero.org/).** - **[#1050](https://github.com/xmrig/xmrig/pull/1050) Added RandomXL (`rx/loki`) algorithm for [Loki](https://loki.network/).** - **[#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via hwloc**. -- **Added flexible [multi algorithm](https://github.com/xmrig/xmrig/blob/evo/doc/CPU.md) configuration.** +- **Added flexible [multi algorithm](doc/CPU.md) configuration.** - **Added unlimited switching between incompatible algorithms, all mining options can be changed in runtime.** - [#257](https://github.com/xmrig/xmrig-nvidia/pull/257) New logging subsystem, file and syslog now always without colors. - [#314](https://github.com/xmrig/xmrig-proxy/issues/314) Added donate over proxy feature. 
From 00ae8d063c3879d999c6c08ff14d142452f74311 Mon Sep 17 00:00:00 2001 From: xmrig Date: Sat, 10 Aug 2019 19:43:11 +0700 Subject: [PATCH 163/172] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 700942b5..9b4317f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,5 @@ # v3.0.0 -- **Added RandomX (`rx/test`) algorithm for testing and benchmarking.** +- **[#1111](https://github.com/xmrig/xmrig/pull/1111) Added RandomX (`rx/test`) algorithm for testing and benchmarking.** - **[#1036](https://github.com/xmrig/xmrig/pull/1036) Added RandomWOW (`rx/wow`) algorithm for [Wownero](http://wownero.org/).** - **[#1050](https://github.com/xmrig/xmrig/pull/1050) Added RandomXL (`rx/loki`) algorithm for [Loki](https://loki.network/).** - **[#1077](https://github.com/xmrig/xmrig/issues/1077) Added NUMA support via hwloc**. From 28b7ac36fcc8f85be0a0595f58f468d1504eaa58 Mon Sep 17 00:00:00 2001 From: XMRig Date: Sat, 10 Aug 2019 23:56:26 +0700 Subject: [PATCH 164/172] v2.99.6-beta --- src/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/version.h b/src/version.h index 59577d2a..a0018a79 100644 --- a/src/version.h +++ b/src/version.h @@ -28,7 +28,7 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU miner" -#define APP_VERSION "2.99.6-evo" +#define APP_VERSION "2.99.6-beta" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" From a9c1c1ac642e127ffb6793c2551c2a438b815fdd Mon Sep 17 00:00:00 2001 From: XMRig Date: Sun, 11 Aug 2019 00:01:48 +0700 Subject: [PATCH 165/172] v3.0.0-dev --- src/version.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/version.h b/src/version.h index a0018a79..81adcd8b 100644 --- a/src/version.h +++ b/src/version.h @@ -28,15 +28,15 @@ #define APP_ID "xmrig" #define APP_NAME "XMRig" #define APP_DESC "XMRig CPU 
miner" -#define APP_VERSION "2.99.6-beta" +#define APP_VERSION "3.0.0-dev" #define APP_DOMAIN "xmrig.com" #define APP_SITE "www.xmrig.com" #define APP_COPYRIGHT "Copyright (C) 2016-2019 xmrig.com" #define APP_KIND "cpu" -#define APP_VER_MAJOR 2 -#define APP_VER_MINOR 99 -#define APP_VER_PATCH 6 +#define APP_VER_MAJOR 3 +#define APP_VER_MINOR 0 +#define APP_VER_PATCH 0 #ifdef _MSC_VER # if (_MSC_VER >= 1920) From 2ec257284f5bffce5277c35458d3893430c2d843 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 12 Aug 2019 16:52:16 +0700 Subject: [PATCH 166/172] Common API code moved to base/api. --- CMakeLists.txt | 19 +------------------ src/App.cpp | 1 - src/{ => base}/api/Api.cpp | 8 ++++---- src/{ => base}/api/Api.h | 0 src/{ => base}/api/Httpd.cpp | 4 ++-- src/{ => base}/api/Httpd.h | 0 src/{ => base}/api/interfaces/IApiListener.h | 0 src/{ => base}/api/interfaces/IApiRequest.h | 0 src/{ => base}/api/requests/ApiRequest.cpp | 2 +- src/{ => base}/api/requests/ApiRequest.h | 2 +- .../api/requests/HttpApiRequest.cpp | 2 +- src/{ => base}/api/requests/HttpApiRequest.h | 2 +- src/base/base.cmake | 10 ++++++++++ src/base/kernel/Base.cpp | 17 ++++++++--------- src/base/kernel/Base.h | 2 +- src/core/Miner.cpp | 4 ++-- src/core/Miner.h | 2 +- src/net/Network.cpp | 5 ++--- src/net/Network.h | 2 +- 19 files changed, 36 insertions(+), 46 deletions(-) rename src/{ => base}/api/Api.cpp (97%) rename src/{ => base}/api/Api.h (100%) rename src/{ => base}/api/Httpd.cpp (98%) rename src/{ => base}/api/Httpd.h (100%) rename src/{ => base}/api/interfaces/IApiListener.h (100%) rename src/{ => base}/api/interfaces/IApiRequest.h (100%) rename src/{ => base}/api/requests/ApiRequest.cpp (97%) rename src/{ => base}/api/requests/ApiRequest.h (98%) rename src/{ => base}/api/requests/HttpApiRequest.cpp (98%) rename src/{ => base}/api/requests/HttpApiRequest.h (98%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7600d556..15288319 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,7 +32,6 @@ 
set(HEADERS "${HEADERS_BASE}" "${HEADERS_BASE_HTTP}" "${HEADERS_BACKEND}" - src/api/interfaces/IApiListener.h src/App.h src/core/config/Config_default.h src/core/config/Config_platform.h @@ -232,22 +231,6 @@ if (WITH_EMBEDDED_CONFIG) add_definitions(/DXMRIG_FEATURE_EMBEDDED_CONFIG) endif() -if (WITH_HTTP) - set(HTTP_SOURCES - src/api/Api.cpp - src/api/Api.h - src/api/Httpd.cpp - src/api/Httpd.h - src/api/interfaces/IApiRequest.h - src/api/requests/ApiRequest.cpp - src/api/requests/ApiRequest.h - src/api/requests/HttpApiRequest.cpp - src/api/requests/HttpApiRequest.h - ) -else() - set(HTTP_SOURCES "") -endif() - include_directories(src) include_directories(src/3rdparty) include_directories(${UV_INCLUDE_DIR}) @@ -260,5 +243,5 @@ if (WITH_DEBUG_LOG) add_definitions(/DAPP_DEBUG) endif() -add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${SOURCES_CPUID} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${HTTP_SOURCES} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES} ${CN_GPU_SOURCES}) +add_executable(${CMAKE_PROJECT_NAME} ${HEADERS} ${SOURCES} ${SOURCES_OS} ${SOURCES_CPUID} ${HEADERS_CRYPTO} ${SOURCES_CRYPTO} ${SOURCES_SYSLOG} ${TLS_SOURCES} ${XMRIG_ASM_SOURCES} ${CN_GPU_SOURCES}) target_link_libraries(${CMAKE_PROJECT_NAME} ${XMRIG_ASM_LIBRARY} ${OPENSSL_LIBRARIES} ${UV_LIBRARIES} ${EXTRA_LIBS} ${CPUID_LIB}) diff --git a/src/App.cpp b/src/App.cpp index ccbaad4f..de45a499 100644 --- a/src/App.cpp +++ b/src/App.cpp @@ -28,7 +28,6 @@ #include -#include "api/Api.h" #include "App.h" #include "backend/cpu/Cpu.h" #include "base/io/Console.h" diff --git a/src/api/Api.cpp b/src/base/api/Api.cpp similarity index 97% rename from src/api/Api.cpp rename to src/base/api/Api.cpp index 4c8e7323..f358ac4b 100644 --- a/src/api/Api.cpp +++ b/src/base/api/Api.cpp @@ -32,9 +32,9 @@ #include "3rdparty/http-parser/http_parser.h" -#include "api/Api.h" -#include "api/interfaces/IApiListener.h" -#include "api/requests/HttpApiRequest.h" +#include "base/api/Api.h" +#include 
"base/api/interfaces/IApiListener.h" +#include "base/api/requests/HttpApiRequest.h" #include "base/kernel/Base.h" #include "base/tools/Buffer.h" #include "base/tools/Chrono.h" @@ -45,7 +45,7 @@ #ifdef XMRIG_FEATURE_HTTP -# include "api/Httpd.h" +# include "base/api/Httpd.h" #endif diff --git a/src/api/Api.h b/src/base/api/Api.h similarity index 100% rename from src/api/Api.h rename to src/base/api/Api.h diff --git a/src/api/Httpd.cpp b/src/base/api/Httpd.cpp similarity index 98% rename from src/api/Httpd.cpp rename to src/base/api/Httpd.cpp index 57a112e9..e61e66f1 100644 --- a/src/api/Httpd.cpp +++ b/src/base/api/Httpd.cpp @@ -24,8 +24,8 @@ #include "3rdparty/http-parser/http_parser.h" -#include "api/Api.h" -#include "api/Httpd.h" +#include "base/api/Api.h" +#include "base/api/Httpd.h" #include "base/io/log/Log.h" #include "base/net/http/HttpApiResponse.h" #include "base/net/http/HttpData.h" diff --git a/src/api/Httpd.h b/src/base/api/Httpd.h similarity index 100% rename from src/api/Httpd.h rename to src/base/api/Httpd.h diff --git a/src/api/interfaces/IApiListener.h b/src/base/api/interfaces/IApiListener.h similarity index 100% rename from src/api/interfaces/IApiListener.h rename to src/base/api/interfaces/IApiListener.h diff --git a/src/api/interfaces/IApiRequest.h b/src/base/api/interfaces/IApiRequest.h similarity index 100% rename from src/api/interfaces/IApiRequest.h rename to src/base/api/interfaces/IApiRequest.h diff --git a/src/api/requests/ApiRequest.cpp b/src/base/api/requests/ApiRequest.cpp similarity index 97% rename from src/api/requests/ApiRequest.cpp rename to src/base/api/requests/ApiRequest.cpp index 3812e419..da73adee 100644 --- a/src/api/requests/ApiRequest.cpp +++ b/src/base/api/requests/ApiRequest.cpp @@ -23,7 +23,7 @@ */ -#include "api/requests/ApiRequest.h" +#include "base/api/requests/ApiRequest.h" xmrig::ApiRequest::ApiRequest(Source source, bool restricted) : diff --git a/src/api/requests/ApiRequest.h b/src/base/api/requests/ApiRequest.h 
similarity index 98% rename from src/api/requests/ApiRequest.h rename to src/base/api/requests/ApiRequest.h index 05716e29..1fd721f1 100644 --- a/src/api/requests/ApiRequest.h +++ b/src/base/api/requests/ApiRequest.h @@ -27,7 +27,7 @@ #define XMRIG_APIREQUEST_H -#include "api/interfaces/IApiRequest.h" +#include "base/api/interfaces/IApiRequest.h" namespace xmrig { diff --git a/src/api/requests/HttpApiRequest.cpp b/src/base/api/requests/HttpApiRequest.cpp similarity index 98% rename from src/api/requests/HttpApiRequest.cpp rename to src/base/api/requests/HttpApiRequest.cpp index b4dc1810..c3c2dac4 100644 --- a/src/api/requests/HttpApiRequest.cpp +++ b/src/base/api/requests/HttpApiRequest.cpp @@ -23,7 +23,7 @@ */ -#include "api/requests/HttpApiRequest.h" +#include "base/api/requests/HttpApiRequest.h" #include "base/net/http/HttpData.h" #include "rapidjson/error/en.h" diff --git a/src/api/requests/HttpApiRequest.h b/src/base/api/requests/HttpApiRequest.h similarity index 98% rename from src/api/requests/HttpApiRequest.h rename to src/base/api/requests/HttpApiRequest.h index f34d4be5..dc3eb037 100644 --- a/src/api/requests/HttpApiRequest.h +++ b/src/base/api/requests/HttpApiRequest.h @@ -27,7 +27,7 @@ #define XMRIG_HTTPAPIREQUEST_H -#include "api/requests/ApiRequest.h" +#include "base/api/requests/ApiRequest.h" #include "base/net/http/HttpApiResponse.h" #include "base/tools/String.h" diff --git a/src/base/base.cmake b/src/base/base.cmake index ef4da131..bc022c70 100644 --- a/src/base/base.cmake +++ b/src/base/base.cmake @@ -1,4 +1,5 @@ set(HEADERS_BASE + src/base/api/interfaces/IApiListener.h src/base/io/Console.h src/base/io/json/Json.h src/base/io/json/JsonChain.h @@ -114,6 +115,11 @@ endif() if (WITH_HTTP) set(HEADERS_BASE_HTTP src/3rdparty/http-parser/http_parser.h + src/base/api/Api.h + src/base/api/Httpd.h + src/base/api/interfaces/IApiRequest.h + src/base/api/requests/ApiRequest.h + src/base/api/requests/HttpApiRequest.h 
src/base/kernel/interfaces/IHttpListener.h src/base/kernel/interfaces/IJsonReader.h src/base/kernel/interfaces/ITcpServerListener.h @@ -129,6 +135,10 @@ if (WITH_HTTP) set(SOURCES_BASE_HTTP src/3rdparty/http-parser/http_parser.c + src/base/api/Api.cpp + src/base/api/Httpd.cpp + src/base/api/requests/ApiRequest.cpp + src/base/api/requests/HttpApiRequest.cpp src/base/net/http/HttpApiResponse.cpp src/base/net/http/HttpClient.cpp src/base/net/http/HttpContext.cpp diff --git a/src/base/kernel/Base.cpp b/src/base/kernel/Base.cpp index 03feef89..152f6bcc 100644 --- a/src/base/kernel/Base.cpp +++ b/src/base/kernel/Base.cpp @@ -47,15 +47,8 @@ #ifdef XMRIG_FEATURE_API -# include "api/Api.h" -# include "api/interfaces/IApiRequest.h" -#endif - - -#ifdef XMRIG_FEATURE_EMBEDDED_CONFIG -# include "core/config/Config_default.h" -#endif - +# include "base/api/Api.h" +# include "base/api/interfaces/IApiRequest.h" namespace xmrig { @@ -63,6 +56,12 @@ static const char *kConfigPathV1 = "/1/config"; static const char *kConfigPathV2 = "/2/config"; } // namespace xmrig +#endif + + +#ifdef XMRIG_FEATURE_EMBEDDED_CONFIG +# include "core/config/Config_default.h" +#endif class xmrig::BasePrivate diff --git a/src/base/kernel/Base.h b/src/base/kernel/Base.h index 6a33a802..8eb68866 100644 --- a/src/base/kernel/Base.h +++ b/src/base/kernel/Base.h @@ -26,7 +26,7 @@ #define XMRIG_BASE_H -#include "api/interfaces/IApiListener.h" +#include "base/api/interfaces/IApiListener.h" #include "base/kernel/interfaces/IConfigListener.h" #include "base/kernel/interfaces/IWatcherListener.h" #include "rapidjson/fwd.h" diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 4406ce52..801b27c7 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -45,8 +45,8 @@ #ifdef XMRIG_FEATURE_API -# include "api/Api.h" -# include "api/interfaces/IApiRequest.h" +# include "base/api/Api.h" +# include "base/api/interfaces/IApiRequest.h" #endif diff --git a/src/core/Miner.h b/src/core/Miner.h index 035c0205..6fc75cd0 
100644 --- a/src/core/Miner.h +++ b/src/core/Miner.h @@ -29,7 +29,7 @@ #include -#include "api/interfaces/IApiListener.h" +#include "base/api/interfaces/IApiListener.h" #include "base/kernel/interfaces/IBaseListener.h" #include "base/kernel/interfaces/ITimerListener.h" #include "crypto/common/Algorithm.h" diff --git a/src/net/Network.cpp b/src/net/Network.cpp index 547a8638..3ab8bec0 100644 --- a/src/net/Network.cpp +++ b/src/net/Network.cpp @@ -34,7 +34,6 @@ #include -#include "api/Api.h" #include "base/io/log/Log.h" #include "base/net/stratum/Client.h" #include "base/net/stratum/SubmitResult.h" @@ -51,8 +50,8 @@ #ifdef XMRIG_FEATURE_API -# include "api/Api.h" -# include "api/interfaces/IApiRequest.h" +# include "base/api/Api.h" +# include "base/api/interfaces/IApiRequest.h" #endif diff --git a/src/net/Network.h b/src/net/Network.h index ddf6d6f3..716ce610 100644 --- a/src/net/Network.h +++ b/src/net/Network.h @@ -30,7 +30,7 @@ #include -#include "api/interfaces/IApiListener.h" +#include "base/api/interfaces/IApiListener.h" #include "base/kernel/interfaces/IBaseListener.h" #include "base/kernel/interfaces/IStrategyListener.h" #include "base/kernel/interfaces/ITimerListener.h" From 6955f4a484ed47327c316fa68880b8057ecad4f9 Mon Sep 17 00:00:00 2001 From: XMRig Date: Mon, 12 Aug 2019 21:13:38 +0700 Subject: [PATCH 167/172] Added pause and resume methods via JSON RPC 2.0 API. 
--- src/base/api/interfaces/IApiRequest.h | 15 ++- src/base/api/requests/ApiRequest.h | 31 +++--- src/base/api/requests/HttpApiRequest.cpp | 129 +++++++++++++++++++---- src/base/api/requests/HttpApiRequest.h | 7 +- src/core/Miner.cpp | 12 +++ 5 files changed, 156 insertions(+), 38 deletions(-) diff --git a/src/base/api/interfaces/IApiRequest.h b/src/base/api/interfaces/IApiRequest.h index 8e65a921..4f74581c 100644 --- a/src/base/api/interfaces/IApiRequest.h +++ b/src/base/api/interfaces/IApiRequest.h @@ -54,16 +54,28 @@ public: enum RequestType { REQ_UNKNOWN, - REQ_SUMMARY + REQ_SUMMARY, + REQ_JSON_RPC + }; + + + enum ErrorCode : int { + RPC_PARSE_ERROR = -32700, + RPC_INVALID_REQUEST = -32600, + RPC_METHOD_NOT_FOUND = -32601, + RPC_INVALID_PARAMS = -32602 }; virtual ~IApiRequest() = default; + virtual bool accept() = 0; + virtual bool hasParseError() const = 0; virtual bool isDone() const = 0; virtual bool isNew() const = 0; virtual bool isRestricted() const = 0; virtual const rapidjson::Value &json() const = 0; + virtual const String &rpcMethod() const = 0; virtual const String &url() const = 0; virtual int version() const = 0; virtual Method method() const = 0; @@ -71,7 +83,6 @@ public: virtual rapidjson::Value &reply() = 0; virtual RequestType type() const = 0; virtual Source source() const = 0; - virtual void accept() = 0; virtual void done(int status) = 0; }; diff --git a/src/base/api/requests/ApiRequest.h b/src/base/api/requests/ApiRequest.h index 1fd721f1..ad4b0c35 100644 --- a/src/base/api/requests/ApiRequest.h +++ b/src/base/api/requests/ApiRequest.h @@ -28,6 +28,7 @@ #include "base/api/interfaces/IApiRequest.h" +#include "base/tools/String.h" namespace xmrig { @@ -40,28 +41,30 @@ public: ~ApiRequest() override; protected: - inline bool isDone() const override { return m_state == STATE_DONE; } - inline bool isNew() const override { return m_state == STATE_NEW; } - inline bool isRestricted() const override { return m_restricted; } - inline int version() 
const override { return m_version; } - inline RequestType type() const override { return m_type; } - inline Source source() const override { return m_source; } - inline void accept() override { m_state = STATE_ACCEPTED; } - inline void done(int) override { m_state = STATE_DONE; } - - int m_version = 1; - RequestType m_type = REQ_UNKNOWN; - -private: enum State { STATE_NEW, STATE_ACCEPTED, STATE_DONE }; + inline bool accept() override { m_state = STATE_ACCEPTED; return true; } + inline bool isDone() const override { return m_state == STATE_DONE; } + inline bool isNew() const override { return m_state == STATE_NEW; } + inline bool isRestricted() const override { return m_restricted; } + inline const String &rpcMethod() const override { return m_rpcMethod; } + inline int version() const override { return m_version; } + inline RequestType type() const override { return m_type; } + inline Source source() const override { return m_source; } + inline void done(int) override { m_state = STATE_DONE; } + + int m_version = 1; + RequestType m_type = REQ_UNKNOWN; + State m_state = STATE_NEW; + String m_rpcMethod; + +private: bool m_restricted; Source m_source; - State m_state = STATE_NEW; }; diff --git a/src/base/api/requests/HttpApiRequest.cpp b/src/base/api/requests/HttpApiRequest.cpp index c3c2dac4..ed13e47d 100644 --- a/src/base/api/requests/HttpApiRequest.cpp +++ b/src/base/api/requests/HttpApiRequest.cpp @@ -23,14 +23,45 @@ */ +#include "3rdparty/http-parser/http_parser.h" #include "base/api/requests/HttpApiRequest.h" +#include "base/io/json/Json.h" #include "base/net/http/HttpData.h" #include "rapidjson/error/en.h" +namespace xmrig { + + +static const char *kError = "error"; +static const char *kId = "id"; +static const char *kResult = "result"; + + +static inline const char *rpcError(int code) { + switch (code) { + case IApiRequest::RPC_PARSE_ERROR: + return "Parse error"; + + case IApiRequest::RPC_INVALID_REQUEST: + return "Invalid Request"; + + case 
IApiRequest::RPC_METHOD_NOT_FOUND: + return "Method not found"; + + case IApiRequest::RPC_INVALID_PARAMS: + return "Invalid params"; + } + + return "Internal error"; +} + + +} // namespace xmrig + + xmrig::HttpApiRequest::HttpApiRequest(const HttpData &req, bool restricted) : ApiRequest(SOURCE_HTTP, restricted), - m_parsed(false), m_req(req), m_res(req.id()), m_url(req.url.c_str()) @@ -41,6 +72,28 @@ xmrig::HttpApiRequest::HttpApiRequest(const HttpData &req, bool restricted) : } } + if (method() == METHOD_POST && url() == "/json_rpc") { + m_type = REQ_JSON_RPC; + accept(); + + if (hasParseError()) { + done(RPC_PARSE_ERROR); + + return; + } + + m_rpcMethod = Json::getString(json(), "method"); + if (m_rpcMethod.isEmpty()) { + done(RPC_INVALID_REQUEST); + + return; + } + + m_state = STATE_NEW; + + return; + } + if (url().size() > 4) { if (memcmp(url().data(), "/2/", 3) == 0) { m_version = 2; @@ -49,6 +102,31 @@ xmrig::HttpApiRequest::HttpApiRequest(const HttpData &req, bool restricted) : } +bool xmrig::HttpApiRequest::accept() +{ + using namespace rapidjson; + + ApiRequest::accept(); + + if (m_parsed == 0 && !m_req.body.empty()) { + m_body.Parse(m_req.body.c_str()); + m_parsed = m_body.HasParseError() ? 
2 : 1; + + if (!hasParseError()) { + return true; + } + + if (type() != REQ_JSON_RPC) { + reply().AddMember(StringRef(kError), StringRef(GetParseError_En(m_body.GetParseError())), doc().GetAllocator()); + } + + return false; + } + + return hasParseError(); +} + + const rapidjson::Value &xmrig::HttpApiRequest::json() const { return m_body; @@ -61,27 +139,40 @@ xmrig::IApiRequest::Method xmrig::HttpApiRequest::method() const } -void xmrig::HttpApiRequest::accept() -{ - using namespace rapidjson; - - ApiRequest::accept(); - - if (!m_parsed && !m_req.body.empty()) { - m_parsed = true; - m_body.Parse(m_req.body.c_str()); - - if (m_body.HasParseError()) { - reply().AddMember("error", StringRef(GetParseError_En(m_body.GetParseError())), doc().GetAllocator());; - } - } -} - - void xmrig::HttpApiRequest::done(int status) { ApiRequest::done(status); - m_res.setStatus(status); + if (type() == REQ_JSON_RPC) { + using namespace rapidjson; + auto &allocator = doc().GetAllocator(); + + m_res.setStatus(HTTP_STATUS_OK); + + if (status != HTTP_STATUS_OK) { + if (status == HTTP_STATUS_NOT_FOUND) { + status = RPC_METHOD_NOT_FOUND; + } + + Value error(kObjectType); + error.AddMember("code", status, allocator); + error.AddMember("message", StringRef(rpcError(status)), allocator); + + reply().AddMember(StringRef(kError), error, allocator); + } + else if (!reply().HasMember(kResult)) { + Value result(kObjectType); + result.AddMember("status", "OK", allocator); + + reply().AddMember(StringRef(kResult), result, allocator); + } + + reply().AddMember("jsonrpc", "2.0", allocator); + reply().AddMember(StringRef(kId), Value().CopyFrom(Json::getValue(json(), kId), allocator), allocator); + } + else { + m_res.setStatus(status); + } + m_res.end(); } diff --git a/src/base/api/requests/HttpApiRequest.h b/src/base/api/requests/HttpApiRequest.h index dc3eb037..309b5a6b 100644 --- a/src/base/api/requests/HttpApiRequest.h +++ b/src/base/api/requests/HttpApiRequest.h @@ -44,19 +44,20 @@ public: 
HttpApiRequest(const HttpData &req, bool restricted); protected: + inline bool hasParseError() const override { return m_parsed == 2; } + inline const String &url() const override { return m_url; } inline rapidjson::Document &doc() override { return m_res.doc(); } inline rapidjson::Value &reply() override { return m_res.doc(); } - inline const String &url() const override { return m_url; } + bool accept() override; const rapidjson::Value &json() const override; Method method() const override; - void accept() override; void done(int status) override; private: - bool m_parsed; const HttpData &m_req; HttpApiResponse m_res; + int m_parsed = 0; rapidjson::Document m_body; String m_url; }; diff --git a/src/core/Miner.cpp b/src/core/Miner.cpp index 801b27c7..26a5d7e7 100644 --- a/src/core/Miner.cpp +++ b/src/core/Miner.cpp @@ -477,5 +477,17 @@ void xmrig::Miner::onRequest(IApiRequest &request) d_ptr->getBackends(request.reply(), request.doc()); } } + else if (request.type() == IApiRequest::REQ_JSON_RPC) { + if (request.rpcMethod() == "pause") { + request.accept(); + + setEnabled(false); + } + else if (request.rpcMethod() == "resume") { + request.accept(); + + setEnabled(true); + } + } } #endif From ed04ecd7353c6d33a2f4f0c7602bb89bba043b9a Mon Sep 17 00:00:00 2001 From: xmrig Date: Tue, 13 Aug 2019 00:24:04 +0700 Subject: [PATCH 168/172] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b4317f7..50167977 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - [#1010](https://github.com/xmrig/xmrig/pull/1010#issuecomment-482632107) Added daemon support (solo mining). - [#1066](https://github.com/xmrig/xmrig/issues/1066#issuecomment-518080529) Added error message if pool not ready for RandomX. - [#1105](https://github.com/xmrig/xmrig/issues/1105) Improved auto configuration for `cn-pico` algorithm. +- Added commands `pause` and `resume` via JSON RPC 2.0 API (`POST /json_rpc`). 
- Added command line option `--export-topology` for export hwloc topology to a XML file. - Breaked backward compatibility with previous configs and command line, `variant` option replaced to `algo`, global option `algo` removed, all CPU related settings moved to `cpu` object. - Options `av`, `safe` and `max-cpu-usage` removed. From df933964e1b53bdc2085925843fec78b87934cdf Mon Sep 17 00:00:00 2001 From: XMRig Date: Tue, 13 Aug 2019 17:56:32 +0700 Subject: [PATCH 169/172] Fixed autoconfig regression. --- src/backend/cpu/platform/HwlocCpuInfo.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/backend/cpu/platform/HwlocCpuInfo.cpp b/src/backend/cpu/platform/HwlocCpuInfo.cpp index 491305ec..7da99787 100644 --- a/src/backend/cpu/platform/HwlocCpuInfo.cpp +++ b/src/backend/cpu/platform/HwlocCpuInfo.cpp @@ -246,13 +246,14 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith findByType(cache, HWLOC_OBJ_CORE, [&cores](hwloc_obj_t found) { cores.emplace_back(found); }); size_t L3 = cache->attr->cache.size; + const bool L3_exclusive = isCacheExclusive(cache); size_t L2 = 0; int L2_associativity = 0; size_t extra = 0; const size_t scratchpad = algorithm.l3(); int intensity = algorithm.maxIntensity() == 1 ? 
-1 : 1; - if (cache->attr->cache.depth == 3 && isCacheExclusive(cache)) { + if (cache->attr->cache.depth == 3) { for (size_t i = 0; i < cache->arity; ++i) { hwloc_obj_t l2 = cache->children[i]; if (!isCacheObject(l2) || l2->attr == nullptr) { @@ -262,7 +263,7 @@ void xmrig::HwlocCpuInfo::processTopLevelCache(hwloc_obj_t cache, const Algorith L2 += l2->attr->cache.size; L2_associativity = l2->attr->cache.associativity; - if (l2->attr->cache.size >= scratchpad) { + if (L3_exclusive && l2->attr->cache.size >= scratchpad) { extra += scratchpad; } } From 9f4428a484f9ac38288eae7ddc7da78b6224a55a Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 14 Aug 2019 02:23:48 +0700 Subject: [PATCH 170/172] Fixed user job recovery after donation round. --- src/backend/common/WorkerJob.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/backend/common/WorkerJob.h b/src/backend/common/WorkerJob.h index c9a3d55c..4b691952 100644 --- a/src/backend/common/WorkerJob.h +++ b/src/backend/common/WorkerJob.h @@ -56,6 +56,7 @@ public: } if (index() == 1 && job.index() == 0 && job == m_jobs[0]) { + m_index = 0; return; } From ca6376bbaaa74e60bd4edaaf16e49b4a4b0e22ab Mon Sep 17 00:00:00 2001 From: XMRig Date: Wed, 14 Aug 2019 02:27:52 +0700 Subject: [PATCH 171/172] Hide AVX2 information from miner summary as not important (only need for cn/gpu). --- src/Summary.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Summary.cpp b/src/Summary.cpp index ab6b7b1e..227fdcc8 100644 --- a/src/Summary.cpp +++ b/src/Summary.cpp @@ -71,13 +71,12 @@ static void print_cpu(Config *) { const ICpuInfo *info = Cpu::info(); - Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%zu)") " %sx64 %sAES %sAVX2", + Log::print(GREEN_BOLD(" * ") WHITE_BOLD("%-13s%s (%zu)") " %sx64 %sAES", "CPU", info->brand(), info->packages(), info->isX64() ? GREEN_BOLD_S : RED_BOLD_S "-", - info->hasAES() ? GREEN_BOLD_S : RED_BOLD_S "-", - info->hasAVX2() ? GREEN_BOLD_S : RED_BOLD_S "-" + info->hasAES() ? 
GREEN_BOLD_S : RED_BOLD_S "-" ); # if defined(XMRIG_FEATURE_LIBCPUID) || defined (XMRIG_FEATURE_HWLOC) Log::print(WHITE_BOLD(" %-13s") BLACK_BOLD("L2:") WHITE_BOLD("%.1f MB") BLACK_BOLD(" L3:") WHITE_BOLD("%.1f MB") From f4db6b16dd6876766b326d13c9395e1dd89b3e10 Mon Sep 17 00:00:00 2001 From: xmrig Date: Wed, 14 Aug 2019 02:45:53 +0700 Subject: [PATCH 172/172] Update CHANGELOG_OLD.md --- doc/CHANGELOG_OLD.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/CHANGELOG_OLD.md b/doc/CHANGELOG_OLD.md index 15ae35a9..58be062b 100644 --- a/doc/CHANGELOG_OLD.md +++ b/doc/CHANGELOG_OLD.md @@ -1,3 +1,9 @@ +# v2.99.6-beta +- Added commands `pause` and `resume` via JSON RPC 2.0 API (`POST /json_rpc`). +- Fixed autoconfig regression (since 2.99.5), mostly `rx/wow` was affected by this bug. +- Fixed user job recovery after donation round. +- Information about AVX2 CPU feature now hidden in miner summary. + # v2.99.5-beta - [#1066](https://github.com/xmrig/xmrig/issues/1066#issuecomment-518080529) Fixed crash and added error message if pool not ready for RandomX. - [#1092](https://github.com/xmrig/xmrig/issues/1092) Fixed crash if wrong CPU affinity used.