Commit b9eac005 authored by Tiago Peixoto's avatar Tiago Peixoto

Switch PRNG from std::mt19937 to PCG

parent 0d86003c
......@@ -357,7 +357,7 @@ AC_SUBST(CPPFLAGS)
AC_SUBST(CXXFLAGS)
# extra CPP flags for submodules
[MOD_CPPFLAGS="-I\$(top_srcdir)/src/boost-workaround -DHAVE_CONFIG_H -I\$(top_srcdir)/src/graph -I\$(top_builddir) ${PYTHON_CPPFLAGS} ${BOOST_CPPFLAGS} ${NUMPY_CPPFLAGS} ${EXPAT_CFLAGS}"]
[MOD_CPPFLAGS="-I\$(top_srcdir)/src/boost-workaround -I\$(top_srcdir)/src/pcg-cpp/include -DHAVE_CONFIG_H -I\$(top_srcdir)/src/graph -I\$(top_builddir) ${PYTHON_CPPFLAGS} ${BOOST_CPPFLAGS} ${NUMPY_CPPFLAGS} ${EXPAT_CFLAGS}"]
AC_SUBST(MOD_CPPFLAGS)
# extra LIBADD flags for submodules
......@@ -378,7 +378,7 @@ AC_SUBST(MOD_LDFLAGS)
AX_CREATE_PKGCONFIG_INFO([graph-tool-py${PYTHON_VERSION}.pc], [],
[${PYTHON_LIBS} -l${BOOST_PYTHON_LIB}],
[graph-tool Python library],
[-I${MOD_DIR}/include -I${MOD_DIR}/include/boost-workaround ${PYTHON_CPPFLAGS} ${BOOST_CPPFLAGS} ${SPARSEHASH_CFLAGS} ${NUMPY_CPPFLAGS}],
[-I${MOD_DIR}/include -I${MOD_DIR}/include/boost-workaround -I${MOD_DIR}/include/pcg-cpp ${PYTHON_CPPFLAGS} ${BOOST_CPPFLAGS} ${SPARSEHASH_CFLAGS} ${NUMPY_CPPFLAGS}],
[])
AC_CONFIG_FILES([
......
......@@ -71,6 +71,7 @@ libgraph_tool_core_la_include_HEADERS = \
mpl_nested_loop.hh \
numpy_bind.hh \
openmp_lock.hh \
parallel_rng.hh \
random.hh \
str_repr.hh \
shared_map.hh \
......@@ -91,3 +92,9 @@ libgraph_tool_core_la_workaround_HEADERS = \
../boost-workaround/boost/graph/copy_alt.hpp \
../boost-workaround/boost/graph/stoer_wagner_min_cut.hpp
libgraph_tool_core_la_pcg_cppdir = $(MOD_DIR)/include/pcg-cpp
libgraph_tool_core_la_pcg_cpp_HEADERS = \
../pcg-cpp/include/pcg_extras.hpp \
../pcg-cpp/include/pcg_random.hpp \
../pcg-cpp/include/pcg_uint128.hpp
......@@ -134,18 +134,50 @@ private:
// uniform sampling from containers

// Return an iterator to an element chosen uniformly at random from the
// range [begin, end).  For an empty range, `begin` (== `end`) is returned,
// so callers must check before dereferencing.
template <class Iter, class RNG>
auto uniform_sample_iter(Iter begin, const Iter& end, RNG& rng)
{
    auto N = std::distance(begin, end);
    if (N == 0)
        return begin; // empty range: avoid the invalid distribution [0, -1]
    std::uniform_int_distribution<size_t> i_rand(0, N - 1);
    std::advance(begin, i_rand(rng));
    return begin;
}

// Convenience overload: sample an iterator from an entire container.
template <class Container, class RNG>
auto uniform_sample_iter(Container& v, RNG& rng)
{
    return uniform_sample_iter(v.begin(), v.end(), rng);
}

// Return a reference to an element chosen uniformly at random from
// [begin, end).  The range must be non-empty.
template <class Iter, class RNG>
auto&& uniform_sample(Iter&& begin, const Iter& end, RNG& rng)
{
    return *uniform_sample_iter(begin, end, rng);
}

// Return a reference to an element chosen uniformly at random from a
// non-empty container.
template <class Container, class RNG>
auto&& uniform_sample(Container& v, RNG& rng)
{
    return *uniform_sample_iter(v, rng);
}
// Sample a uniformly random out-neighbor of vertex v in graph g.
// Precondition: v has at least one out-edge.
template <class Graph, class RNG>
typename boost::graph_traits<Graph>::vertex_descriptor
random_out_neighbor(typename boost::graph_traits<Graph>::vertex_descriptor v,
                    const Graph& g,
                    RNG& rng)
{
    auto es = out_edges(v, g);
    auto e = uniform_sample_iter(es.first, es.second, rng);
    return target(*e, g);
}
// Sample a uniformly random in-neighbor of vertex v in graph g.
// Precondition: v has at least one in-edge.
template <class Graph, class RNG>
typename boost::graph_traits<Graph>::vertex_descriptor
random_in_neighbor(typename boost::graph_traits<Graph>::vertex_descriptor v,
                   const Graph& g,
                   RNG& rng)
{
    auto es = in_edge_iteratorS<Graph>::get_edges(v, g);
    auto e = uniform_sample_iter(es.first, es.second, rng);
    return source(*e, g);
}
} // namespace graph_tool
......
......@@ -103,6 +103,5 @@ libgraph_tool_inference_la_include_HEADERS = \
support/graph_neighbor_sampler.hh \
support/graph_state.hh \
support/int_part.hh \
support/parallel_rng.hh \
support/util.hh \
graph_modularity.hh
......@@ -102,15 +102,14 @@ python::object do_gibbs_sweep_parallel(python::object ogibbs_states,
block_state::dispatch(oblock_states[i], dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -110,15 +110,14 @@ python::object do_mcmc_sweep_parallel(python::object omcmc_states,
block_state::dispatch(oblock_states[i], dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -102,15 +102,14 @@ python::object do_multiflip_mcmc_sweep_parallel(python::object omcmc_states,
block_state::dispatch(oblock_states[i], dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -130,15 +130,14 @@ python::object gibbs_layered_sweep_parallel(python::object ogibbs_states,
block_state::dispatch(dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -138,15 +138,14 @@ python::object mcmc_layered_sweep_parallel(python::object omcmc_states,
block_state::dispatch(dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -129,15 +129,14 @@ python::object multiflip_mcmc_layered_sweep_parallel(python::object omcmc_states
block_state::dispatch(dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -128,15 +128,14 @@ python::object gibbs_layered_overlap_sweep_parallel(python::object ogibbs_states
overlap_block_state::dispatch(dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -137,15 +137,14 @@ python::object mcmc_layered_overlap_sweep_parallel(python::object omcmc_states,
overlap_block_state::dispatch(dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -128,15 +128,14 @@ python::object multiflip_mcmc_layered_overlap_sweep_parallel(python::object omcm
overlap_block_state::dispatch(dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -26,7 +26,7 @@
#include <tuple>
#include "hash_map_wrap.hh"
#include "../support/parallel_rng.hh"
#include "parallel_rng.hh"
#ifdef _OPENMP
#include <omp.h>
......@@ -39,12 +39,12 @@ auto gibbs_sweep(GibbsState state, RNG& rng_)
{
auto& g = state._g;
vector<std::shared_ptr<RNG>> rngs;
std::vector<std::pair<size_t, double>> best_move;
vector<RNG> rngs;
vector<std::pair<size_t, double>> best_move;
if (state._parallel)
{
init_rngs(rngs, rng_);
parallel_rng<RNG>::init(rng_);
init_cache(state._E);
best_move.resize(num_vertices(g));
}
......@@ -84,7 +84,7 @@ auto gibbs_sweep(GibbsState state, RNG& rng_)
(vlist,
[&](size_t, auto v)
{
auto& rng = get_rng(rngs, rng_);
auto& rng = parallel_rng<RNG>::get(rng_);
if (!state._sequential)
v = uniform_sample(vlist, rng);
......
......@@ -26,7 +26,7 @@
#include <tuple>
#include "hash_map_wrap.hh"
#include "../support/parallel_rng.hh"
#include "parallel_rng.hh"
#ifdef _OPENMP
#include <omp.h>
......@@ -117,10 +117,9 @@ auto mcmc_sweep_parallel(MCMCState state, RNG& rng_)
{
auto& g = state._g;
vector<std::shared_ptr<RNG>> rngs;
std::vector<std::pair<size_t, double>> best_move;
init_rngs(rngs, rng_);
parallel_rng<RNG>::init(rng_);
init_cache(state._E);
best_move.resize(num_vertices(g));
......@@ -146,7 +145,7 @@ auto mcmc_sweep_parallel(MCMCState state, RNG& rng_)
(vlist,
[&](size_t, auto v)
{
auto& rng = get_rng(rngs, rng_);
auto& rng = parallel_rng<RNG>::get(rng_);
if (state.node_weight(v) == 0)
return;
......
......@@ -26,7 +26,7 @@
#include <tuple>
#include "hash_map_wrap.hh"
#include "../support/parallel_rng.hh"
#include "parallel_rng.hh"
#ifdef _OPENMP
#include <omp.h>
......@@ -37,10 +37,9 @@ namespace graph_tool
template <class MergeState, class RNG>
auto merge_sweep(MergeState state, RNG& rng_)
{
vector<std::shared_ptr<RNG>> rngs;
if (state._parallel)
{
init_rngs(rngs, rng_);
parallel_rng<RNG>::init(rng_);
init_cache(state._E);
}
......@@ -60,7 +59,7 @@ auto merge_sweep(MergeState state, RNG& rng_)
(state._available,
[&](size_t, auto v)
{
auto& rng = get_rng(rngs, rng_);
auto& rng = parallel_rng<RNG>::get(rng_);
if (state.node_weight(v) == 0)
return;
......
......@@ -105,15 +105,13 @@ python::object gibbs_overlap_sweep_parallel(python::object ogibbs_states,
overlap_block_state::dispatch(oblock_states[i], dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -106,15 +106,14 @@ python::object overlap_mcmc_sweep_parallel(python::object omcmc_states,
overlap_block_state::dispatch(oblock_states[i], dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -106,15 +106,14 @@ python::object overlap_multiflip_mcmc_sweep_parallel(python::object omcmc_states
overlap_block_state::dispatch(oblock_states[i], dispatch);
}
std::vector<std::shared_ptr<rng_t>> rngs;
init_rngs(rngs, rng);
parallel_rng<rng_t>::init(rng);
std::vector<std::tuple<double, size_t, size_t>> rets(N);
#pragma omp parallel for schedule(runtime)
for (size_t i = 0; i < N; ++i)
{
auto& rng_ = get_rng(rngs, rng);
auto& rng_ = parallel_rng<rng_t>::get(rng);
rets[i] = sweeps[i]->run(rng_);
}
......
......@@ -18,34 +18,56 @@
#ifndef PARALLEL_RNG_HH
#define PARALLEL_RNG_HH
#include "config.h"
#include <vector>
#ifdef _OPENMP
# include <omp.h>
#endif

// Per-thread RNG multiplexer: thread 0 uses the caller-supplied generator
// directly, while every other OpenMP thread gets its own copy of it, moved
// to a distinct PCG stream, so parallel sections draw independent sequences.
//
// NOTE(review): this requires RNG to provide set_stream(), i.e. a PCG
// generator; it is not usable with std:: engines.
template <class RNG>
class parallel_rng
{
public:
    // Ensure one generator exists for every thread beyond the first.
    // Idempotent: already-created generators are kept, only missing ones
    // are appended.
    static void init(RNG& rng)
    {
        size_t num_threads = 1;
#ifdef _OPENMP
        num_threads = omp_get_max_threads();
#endif
        for (size_t i = _rngs.size(); i < num_threads - 1; ++i)
        {
            // Copy the master generator's state, but place the copy on a
            // unique stream so the threads' sequences never overlap.
            _rngs.emplace_back(rng);
            _rngs.back().set_stream(i + 1);
        }
    }

    // Drop all per-thread generators (e.g. after re-seeding the master).
    static void clear()
    {
        _rngs.clear();
    }

    // Return the generator owned by the calling thread: thread 0 gets the
    // master generator itself, thread t > 0 gets its stream-shifted copy.
    static RNG& get(RNG& rng)
    {
        size_t tid = 0;
#ifdef _OPENMP
        tid = omp_get_thread_num();
#endif
        if (tid == 0)
            return rng;
        return _rngs[tid - 1];
    }

private:
    static std::vector<RNG> _rngs;
};

template <class RNG>
std::vector<RNG> parallel_rng<RNG>::_rngs;
#endif // PARALLEL_RNG_HH
......@@ -15,10 +15,18 @@
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "graph.hh"
#include "random.hh"
#include "parallel_rng.hh"
// Return a freshly seeded global RNG.  A seed of zero requests
// non-deterministic seeding from the operating system's entropy source.
rng_t get_rng(size_t seed)
{
    // Any per-thread generators derived from the previous seed are stale.
    parallel_rng<rng_t>::clear();

    if (seed != 0)
    {
        // Deterministic: expand the single seed value into a seed sequence.
        std::seed_seq seq{seed, seed + 1, seed + 2, seed + 3, seed + 4};
        return rng_t(seq);
    }

    // seed == 0: pull entropy from std::random_device via PCG's adapter.
    pcg_extras::seed_seq_from<std::random_device> seed_source;
    return rng_t(seed_source);
}
......@@ -19,8 +19,10 @@
#define RANDOM_HH
#include <random>
#include "pcg_random.hpp"
typedef std::mt19937 rng_t;
//typedef std::mt19937_64 rng_t;
typedef pcg64_k1024 rng_t;
rng_t get_rng(size_t seed);
......
......@@ -3428,8 +3428,8 @@ _rng = libcore.get_rng((numpy.random.randint(0, sys.maxsize) + os.getpid()) % sy
def seed_rng(seed):
    """Seed the random number generator used by graph-tool's algorithms.

    A ``seed`` of zero seeds the generator non-deterministically from the
    operating system's entropy source.
    """
    global _rng
    _rng = libcore.get_rng(seed)
def _get_rng():
global _rng
......
# Contributing
The license for this work does not require you to share changes you make
in your own version of the code. Contributions are welcome, however.
## License
This work is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE.txt) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT.txt) or http://opensource.org/licenses/MIT)
at your option.
## Contribution Policy
Unless you explicitly state otherwise, any contribution intentionally
submitted for inclusion in this work by you, as defined in the
Apache-2.0 license, shall be dual licensed as above, without any
additional terms or conditions.
This diff is collapsed.
Copyright (c) 2014-2017 Melissa O'Neill and PCG Project contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
SPDXVersion: SPDX-2.1
DataLicense: CC0-1.0
PackageName: pcg-cpp
PackageOriginator: Melissa O'Neill <oneill@pcg-random.org>
PackageHomePage: http://www.pcg-random.org
PackageLicenseDeclared: (MIT OR Apache-2.0)
# PCG Random Number Generation, C++ Edition
[PCG-Random website]: http://www.pcg-random.org
This code provides an implementation of the PCG family of random number
generators, which are fast, statistically excellent, and offer a number of
useful features.
Full details can be found at the [PCG-Random website]. This version
of the code provides many family members -- if you just want one
simple generator, you may prefer the minimal C version of the library.
There are two kinds of generator, normal generators and extended generators.
Extended generators provide *k* dimensional equidistribution and can perform
party tricks, but generally speaking most people only need the normal
generators.
There are two ways to access the generators, using a convenience typedef
or by using the underlying templates directly (similar to C++11's `std::mt19937` typedef vs its `std::mersenne_twister_engine` template). For most users, the convenience typedef is what you want, and probably you're fine with `pcg32` for 32-bit numbers. If you want 64-bit numbers, either use `pcg64` (or, if you're on a 32-bit system, making 64 bits from two calls to `pcg32_k2` may be faster).
## Documentation and Examples
Visit [PCG-Random website] for information on how to use this library, or look
at the sample code in the `sample` directory -- hopefully it should be fairly
self explanatory.
## Building
The code is written in C++11, as an include-only library (i.e., there is
nothing you need to build). There are some provided demo programs and tests
however. On a Unix-style system (e.g., Linux, Mac OS X) you should be able
to just type
make
To build the demo programs.
## Testing
Run
make test
## Directory Structure
The directories are arranged as follows:
* `include` -- contains `pcg_random.hpp` and supporting include files
* `test-high` -- test code for the high-level API where the functions have
shorter, less scary-looking names.
* `sample` -- sample code, some similar to the code in `test-high` but more
human readable, some other examples too
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment