#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# graph_tool -- a general graph manipulation python module
#
# Copyright (C) 2006-2017 Tiago de Paula Peixoto <tiago@skewed.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import division, absolute_import, print_function
import sys
if sys.version_info < (3,):
    range = xrange

from .. import Vector_size_t, Vector_double

import numpy
from numpy import array, exp, log
from . util import *

def mcmc_equilibrate(state, wait=1000, nbreaks=2, max_niter=numpy.inf,
                     force_niter=None, epsilon=0, gibbs=False, multiflip=False,
                     mcmc_args={}, entropy_args={}, history=False,
                     callback=None, verbose=False):
    r"""Equilibrate an MCMC with a given starting state.

    Parameters
    ----------
    state : Any state class (e.g. :class:`~graph_tool.inference.BlockState`)
        Initial state. This state will be modified during the algorithm.
    wait : ``int`` (optional, default: ``1000``)
        Number of iterations to wait for a record-breaking event.
    nbreaks : ``int`` (optional, default: ``2``)
        Number of iteration intervals (of size ``wait``) without record-breaking
        events necessary to stop the algorithm.
    max_niter : ``int`` (optional, default: ``numpy.inf``)
        Maximum number of iterations.
    force_niter : ``int`` (optional, default: ``None``)
        If given, will force the algorithm to run this exact number of
        iterations.
    epsilon : ``float`` (optional, default: ``0``)
        Relative changes in entropy smaller than epsilon will not be considered
        as record-breaking.
    gibbs : ``bool`` (optional, default: ``False``)
        If ``True``, each step will call ``state.gibbs_sweep`` instead of
        ``state.mcmc_sweep``.
    multiflip : ``bool`` (optional, default: ``False``)
        If ``True``, each step will call ``state.multiflip_mcmc_sweep`` instead
        of ``state.mcmc_sweep``.
    mcmc_args : ``dict`` (optional, default: ``{}``)
        Arguments to be passed to ``state.mcmc_sweep`` (or ``state.gibbs_sweep``
        or ``state.multiflip_mcmc_sweep``).
    history : ``bool`` (optional, default: ``False``)
        If ``True``, a list of tuples of the form ``(nattempts, nmoves,
        entropy)`` will be kept and returned, where ``nattempts`` is the number
        of vertex move attempts, ``nmoves`` is the number of vertices moved, and
        ``entropy`` is the current entropy.
    callback : ``function`` (optional, default: ``None``)
        If given, this function will be called after each iteration. The
        function must accept the current state as an argument, and its return
        value must be either ``None`` or a (possibly empty) list of values that
        will be appended to the history, if ``history == True``.
    verbose : ``bool`` or ``tuple`` (optional, default: ``False``)
        If ``True``, progress information will be shown. Optionally, this
        accepts arguments of the type ``tuple`` of the form ``(level, prefix)``
        where ``level`` is a positive integer that specifies the level of
        detail, and ``prefix`` is a string that is prepended to all output
        messages.

    Notes
    -----

    The MCMC equilibration is attempted by keeping track of the maximum and
    minimum entropy values encountered so far, and waiting sufficiently long
    without a record-breaking event.

    This function calls ``state.mcmc_sweep`` (or ``state.gibbs_sweep``) at each
    iteration (e.g. :meth:`graph_tool.inference.BlockState.mcmc_sweep` and
    :meth:`graph_tool.inference.BlockState.gibbs_sweep`), and keeps track of
    the value of ``state.entropy(**args)`` with ``args`` corresponding to
    ``mcmc_args["entropy_args"]``.
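
    A minimal usage sketch (assuming ``import graph_tool.all as gt``; the graph
    and the values of ``wait`` and ``niter`` below are only illustrative)::

        g = gt.collection.data["football"]
        state = gt.minimize_blockmodel_dl(g)   # any compatible state works
        gt.mcmc_equilibrate(state, wait=100, mcmc_args=dict(niter=10))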

    Returns
    -------

    history : list of tuples of the form ``(nattempts, nmoves, entropy)``
        Summary of the MCMC run. This is returned only if ``history == True``.
    entropy : ``float``
        Current entropy value after run. This is returned only if ``history ==
        False``.
    nattempts : ``int``
        Number of node move attempts.
    nmoves : ``int``
        Number of node moves.

    References
    ----------

    .. [peixoto-efficient-2014] Tiago P. Peixoto, "Efficient Monte Carlo and
       greedy heuristic for the inference of stochastic block models", Phys.
       Rev. E 89, 012804 (2014), :doi:`10.1103/PhysRevE.89.012804`,
       :arxiv:`1310.4378`

    """

    count = 0
    break_count = 0
    niter = 0
    total_nmoves = 0
    total_nattempts = 0
    S = state.entropy(**mcmc_args.get("entropy_args", {}))
    min_S = max_S = S
    m_eps = 1e-6
    hist = []
    while count < wait:
        if gibbs:
            delta, nattempts, nmoves = state.gibbs_sweep(**mcmc_args)
        elif multiflip:
            delta, nattempts, nmoves = state.multiflip_mcmc_sweep(**mcmc_args)
        else:
            delta, nattempts, nmoves = state.mcmc_sweep(**mcmc_args)

        S += delta
        niter += 1
        total_nmoves += nmoves
        total_nattempts += nattempts

        if force_niter is not None:
            if niter >= force_niter:
                break
        else:
            if abs(delta) >= (S - delta) * epsilon:
                if S > max_S + m_eps:
                    max_S = S
                    count = 0
                elif S < min_S - m_eps:
                    min_S = S
                    count = 0
                else:
                    count += 1
            else:
                count += 1

            if count >= wait:
                break_count += 1
                if break_count < nbreaks:
                    count = 0
                    min_S = max_S = S

        extra = []
        if callback is not None:
            extra = callback(state)
            if extra is None:
                extra = []

        if check_verbose(verbose):
            print((verbose_pad(verbose) +
                   u"niter: %5d  count: %4d  breaks: %2d  min_S: %#8.8g  " +
                   u"max_S: %#8.8g  S: %#8.8g  ΔS: %#12.6g  moves: %5d %s") %
                   (niter, count, break_count, min_S, max_S, S, delta, nmoves,
                    str(extra) if len(extra) > 0 else ""))

        if history:
            hist.append(tuple([nattempts, nmoves, S] + extra))

        if niter >= max_niter:
            break

    if history:
        return hist
    else:
        return (S, total_nattempts, total_nmoves)

def mcmc_anneal(state, beta_range=(1., 10.), niter=100, history=False,
                mcmc_equilibrate_args={}, verbose=False):
    r"""Equilibrate an MCMC at a specified target temperature by performing simulated
    annealing.

    Parameters
    ----------
    state : Any state class (e.g. :class:`~graph_tool.inference.BlockState`)
        Initial state. This state will be modified during the algorithm.
    beta_range : ``tuple`` of two floats (optional, default: ``(1., 10.)``)
        Inverse temperature range.
    niter : ``int`` (optional, default: ``100``)
        Number of steps (in logspace) from the starting temperature to the final
        one.
    history : ``bool`` (optional, default: ``False``)
        If ``True``, a list of tuples of the form ``(nattempts, nmoves, beta,
        entropy)`` will be kept and returned.
    mcmc_equilibrate_args : ``dict`` (optional, default: ``{}``)
        Arguments to be passed to :func:`~graph_tool.inference.mcmc_equilibrate`.
    verbose : ``bool`` or ``tuple`` (optional, default: ``False``)
        If ``True``, progress information will be shown. Optionally, this
        accepts arguments of the type ``tuple`` of the form ``(level, prefix)``
        where ``level`` is a positive integer that specifies the level of
        detail, and ``prefix`` is a string that is prepended to all output
        messages.

    Notes
    -----

    This algorithm employs exponential cooling, where the value of ``beta`` is
    multiplied by a constant at each iteration, so that starting from
    ``beta_range[0]`` the value of ``beta_range[1]`` is reached after ``niter``
    iterations.

    At each iteration, the function
    :func:`~graph_tool.inference.mcmc_equilibrate` is called with the current
    value of ``beta`` (via the ``mcmc_args`` parameter).
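
    Concretely, the multiplicative factor used below is::

        speed = exp((log(beta_range[1]) - log(beta_range[0])) / niter)

    so that ``beta_range[0] * speed ** niter == beta_range[1]``.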

    Returns
    -------

    history : list of tuples of the form ``(nattempts, nmoves, beta, entropy)``
        Summary of the MCMC run. This is returned only if ``history == True``.
    entropy : ``float``
        Current entropy value after run. This is returned only if ``history ==
        False``.
    nattempts : ``int``
        Number of node move attempts.
    nmoves : ``int``
        Number of node moves.

    References
    ----------

    .. [peixoto-efficient-2014] Tiago P. Peixoto, "Efficient Monte Carlo and
       greedy heuristic for the inference of stochastic block models", Phys.
       Rev. E 89, 012804 (2014), :doi:`10.1103/PhysRevE.89.012804`,
       :arxiv:`1310.4378`
    """

    beta = beta_range[0]
    hist = ([], [], [], [])
    nattempts = 0
    nmoves = 0
    speed = exp((log(beta_range[1]) - log(beta_range[0])) / niter)
    mcmc_args = mcmc_equilibrate_args.get("mcmc_args", {})
    while beta < beta_range[1] * speed:
        ret = mcmc_equilibrate(state,
                               **dict(mcmc_equilibrate_args,
                                      mcmc_args=dict(mcmc_args,
                                                     beta=beta),
                                      history=history,
                                      verbose=verbose_push(verbose,
                                                           ("β: %#8.6g  " %
                                                            beta))))
        if history:
            ret = list(zip(*ret))
            hist[0].extend(ret[0])
            hist[1].extend(ret[1])
            hist[2].extend([beta] * len(ret[0]))
            hist[3].extend(ret[2])
            S = ret[2][-1]
        else:
            S = ret[0]
            nattempts += ret[1]
            nmoves += ret[2]

        beta *= speed

    if history:
        return list(zip(*hist))
    else:
        return S, nattempts, nmoves


def mcmc_multilevel(state, B, r=2, b_cache=None, anneal=False,
                    mcmc_equilibrate_args={}, anneal_args={}, shrink_args={},
                    verbose=False):
    r"""Equilibrate an MCMC from a starting state with a higher order, by performing
    successive agglomerative initializations and equilibrations until the
    desired order is reached, such that metastable states are avoided.

    Parameters
    ----------
    state : Any state class (e.g. :class:`~graph_tool.inference.BlockState`)
        Initial state. This state will **not** be modified during the algorithm.
    B : ``int``
        Desired model order (i.e. number of groups).
    r : ``int`` (optional, default: ``2``)
        Greediness of agglomeration. At each iteration, the state order will be
        reduced by a factor ``r``.
    b_cache : ``dict`` (optional, default: ``None``)
        If specified, this should be a dictionary with key-value pairs of the
        form ``(B, state)`` that contain pre-computed states of the specified
        order. This dictionary will be modified during the algorithm.
    anneal : ``bool`` (optional, default: ``False``)
        If ``True``, the equilibration steps will use simulated annealing, by
        calling :func:`~graph_tool.inference.mcmc_anneal`, instead of
        :func:`~graph_tool.inference.mcmc_equilibrate`.
    mcmc_equilibrate_args : ``dict`` (optional, default: ``{}``)
        Arguments to be passed to :func:`~graph_tool.inference.mcmc_equilibrate`.
    anneal_args : ``dict`` (optional, default: ``{}``)
        Arguments to be passed to :func:`~graph_tool.inference.mcmc_anneal`.
    shrink_args : ``dict`` (optional, default: ``{}``)
        Arguments to be passed to ``state.shrink``
        (e.g. :meth:`graph_tool.inference.BlockState.shrink`).
    verbose : ``bool`` or ``tuple`` (optional, default: ``False``)
        If ``True``, progress information will be shown. Optionally, this
        accepts arguments of the type ``tuple`` of the form ``(level, prefix)``
        where ``level`` is a positive integer that specifies the level of
        detail, and ``prefix`` is a string that is prepended to all output
        messages.

    Notes
    -----

    This algorithm alternates between equilibrating the MCMC state and reducing
    the state order (via calls to ``state.shrink``,
    e.g. :meth:`graph_tool.inference.BlockState.shrink`).

    This greatly reduces the chances of getting trapped in metastable states if
    the starting point is far away from equilibrium, as discussed in
    [peixoto-efficient-2014]_.
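
    A minimal usage sketch (assuming ``import graph_tool.all as gt``; the graph
    and the target order below are only illustrative)::

        g = gt.collection.data["football"]
        state = gt.minimize_blockmodel_dl(g)
        state = gt.mcmc_multilevel(state, B=2)   # reduce to two groups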

    Returns
    -------

    state : The same type as parameter ``state``
        This is the final state after the MCMC run.

    References
    ----------

    .. [peixoto-efficient-2014] Tiago P. Peixoto, "Efficient Monte Carlo and
       greedy heuristic for the inference of stochastic block models", Phys.
       Rev. E 89, 012804 (2014), :doi:`10.1103/PhysRevE.89.012804`,
       :arxiv:`1310.4378`
    """

    if "mcmc_equilibrate_args" in anneal_args:
        raise ValueError("'mcmc_equilibrate_args' should be passed directly " +
                         "to mcmc_multilevel(), not via 'anneal_args'")

    mcmc_args = mcmc_equilibrate_args.get("mcmc_args", {})
    if not mcmc_equilibrate_args.get("gibbs", False):
        mcmc_args["d"] = 0
    else:
        mcmc_args["allow_new_group"] = False
    mcmc_equilibrate_args["mcmc_args"] = mcmc_args

    while state.B > B:
        B_next = max(min(int(round(state.B / r)), state.B - 1), B)

        if b_cache is not None and B_next in b_cache:
            if check_verbose(verbose):
                print(verbose_pad(verbose) +
                      "shrinking %d -> %d (cached)" % (state.B, B_next))
            state = b_cache[B_next][1]
            continue

        if check_verbose(verbose):
            print(verbose_pad(verbose) +
                  "shrinking %d -> %d" % (state.B, B_next))
        state = state.shrink(B=B_next, **shrink_args)
        if anneal:
            mcmc_anneal(state,
                        **dict(anneal_args,
                               mcmc_equilibrate_args=mcmc_equilibrate_args,
                               verbose=verbose_push(verbose,
                                                    "B=%d  " % state.B)))
        else:
            mcmc_equilibrate(state,
                             **dict(mcmc_equilibrate_args,
                                    verbose=verbose_push(verbose,
                                                         ("B=%d  " %
                                                          state.B))))
        if b_cache is not None:
            mcmc_args = mcmc_equilibrate_args.get("mcmc_args", {})
            entropy_args = mcmc_args.get("entropy_args", {})
            b_cache[B_next] = (state.entropy(**entropy_args), state)
    return state


class MulticanonicalState(object):
    r"""The density of states of a multicanonical Monte Carlo algorithm. It is used
    by :func:`graph_tool.inference.multicanonical_equilibrate`.

    Parameters
    ----------
    g : :class:`~graph_tool.Graph`
        Graph to be modelled.
    S_min : ``float``
        Minimum energy.
    S_max : ``float``
        Maximum energy.
    nbins : ``int`` (optional, default: ``1000``)
        Number of bins.
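
    A minimal construction sketch (the energy bounds below are illustrative, and
    must bracket the entropy values that will actually be visited)::

        m_state = MulticanonicalState(g, S_min=-1000, S_max=1000)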
    """

    def __init__(self, g, S_min, S_max, nbins=1000):
        self._g = g
        self._N = g.num_vertices()
        self._S_min = S_min
        self._S_max = S_max
        self._density = Vector_double()
        self._density.resize(nbins)
        self._hist = Vector_size_t()
        self._hist.resize(nbins)
        self._perm_hist = numpy.zeros(nbins, dtype=self._hist.a.dtype)
        self._f = None

    def __getstate__(self):
        state = [self._g, self._S_min, self._S_max,
                 numpy.array(self._density.a), numpy.array(self._hist.a),
                 numpy.array(self._perm_hist), self._f]
        return state

    def __setstate__(self, state):
        g, S_min, S_max, density, hist, phist, f = state
        self.__init__(g, S_min, S_max, len(hist))
        self._density.a[:] = density
        self._hist.a[:] = hist
        self._perm_hist[:] = phist
        # restore the density-update factor after __init__ has reset it
        self._f = f

    def get_energies(self):
        "Get energy bounds."
        return self._S_min, self._S_max

    def get_allowed_energies(self):
        "Get allowed energy bounds."
        h = self._hist.a.copy()
        h += self._perm_hist
        Ss = self.get_range()
        Ss = Ss[h > 0]
        return Ss[0], Ss[-1]

    def get_range(self):
        "Get energy range."
        return numpy.linspace(self._S_min, self._S_max, len(self._hist))

    def get_density(self, B=None):
        """Get density of states, normalized so that total sum is :math:`B^N`, where
        :math:`B` is the number of groups, and :math:`N` is the number of
        nodes. If not supplied :math:`B=N` is assumed.
        """
        r = numpy.array(self._density.a)
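        # normalize the copied log-density: subtracting the maximum and the
        # log-sum makes exp(r) sum to one, and adding N * log(B) rescales the
        # total to B**N, as documented above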
        r -= r.max()
        r -= log(exp(r).sum())
        if B is None:
            B = self._g.num_vertices()
        r += self._g.num_vertices() * log(B)
        return r

    def get_entropy(self, S, B=None):
        r = self.get_density(B)
        j = self.get_bin(S)
        return r[j]

    def get_bin(self, S):
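        # map the energy S linearly from [S_min, S_max] onto a bin index in
        # [0, nbins - 1]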
        return int(round((len(self._hist) - 1) * ((S - self._S_min) /
                                                  (self._S_max - self._S_min))))

    def get_hist(self):
        "Get energy histogram."
        return numpy.array(self._hist.a)

    def get_perm_hist(self):
        "Get permanent energy histogram."
        return self._perm_hist

    def get_flatness(self, h=None, allow_gaps=True):
        "Get energy histogram flatness."
        if h is None:
            h = self._hist.a
        if h.sum() == 0:
            return 0
        if allow_gaps:
            idx = (h + self._perm_hist) > 0
        else:
            Ss = self.get_range()
            S_min, S_max = self.get_allowed_energies()
            idx = numpy.logical_and(Ss >= S_min, Ss <= S_max)

        h = array(h[idx], dtype="float")

        if len(h) == 1:
            h = array([1e-6] + list(h))

        # the flatness is the smaller of (min / mean) and (mean / max) of the
        # histogram counts, approaching 1 for a perfectly flat histogram
        h_mean = h.mean()
        return min(h.min() / h_mean,
                   h_mean / h.max())

    def get_posterior(self, N=None):
        "Get posterior probability."
        r = self.get_density(N)
        Ss = numpy.linspace(self._S_min, self._S_max, len(r))
        y = -Ss + r
        y_max = y.max()
        y -= y_max
        return y_max + log(exp(y).sum())

    def reset_hist(self):
        "Reset energy histogram."
        self._perm_hist += self._hist.a
        self._hist.a = 0

def multicanonical_equilibrate(state, m_state, f_range=(1., 1e-6), r=2,
                               flatness=.95, allow_gaps=True, callback=None,
                               multicanonical_args={}, verbose=False):
    r"""Equilibrate a multicanonical Monte Carlo sampling using the Wang-Landau
    algorithm.

    Parameters
    ----------
    state : Any state class (e.g. :class:`~graph_tool.inference.BlockState`)
        Initial state. This state will be modified during the algorithm.
    m_state :  :class:`~graph_tool.inference.MulticanonicalState`
        Initial multicanonical state, where the state density will be stored.
    f_range : ``tuple`` of two floats (optional, default: ``(1., 1e-6)``)
        Range of density updates.
    r : ``float`` (optional, default: ``2.``)
        Greediness of convergence. At each iteration, the density updates will
        be reduced by a factor ``r``.
    flatness : ``float`` (optional, default: ``.95``)
        Sufficient histogram flatness threshold used to continue the algorithm.
    allow_gaps : ``bool`` (optional, default: ``True``)
        If ``True``, gaps in the histogram (regions with zero count) will be
        ignored when computing the flatness.
    callback : ``function`` (optional, default: ``None``)
        If given, this function will be called after each iteration. The
        function must accept the current ``state`` and ``m_state`` as arguments.
    multicanonical_args : ``dict`` (optional, default: ``{}``)
        Arguments to be passed to ``state.multicanonical_sweep`` (e.g.
        :meth:`graph_tool.inference.BlockState.multicanonical_sweep`).
    verbose : ``bool`` or ``tuple`` (optional, default: ``False``)
        If ``True``, progress information will be shown. Optionally, this
        accepts arguments of the type ``tuple`` of the form ``(level, prefix)``
        where ``level`` is a positive integer that specifies the level of
        detail, and ``prefix`` is a string that is prepended to all output
        messages.
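
    A minimal usage sketch (assuming ``import graph_tool.all as gt``; the state
    construction and energy bounds below are only illustrative)::

        state = gt.BlockState(g, B=4)
        m_state = gt.MulticanonicalState(g, S_min=-5000, S_max=1000)
        gt.multicanonical_equilibrate(state, m_state)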

    Returns
    -------

    niter : ``int``
        Number of iterations required for convergence.

    References
    ----------

    .. [wang-efficient-2001] Fugao Wang, D. P. Landau, "An efficient, multiple
       range random walk algorithm to calculate the density of states", Phys.
       Rev. Lett. 86, 2050 (2001), :doi:`10.1103/PhysRevLett.86.2050`,
       :arxiv:`cond-mat/0011174`
    .. [belardinelli-wang-2007] R. E. Belardinelli, V. D. Pereyra,
       "Wang-Landau algorithm: A theoretical analysis of the saturation of
       the error", J. Chem. Phys. 127, 184105 (2007),
       :doi:`10.1063/1.2803061`, :arxiv:`cond-mat/0702414`
    """

    count = 0
    if m_state._f is None:
        m_state._f = f_range[0]
    while m_state._f >= f_range[1]:
        state.multicanonical_sweep(m_state, **multicanonical_args)
        hf = m_state.get_flatness(allow_gaps=allow_gaps)

        if callback is not None:
            callback(state, m_state)

        if check_verbose(verbose):
            print(verbose_pad(verbose) +
                  "count: %d  f: %#8.8g  flatness: %#8.8g  nonempty bins: %d  S: %#8.8g  B: %d" % \
                  (count, m_state._f, hf, (m_state._hist.a > 0).sum(),
                   state.entropy(**multicanonical_args.get("entropy_args", {})),
                   state.get_nonempty_B()))

        if hf > flatness:
            m_state._f /= r
            if m_state._f >= f_range[1]:
                m_state.reset_hist()

        count += 1

    return count


class TemperingState(object):
    """This class aggregates several state classes and corresponding
    inverse-temperature values to implement `parallel tempering MCMC
    <https://en.wikipedia.org/wiki/Parallel_tempering>`_.

    This is meant to be used with :func:`~graph_tool.inference.mcmc_equilibrate`.

    Parameters
    ----------
    states : list of state objects (e.g. :class:`~graph_tool.inference.BlockState`)
        Initial parallel states.
    betas : list of floats
        Inverse temperature values.
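
    A minimal usage sketch (the inverse-temperature ladder and the base
    ``state`` below are only illustrative)::

        betas = numpy.linspace(1, .1, 10)
        states = [state.copy() for beta in betas]
        ts = TemperingState(states, betas)
        mcmc_equilibrate(ts, mcmc_args=dict(niter=10))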
    """

    def __init__(self, states, betas):
        if len(states) != len(betas):
            raise ValueError("states and betas must be of the same size")
        self.states = states
        self.betas = betas

    def entropy(self, **kwargs):
        """Returns the weighted sum of the entropy of the parallel states. All keyword
        arguments are propagated to the individual states' `entropy()`
        method.
        """
        return sum(s.entropy(beta_dl=beta, **kwargs) for
                   s, beta in zip(self.states, self.betas))

    def states_swap(self, **kwargs):
        """Perform a full sweep of the parallel states, where swaps are attempted. All
        relevant keyword arguments are propagated to the individual states'
        `entropy()` method."""

        verbose = kwargs.get("verbose", False)
        eargs = kwargs.get("entropy_args", {})

        idx = numpy.arange(len(self.states) - 1)
        numpy.random.shuffle(idx)
        nswaps = 0
        dS = 0
        for i in idx:
            s1 = self.states[i]
            s2 = self.states[i + 1]
            b1 = self.betas[i]
            b2 = self.betas[i + 1]

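            # Metropolis-Hastings acceptance for the replica exchange: compare
            # the log-probabilities with the two inverse temperatures swapped
            # (P*_f) against the current assignment (P*_b).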
            P1_f = -s1.entropy(beta_dl=b2, **eargs)
            P2_f = -s2.entropy(beta_dl=b1, **eargs)

            P1_b = -s1.entropy(beta_dl=b1, **eargs)
            P2_b = -s2.entropy(beta_dl=b2, **eargs)

            ddS = -(P1_f + P2_f - P1_b - P2_b)
            a = exp(-ddS)

            if numpy.random.random() < a:
                self.states[i + 1], self.states[i] = \
                            self.states[i], self.states[i + 1]
                nswaps += 1
                dS += ddS
                if check_verbose(verbose):
                    print(verbose_pad(verbose)
                          + u"swapped states: %d [β = %g] <-> %d [β = %g], a: %g" % \
                            (i, b1, i + 1, b2, a))
        return dS, nswaps

    def states_move(self, sweep_algo, **kwargs):
        """Perform a full sweep of the parallel states, where state moves are
657
        attempted by calling `sweep_algo(state, beta=beta, **kwargs)`."""
        dS = 0
        nmoves = 0
        nattempts = 0
        for state, beta in zip(self.states, self.betas):
            entropy_args = dict(kwargs.get("entropy_args", {}))
            ret = sweep_algo(state,
                             **dict(kwargs,
                                    entropy_args=dict(entropy_args,
                                                      beta_dl=beta)))
            dS += ret[0]
            nattempts += ret[1]
            nmoves += ret[2]
        return dS, nattempts, nmoves

    def _sweep(self, algo, **kwargs):
        if numpy.random.random() < .5:
            return self.states_swap(**kwargs)
        else:
            return self.states_move(algo, **kwargs)

    def mcmc_sweep(self, **kwargs):
        """Perform a full mcmc sweep of the parallel states, where swap or moves are
        chosen randomly. All keyword arguments are propagated to the individual
        states' `mcmc_sweep()` method."""
        algo = lambda s, **kw: s.mcmc_sweep(**kw)
        return self._sweep(algo, **kwargs)

    def multiflip_mcmc_sweep(self, **kwargs):
        """Perform a full mcmc sweep of the parallel states, where swap or moves are
        chosen randomly. All keyword arguments are propagated to the individual
        states' `multiflip_mcmc_sweep()` method."""
        algo = lambda s, **kw: s.multiflip_mcmc_sweep(**kw)
        return self._sweep(algo, **kwargs)

    def gibbs_sweep(self, **kwargs):
        """Perform a full Gibbs mcmc sweep of the parallel states, where swap or moves
        are chosen randomly. All keyword arguments are propagated to the
        individual states' `gibbs_sweep()` method.
        """
        algo = lambda s, **kw: s.gibbs_sweep(**kw)
        return self._sweep(algo, **kwargs)