% plain tex
% BODY:
%%%%%%%%%%%%%%% FORMATO
% Page layout and base typography (plain TeX).
\magnification=\magstep1\hoffset=0.cm
\voffset=1truecm\hsize=16.5truecm \vsize=21.truecm
\baselineskip=14pt plus0.1pt minus0.1pt \parindent=12pt
\lineskip=4pt\lineskiplimit=0.1pt \parskip=0.1pt plus1pt
% Shorthands for math style switches.
\def\ds{\displaystyle}\def\st{\scriptstyle}\def\sst{\scriptscriptstyle}
% 7pt roman font, available as \seven.
\font\seven=cmr7
%%%%%%%%%%%%%%%% GRECO
% Single-letter shorthands for Greek letters.
% NOTE(review): several standard plain-TeX commands are overwritten here
% (the accents \b, \c, \d, \t and the letters \l, \o, \L, \O, and the
% pilcrow \P); harmless for pure math text, but those originals are no
% longer available after this point.
\let\a=\alpha \let\b=\beta \let\c=\chi \let\d=\delta \let\e=\varepsilon
\let\f=\varphi \let\g=\gamma \let\h=\eta \let\k=\kappa \let\l=\lambda
\let\m=\mu \let\n=\nu \let\o=\omega \let\p=\pi \let\ph=\varphi
\let\r=\rho \let\s=\sigma \let\t=\tau \let\th=\vartheta
\let\y=\upsilon \let\x=\xi \let\z=\zeta
\let\D=\Delta \let\F=\Phi \let\G=\Gamma \let\L=\Lambda \let\Th=\Theta
\let\O=\Omega \let\P=\Pi \let\Ps=\Psi \let\Si=\Sigma \let\X=\Xi
\let\Y=\Upsilon
%%%%%%%%%%%%%%% DEFINIZIONI LOCALI
% Local abbreviations used throughout the paper.
\let\ciao=\bye \def\fiat{{}}
% \pagina: force a page break; \\ is redefined as \noindent
% (plain TeX has no \\ of its own, so nothing is lost).
\def\pagina{{\vfill\eject}} \def\\{\noindent}
% Dirac bra/ket and expectation-value notation.
\def\bra#1{{\langle#1|}} \def\ket#1{{|#1\rangle}}
\def\media#1{{\langle#1\rangle}} \def\ie{\hbox{\it i.e.\ }}
% NOTE(review): \i (the dotless i, base for accents) is overwritten by
% \infty here.
\let\ig=\int \let\io=\infty \let\i=\infty
\let\dpr=\partial \def\V#1{\vec#1} \def\Dp{\V\dpr}
% \tende#1: right arrow with a script-size label #1 underneath
% (used for limits such as "tends to ...").
\def\tende#1{\vtop{\ialign{##\crcr\rightarrowfill\crcr
\noalign{\kern-1pt\nointerlineskip}
\hskip3.pt${\scriptstyle #1}$\hskip3.pt\crcr}}}
% \otto: overlapped left/right arrows (a two-headed arrow).
\def\otto{{\kern-1.truept\leftarrow\kern-5.truept\to\kern-1.truept}}
\def\LS{Logarithmic Sobolev Inequality }
\def\LSC{Logarithmic Sobolev Constant }
\def\Z{{\bf Z^d}}
% \supnorm{f}: sup norm |f|_infinity.
\def\supnorm#1{\vert#1\vert_\infty}
% \grad{i}{f}: squared gradient on the volume \L_{i} (\L is \Lambda).
\def\grad#1#2{(\nabla_{\L_{#1}}#2)^2}
% \log{f}: typesets f^2 log(f);  \logg{f}: f log(f^{1/2}).
% NOTE(review): this overwrites plain TeX's \log operator (kept, since
% the file relies on the one-argument form).
% FIX: the bare letters "log" in the bodies were typeset in math italic
% as the product l*o*g; set the operator name upright instead.
\def\log#1{#1^2{\rm log}(#1)}
\def\logg#1{#1{\rm log}((#1)^{1\over2})}
%%%%%%%%%%%%%%%%%%%%% Numerazione pagine
% \data: today's date as day/month-name/year (Italian month names).
\def\data{\number\day/\ifcase\month\or gennaio \or febbraio \or marzo \or
aprile \or maggio \or giugno \or luglio \or agosto \or settembre
\or ottobre \or novembre \or dicembre \fi/\number\year}
%%\newcount\tempo
%%\tempo=\number\time\divide\tempo by 60}
% Box 200 holds the date at scriptscript size; reused by the footline.
\setbox200\hbox{$\scriptscriptstyle \data $}
% \pgn: running page counter within the current section.
\newcount\pgn \pgn=1
% \foglio / \foglioa: page labels "sec:page" ("Asec:page" in appendices).
% \numsec is allocated further down; safe, because \def does not expand
% its body at definition time.
\def\foglio{\number\numsec:\number\pgn
\global\advance\pgn by 1}
\def\foglioa{A\number\numsec:\number\pgn
\global\advance\pgn by 1}
%\footline={\rlap{\hbox{\copy200}\ $\st[\number\pageno]$}\hss\tenrm
%\foglio\hss}
%\footline={\rlap{\hbox{\copy200}\ $\st[\number\pageno]$}\hss\tenrm
%\foglioa\hss}
%
%%%%%%%%%%%%%%%%% EQUAZIONI CON NOMI SIMBOLICI
%%%
%%% per assegnare un nome simbolico ad una equazione basta
%%% scrivere \Eq(...) o, in \eqalignno, \eq(...) o,
%%% nelle appendici, \Eqa(...) o \eqa(...):
%%% dentro le parentesi e al posto dei ...
%%% si puo' scrivere qualsiasi commento;
%%% per assegnare un nome simbolico ad una figura, basta scrivere
%%% \geq(...); per avere i nomi
%%% simbolici segnati a sinistra delle formule e delle figure si deve
%%% dichiarare il documento come bozza, iniziando il testo con
%%% \BOZZA. Sinonimi \Eq,\EQ,\EQS; \eq,\eqs; \Eqa,\Eqas;\eqa,\eqas.
%%% All' inizio di ogni paragrafo si devono definire il
%%% numero del paragrafo e della prima formula dichiarando
%%% \numsec=... \numfor=... (brevetto Eckmannn); all'inizio del lavoro
%%% bisogna porre \numfig=1 (il numero delle figure non contiene la sezione.
%%% Si possono citare formule o figure seguenti; le corrispondenze fra nomi
%%% simbolici e numeri effettivi sono memorizzate nel file \jobname.aux, che
%%% viene letto all'inizio, se gia' presente. E' possibile citare anche
%%% formule o figure che appaiono in altri file, purche' sia presente il
%%% corrispondente file .aux; basta includere all'inizio l'istruzione
%%% \include{nomefile}
%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\global\newcount\numsec\global\newcount\numfor
\global\newcount\numfig
% Depth of \strutbox; used to align the marginal labels in draft mode.
\gdef\profonditastruttura{\dp\strutbox}
% \senondefinito{name}: "if \name is undefined"; deliberately leaves the
% \ifx open, so the caller supplies the matching \else ... \fi.
\def\senondefinito#1{\expandafter\ifx\csname#1\endcsname\relax}
% \SIA prefix,name,value : globally define \prefixname as value, or warn
% on the terminal if it was already defined.
\def\SIA #1,#2,#3 {\senondefinito{#1#2}
\expandafter\xdef\csname #1#2\endcsname{#3} \else
\write16{???? ma #1,#2 e' gia' stato definito !!!!} \fi}
% \etichetta(name): print "(sec.formula)", record it under the control
% sequence \e<name>, advance the formula counter, and write a \FU line to
% the .aux file (channel 15) so forward references resolve on the next run.
\def\etichetta(#1){(\veroparagrafo.\veraformula)
\SIA e,#1,(\veroparagrafo.\veraformula)
\global\advance\numfor by 1
\write15{\string\FU (#1){\equ(#1)}}
\write16{ EQ \equ(#1) == #1 }}
% \FU(name){value}: played back from the .aux file; stores forward refs.
\def \FU(#1)#2{\SIA fu,#1,#2 }
% Appendix version: numbers look like (Asec.formula).
\def\etichettaa(#1){(A\veroparagrafo.\veraformula)
\SIA e,#1,(A\veroparagrafo.\veraformula)
\global\advance\numfor by 1
\write15{\string\FU (#1){\equ(#1)}}
\write16{ EQ \equ(#1) == #1 }}
% Figure version of \etichetta (figure numbers carry no section part).
\def\getichetta(#1){Fig. \verafigura
\SIA e,#1,{\verafigura}
\global\advance\numfig by 1
\write15{\string\FU (#1){\equ(#1)}}
\write16{ Fig. \equ(#1) ha simbolo #1 }}
\newdimen\gwidth
% \BOZZA: draft mode; redefines \alato/\galato to print the symbolic
% label in the left margin next to each equation/figure.
\def\BOZZA{
\def\alato(##1){
{\vtop to \profonditastruttura{\baselineskip
\profonditastruttura\vss
\rlap{\kern-\hsize\kern-1.2truecm{$\scriptstyle##1$}}}}}
\def\galato(##1){ \gwidth=\hsize \divide\gwidth by 2
{\vtop to \profonditastruttura{\baselineskip
\profonditastruttura\vss
\rlap{\kern-\gwidth\kern-1.2truecm{$\scriptstyle##1$}}}}}
}
% Outside draft mode the marginal labels are invisible.
\def\alato(#1){}
\def\galato(#1){}
\def\veroparagrafo{\number\numsec}\def\veraformula{\number\numfor}
\def\verafigura{\number\numfig}
%\def\geq(#1){\getichetta(#1)\galato(#1)}
% \Eq: number a displayed equation via \eqno; \eq: same, for use inside
% \eqalignno; \Eqa/\eqa: appendix versions.  \eqv(name): resolve a
% forward reference recorded as \fu<name>, printing $\clubsuit$name when
% the label is unknown.
\def\Eq(#1){\eqno{\etichetta(#1)\alato(#1)}}
\def\eq(#1){\etichetta(#1)\alato(#1)}
\def\Eqa(#1){\eqno{\etichettaa(#1)\alato(#1)}}
\def\eqa(#1){\etichettaa(#1)\alato(#1)}
\def\eqv(#1){\senondefinito{fu#1}$\clubsuit$#1\else\csname fu#1\endcsname\fi}
% \equ(name): expand to the stored equation/figure number \e<name>; if
% the label is not yet defined in this pass, fall back to \eqv, which
% consults the forward references read from the .aux file (or prints
% $\clubsuit$name when nothing is known).
% FIX(review): the original said "eqv(#1)" without the backslash, so an
% unresolved label typeset the literal text "eqv(name)" instead of
% invoking \eqv -- breaking the forward-reference mechanism.
\def\equ(#1){\senondefinito{e#1}\eqv(#1)\else\csname
e#1\endcsname\fi}
% Synonyms announced in the comment block above.
\let\EQS=\Eq\let\EQ=\Eq
\let\eqs=\eq
\let\Eqas=\Eqa
\let\eqas=\eqa
%%%%%%%%%%%%%%%%%% Numerazione verso il futuro ed eventuali paragrafi
%%%%%%% precedenti non inseriti nel file da compilare
% \include{file}: read file.aux (if present) so labels defined in other
% files can be cited here.
\def\include#1{
\openin13=#1.aux \ifeof13 \relax \else
\input #1.aux \closein13 \fi}
% Read our own .aux from the previous run (if any), then reopen it for
% writing the labels of this run.
\openin14=\jobname.aux \ifeof14 \relax \else
\input \jobname.aux \closein14 \fi
\openout15=\jobname.aux
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\BOZZA
% Footline: date (box 200), [absolute page number], and the sec:page label.
\footline={\rlap{\hbox{\copy200}\ $\st[\number\pageno]$}\hss\tenrm
\foglio\hss}
\vskip 1cm
\centerline{\bf MARKOV
CHAINS WITH EXPONENTIALLY SMALL TRANSITION}
\centerline {\bf PROBABILITIES:
FIRST EXIT PROBLEM FROM A GENERAL DOMAIN}
\bigskip
{\bf I. THE REVERSIBLE CASE.}
\vskip 1cm
\centerline{E.Olivieri$^{(1)}$, E.Scoppola$^{(2)}$}
\vskip 1cm
{\it (1) Dipartimento di Matematica - II Universit\`a di Roma - Tor Vergata}\par
{\it Via della Ricerca Scientifica - 00173 ROMA - Italy}\par
E-mail: OLIVIERI@MAT.UTOVRM.IT
\bigskip
{\it (2) Dipartimento di Fisica - Universit\`a di Bari.
V. Amendola 173 - 70126 BARI - Italy }\par
E-mail: SCOPPOLA@ROMA1.INFN.IT
\vskip 1cm
\centerline { Dedicated to the memory of Claude Kipnis}
\vskip 1cm
{\bf Abstract}
\bigskip
We consider general ergodic aperiodic Markov chains with finite state space
whose transition probabilities between pairs of different communicating
states are exponentially small in a large parameter $\beta$.\par
We extend previous results by Freidlin and Wentzell ( [FW] ) on the first
exit problem from a general domain $Q$. \par
In the present paper we analyze the case of {\it reversible} Markov chains.
The general case will be studied in a forthcoming paper.\par
We prove, in a purely probabilistic way and without using F-W graphical
technique, some results on the first exit problem from a general domain $Q$
containing many attractors. In particular we
analyze the properties of special domains called {\it cycles } and,
by using the new concept of {\it temporal entropy}, we
obtain new results
leading to a complete description of
the typical tube of trajectories during the first excursion outside
$Q$.\par
\bigskip
{\bf Keywords : Markov chains, first exit problem, large deviations,
reversibility.}
\vfill
\eject
\bigskip
\noindent
{\bf Section 1. Introduction.}
\bigskip
In this paper we consider ergodic, aperiodic Markov chains, with finite
state space $S$ and with transition probabilities $P(x,y)$ satisfying the
following:\bigskip
{\bf Property ${\cal P}$}\bigskip
{\it If $x$ and $y$ are
communicating states, i.e. $x\not=y$ and $P(x,y)>0$, then}
$$\exp(-\D(x,y)\b-\gamma\b)\le P(x,y)\le \exp(-\D(x,y)\b+\gamma\b)\eqno (1.1)$$
{\it where $\D(.,.)$ is a non-negative function on the set of pairs of communicating
states and $\g\to 0$ as $\b\to\infty$.}
\bigskip
Freidlin and Wentzell introduced this kind of Markov chains
as auxiliary structures
in their study of the asymptotic properties of diffusion processes describing
small random perturbation of dynamical systems.\par
Another very interesting application, which actually is our main motivation,
comes from non-equilibrium statistical mechanics: stochastic dynamics for
interacting particle systems at very low temperature, like Glauber dynamics
for Ising-like models, in a finite volume, satisfy property ${\cal P}$
(in this case $\b$ is the inverse temperature).
(See e.g. [NSch1], [NSch2], [Sch], [KO1], [KO2], [MOS]).\par
We will mainly study the problem of the first exit from a domain $Q\subset S$
containing many attracting equilibrium states for the dynamics at $\b=\infty$.
Many results on this subject are already known: in particular Freidlin and
Wentzell [FW] proved estimates for the average exit time and the typical point,
on the boundary of $Q$, reached during the first excursion outside $Q$. They
also describe the tube of typical trajectories exiting
from $Q$, when this set contains a unique attracting state.\par
The study of the typical exiting trajectories is of fundamental
importance and, in a certain sense, it is the central problem in the
description of nucleation phenomenon, in the framework of general stochastic
Ising models (see [Sch], [KO1], [KO2]). In that case we are interested in the
analysis of the typical sequence of growing droplets and in particular in
their shapes. Indeed the growth of the so called critical nucleus can be
seen as a particular case of the first exit from a non-completely attracted
domain.\par
It turns out, by looking at several particular models, that a crucial ingredient
in the description of the growth is given by the {\it resistance times} inside
some subsets of $Q$. These can be considered as a sort of
{\it temporal entropy} related to fluctuations, taking place during suitable
random times which grow exponentially fast in $\b$. This temporal entropy
turns out to be necessary to give rise to an efficient escape mechanism.
Neglecting these random fluctuations, during the escape,
would lead to a mechanism
extremely depressed in probability.\par
In other words the last escape from $Q$ occurs in a very different way in the
two cases of one or several attracting points in $Q$.
In the completely attracted case,
the typical trajectories,
during the first excursion outside $Q$,
spend a finite time, independent of $\b$, to
bring the process out of $Q$ without any ``hesitation''. In the general case
the last escape takes place
by visiting a suitable sequence of more or less stable attractors
$z_1,....,z_n$ and spending some suitable random times inside certain domain
$A_1,...,A_n$ which can be considered as a sort of generalized basins of
attraction of $z_1,...,z_n$ respectively.\par
We can say that the formulation itself of the problem of the characterization
of the
tube of typical exiting trajectories, in the general case, requires new concepts
with respect to what has been done in the completely attracted case. \par
On one hand our work can be considered as a completion and a generalization
of the results contained in chapter 6 of the Freidlin and Wentzell's book [FW];
on the other hand we formulate a general set-up useful to treat, in a unified
way, a large class of stochastic dynamics.\par
Our results are general and we are able to reduce the solution of the above
mentioned typical large deviation problem, connected to the escape from
a general domain, to the solution of a well defined sequence of variational
problems. These variational problems constitute the model-dependent work to
be done.
In other words we state
the results concerning the general behaviour of the class of Markov chains
satisfying property
${\cal P}$, by specifying their common features and by reducing
the model-dependent work to the solution of some well specified problems
whose formulation can be given in general.\par
In the present note we will concentrate ourselves on the {\it reversible} case
(see hypotheses 1 and 2 in section 3) where the unique invariant measure
$\m$ of the chain
has the Gibbsian form: $\m = \exp(-\b H)/Z$ with a given energy function
$H$ on $S$.\par
For the general case we just give here the formulation of the problem, the
complete treatment being the object of a forthcoming paper.\par
The discussion of the general case will require some
generalization of the graphical technique introduced by Freidlin and Wentzell
(see [FW] pg.177) and, more important, the use of the approach introduced by
one of the authors in [S1]. This approach is based on the introduction of
{\it renormalized chains}, obtained by a time rescaling related to the degree
of stability of different attracting equilibrium states.\par
The reversible case is much easier. The crucial point of our approach to that
case is to base our discussion on the analysis of the ``energy landscape''.\par
We will provide new probabilistic proofs of results obtained by Freidlin
and Wentzell with their graphical technique. Moreover we will prove new results
on the characterization of the tube of the typical exiting trajectories.\par
The paper is organized as follows: in section 2 we state the problem for the
general case
and we sketch the strategy for the solution of the general problem,
in section 3 we consider the reversible case and we give the
definition and the main properties of the cycles; finally in section 4 we
define the typical exiting tube of trajectories of the first escape in the
reversible case.\par
\bigskip
\numsec=2\numfor=1
{\bf Section 2. The exit problem and the renormalization procedure.}\par
\bigskip
Let $X_t$ be a Markov chain satisfying property ${\cal P}$ above;
given any set of states $Q\subset S$, we will denote by
$\tau_Q$ the first hitting time to $Q$:
$$\tau_Q\equiv\min\{t>0;\, X_t\in Q\}$$
We define the (outer) boundary $\partial Q$ of $Q$ as the set:
$$
\partial Q = \{ x \not\in Q : \exists \; x' \in Q : P(x',x) > 0\}
$$
A first description of the exit of the
chain $X_t$ from the set $Q$ can be given by means of the
following two quantities:
the expectation of the first exit time from $Q$:
$$E_x\tau _{\partial Q}\Eq (2.1)$$
and the spatial distribution
of the first exit:
$$P_x(X_{\tau _{\partial Q}}=y)\Eq (2.2)$$
with $x\in Q,\, y\in \partial Q$
( we denote by $P_x$ the probability distribution
on the process starting from $x$ at $t=0$; $E_x$ denotes the corresponding
expectation).\par
Estimates of these quantities, from above and from below, are given by
Freidlin and Wentzell in the framework of
their study of diffusion processes describing
small random perturbations of dynamical systems [FW].
In fact they proved that these continuous processes
can be approximated by discrete Markov chains satisfying property ${\cal P}$.
Moreover they show that the quantities (2.1) and (2.2) can be expressed
in terms of sums of products of transition probabilities of the chain,
and these products can be
defined by means of graphs of arrows. Then the estimates of these quantities
can be reduced to a problem of minimization of a suitable
cost function associated to each graph (see [FW] ch 6, sect. 3, pg. 176).
\par
The theory of large deviations,
developed in [FW] for the continuous case, can be very easily adapted to our
discrete Markov chain.
To each path, i.e. to each function
$\phi :{\bf N}\to S,\quad \phi =\{\phi_t\}_{t\in {\bf N}}$, we can
associate a functional
$$I_{[0,t]} (\phi ) \equiv \sum_{i=0}^{t-1}\D (\phi_{i},\phi_{i+1})
\Eq (2.3)$$
where the function $\D(x,y)$ is defined in (1.1) and we set
$\D (x,x)=0$ for each $x\in S$ and $\D (x,y) = \infty $ if
$P(x,y) =0$.
This functional can be interpreted as
the cost function of each path $\phi$, and the following
large deviation estimates can be easily proved (see [S1]):\par
\bigskip
{\bf Lemma 2.1}\par
{\it Let $\phi$ be a fixed function starting at x at time 0, then \par
i)
$$P_x(X_s=\phi_s\quad \forall s\in [0,t]) \leq e^{ -I_{[0,t]} (\phi ) \b
+\gamma t\b}$$
ii) if $\phi$ is such that $\phi_s\not=\phi_{s+1}$ for any $s\in [0,t]$ then
we have also a lower bound:
$$P_x(X_s=\phi_s\quad \forall s\in [0,t]) \ge e^{ -I_{[0,t]} (\phi ) \b -
\gamma t\b}$$
iii) for any constant $I_0>0$, for any sufficiently small $\alpha >0$,
for any $t0\Eq (2.6)$$
i.e. if each path leaving from $x$ has a positive cost.
We will denote by $M$ the set of stable states.\par
It is immediate to see that if the set $M$ contains a state $x$ then
it contains the whole equivalence class of $x$, namely $M\supset (x)_{\sim}$,
where $(x)_{\sim}\equiv \{y\in S;\, y\sim x\}$\par
An immediate consequence of lemma 2.1 is the following:\par
\bigskip
{\bf Lemma 2.2}\par
{\it There exist constants $T_0\in [0,|S|]$ and $\b _0$ such that for any
$\b >\b _0$ :\par
i) for any $t>T_0$:
$$\sup_{x\in S} P_x(\tau_M>t)\leq a^{[{t\over T_0}]}$$
with $a=1-C^{T_0}$ for some constant $0<C<1$;\par
ii) for any $\eta>0$ and
for any $t\ge e^{\eta\b }$ and $\b$ sufficiently large
we have:
$$\sup_{x\in S} P_x(\tau_M>t)\leq \exp\{-e^{{\eta\b\over 2}}\}$$ }
\bigskip
Let us now come back to the problem of the exit of our chain $X_t$ from
a domain $Q$ and let us suppose that this set contains a unique stable
state $x_0$
completely attracting this set, i.e.
for each $y\in Q$ there exists a path $y_0=y,y_1,....,y_n=x_0$ such that
$\D (y_i,y_{i+1})=0\quad \forall i<n$, and $\D (y,z)>0$ for each $y\in Q,
z\in \partial Q$.
Then in this case Freidlin and Wentzell can describe in complete detail
the exit from $Q$.\par
First of all in this case the quantities (2.1)
and (2.2) can be easily estimated as follows (see Th. 2.1 and Th.4.1 ch 4
[FW]): for all $x\in Q$
$$\lim_{\b\to\infty}{1\over\b}\ln E_x \tau_{\partial Q} = \min_{y\in\partial
Q} V(x_0,y)$$
and if there exists a unique state $y_0\in\partial Q$ such that
$V(x_0,y_0)= \min_{y\in\partial Q}
V(x_0,y)$ then
$$\lim_{\b\to\infty}P_x(X_{\tau_{\partial Q}}=y_0)=1$$
We notice, at this point, that in the case of reversible chains, i.e. when the
function $\D(x,y) $ defined in (1.1) can be expressed in terms of a unique
energy
function $H$ on $S$, then the quantities (2.1)
and (2.2) can be estimated more easily in terms of the function $H$,
since the quantity $\min_{y\in\partial Q}
V(x_0,y)$ can be easily expressed in terms of $H$.\par
Moreover in this case of
a domain $Q$ containing a unique stable state,
the last escape can be described quite precisely and completely.\par
We state now the result in our discrete case of Markov chains (see [FW] Th.2.3,
ch 4 for the continuous version of this result).\par
We want to notice here that in the continuous case of diffusion processes
discussed in [FW], the dynamics corresponding to zero random noise, was
given by a dynamical system, that is the unperturbed system was completely
deterministic and for each starting point there was a unique deterministic path
emerging from it. The tube of typical exiting trajectories
was given, in that case, as a neighborhood, in the uniform topology, of such a
deterministic path.\par
Here, in the discrete case of Markov chains, the situation is different and
even for $\b=\infty$ the system can still be
random. This means that there is not a unique deterministic path but
several possible paths emerging from the same starting point. Moreover, we
do not have
to consider a neighborhood since the space is discrete. So the
typical exiting tube, in this case, is a finite set of individual paths.
\bigskip
{\bf Proposition 2.1 }\par
{\it Let $Q$ be a set of states containing a unique stable state $x_0$ and
for each given $\alpha$ define
$$\Phi\equiv \{ \{\phi_s\}_{s\in {\bf N}};\, \phi_0=x_0,\,\phi_{T_{\phi}}
\in\partial Q,\, \phi_s\in Q,\, \forall ss}}
\in\partial Q])\Eq (2.9bis)$$
where
$$\tau_{x_0\cup\partial Q}^{>s}\equiv \min\{t>s;\,X_t\in\{x_0\cup\partial Q\}\}
$$
The r.h.s. of \equ (2.9bis) can be estimated,
by using the Markov property, as follows:
$$\le \sum_{s=0}^{\infty}P_x(\tau_{\partial Q}>s)
P_{x_0}([\{X_{t}\}_{t=0}^{\tau_{\partial Q}}
\not\in\bar\Phi]\bigcap
[X_1\not=x_0,\,X_{\tau_{x_0\cup\partial Q}}\in\partial Q])\le$$
$$\le E_x\tau_{\partial Q}\,.\, \{P_{x_0}([\{X_{t}\}_{t=0}^{\tau_{\partial Q}}
\not\in\Phi]\bigcap
[X_1\not=x_0,\,X_{\tau_{x_0\cup\partial Q}}\in\partial Q])
+P_{x_0}(\{X_{t}\}_{t=0}^{\tau_{\partial Q}}
\in\Phi\backslash\bar\Phi)\} \le$$
$$\le
e^{\min_{y\in\partial Q}V(x_0,y)\b + \delta\beta}\,.\,
\{\exp\{-e^{{\a\b\over 2}}\} +
P(I_{[0,e^{\alpha\b}]}
(X_s(x_0))\ge \min_{y\in \partial Q}V(x_0,y) + d)\}$$
for some positive constant $d$.
The theorem
follows by applying lemma 2.2 and by using that $\delta$ goes to 0 as $\b\to
\infty$ while $d$ is fixed.\par
\bigskip
We remark that in this theorem the hypothesis of the uniqueness of the
stable state in $Q$ is crucial; in fact the large deviation estimate can
be applied, here as in the continuous case, only on intervals of time
which do not grow too fast in $\b$ (they have to be bounded by $e^{\alpha\b}$
with $\alpha$ sufficiently small). However, if the set
Q contains several stable states, then the functions in $\bar\Phi$ can
( and we will see that they will do)
visit other stable states before leaving $Q$ where the process is likely to
spend exponentially long times. This means that in this case,
due to the above mentioned resistance times,
the time $\tau _{ \partial Q}-\theta_{x_0}$ is exponentially large with large
probability.
Thus an extension of proposition 2.1 to a general domain $Q$ would require
a control on large deviation estimates over exponentially long intervals
of time. This is the crucial point to be
solved.
Really new ideas and techniques are necessary;
new concepts will be needed to define the tube.\par
To this end we recall here
some recent results on Markov chains satisfying property ${\cal P}$,
representing
a completely different approach to the study of long time behavior of Markov
chains, based on a renormalization procedure [S1].\par
The main idea of this {\it renormalization} procedure
can be summarized as follows.\par
The behaviour of the chain $X_t$ involves a
sequence of different time scales $T_1,\, T_2, \, T_3,\,....$, exponentially
large in $\beta$, related to the stability of the different states.
For each time
scale $T_i$ it is possible to define a {\it renormalized chain} $X^{(i)}_t$ ,
corresponding to the original chain $X_t$ on this time scale; this is a
coarse grained version of the chain $X_t$ in the sense that
it gives a less detailed
description of the process, but the loss of
information concerns only events which occur in a typical time less or
equal to $T_i$. \par
In order to explain more clearly how this renormalization works, we
briefly recall the construction of the first chain $X^{(1)}_t$.
( see [S1] for more details)\par
A first simple classification of states is that given in terms
of stable and unstable states (see (2.6)).
If we denote by $M$ the set of stable states, then,
as shown in lemma 2.2,
the process spends, with large probability, almost all
the time in M. This result suggests that, if we look at the process
$X_t$ on a
sufficiently large time scale, then it can be described in terms
of transitions between states in M; in
this way only the behaviour of the process on small times is neglected.\par
In fact we can consider the less stable states in $M$ and we can define a time
scale $T_1$ corresponding to this smallest stability:
$$T_1\equiv e^{V_1\beta +\delta\b}\Eq (2.11)$$
where
$$V_1\equiv \min_{x\in M,\,y\in S,\,x\not\sim y} V(x,y)\Eq(2.12)$$
and $ \delta$ is a small constant going to zero as $\b$ goes to infinity.\par
We can then
construct a new Markov chain $X^{(1)}_t$ with state space $ M$,
corresponding to the original
process with a rescaling time $T_1$, by defining a sequence of stopping times
$\zeta_1, \zeta_2,...,\zeta_n,... $ such that
$\zeta_{n+1}-\zeta_n $ is of order $T_1$ with large probability and
$X_{\zeta_n}$ belongs to $M$.\par
More precisely we define the sequence of stopping times:
$$\sigma_1 \equiv \hbox{min}\{ t>0 ; \, X_t\not\sim X_0\}$$
$$\tau_1 \equiv \hbox{min}\{t\ge \sigma_1 ;\, X_t\in M\}$$
$$\zeta_1 = \cases{t_1\quad &if $ \sigma_1 > t_1$\cr
\tau_1 &if $\sigma_1\leq t_1$\cr} \Eq (2.12a)$$
and for each $n>1$:
$$\sigma_n \equiv \hbox{min}\{ t>\zeta_{n-1} ; \, X_t\not\sim
X_{\zeta_{n-1}}\}$$
$$\tau_n \equiv \hbox{min}\{t\ge \sigma_n ;\, X_t\in M\}$$
$$\zeta_n = \cases{ \zeta_{n-1}+
t_1\quad &if $ \sigma_n - \zeta_{n-1} > t_1$\cr
\tau_n &if $\sigma_n - \zeta_{n-1}\leq t_1$\cr} \Eq (2.13)$$
It is simple to prove that the sequence $X^{(1)}_n =X_{\zeta_n}$ is a
homogeneous Markov chain.
For any
couple of states $x,y\in M$ we
denote by $ P^{(1)}(x,y)$ the transition probability of the chain
$ X^{(1)}_n$,
it is possible to prove that these transition probabilities satisfy the same
assumption (property ${\cal P}$) verified by the original chain $X_t$,
provided we identify states which are equivalent with respect to the relation (2.5).
More precisely
for any $x,y\in M,\quad x\not\sim y$:
$$\exp\{-\D^{(1)} (x,y) \beta - \gamma' \b \} \leq P^{(1)}(x,y)\equiv
P(X_{\zeta_n}=y| \, X_{\zeta_{n-1}}=x)\leq
\exp\{-\D^{(1)} (x,y) \beta + \gamma' \b \}\Eq (2.14) $$
The quantities $\D^{(1)}(x,y)$ are defined by:
$$\D^{(1)} (x,y)=\inf_{t,\phi;\,\phi_0=x,\,\phi_t=y,\atop \phi_s\not\in M
\backslash [(x)_{\sim}\cup (y)_{\sim}]}I_{[0,t]}(\phi)
-V_1\Eq (2.15)
$$
and $\g '\to 0$ as $\b \to\infty$.\par
Thus we have a new chain $X^{(1)}_t$ on the state space $S^{(1)}=M$ (modulo
the equivalence relation (2.5)) $\equiv M/_{\sim}$,
to which we can apply again the same
analysis, by defining new stable states,
a time scale $T_2$, a corresponding chain
$X^{(2)}_t$ and so on.\par
The iteration scheme is the following one (see [S1]):
for any $k\ge 1$ we define the
following quantities:
for any $\phi : {\bf N}\to S^{(k)}$:
$$I_{[o,t]}^{(k)} (\phi) = \sum_{i=0}^{t-1}\D^{(k)}(\phi_i,\phi_{i+1})
\Eq(2.16)$$
$$V^{(k)}(x,y)\equiv \min_{t,\phi;\, \phi_0=x,\,\phi_t=y}I_{[o,t]}^{(k)} (\phi)
\quad \forall x,y\in S^{(k)}\Eq(2.17)$$
$$x\sim^{(k)}y\quad\hbox{if and only if}\quad V^{(k)}(x,y)=V^{(k)}(y,x)=0
\Eq(2.18)$$
$$M^{(k)} = \{x\in S^{(k)}; \forall y\in S^{(k)},\, y\not\sim^{(k)}x\quad
V^{(k)}(x,y)>0\}\Eq(2.19)$$
$$V_{k+1} = \min_{x\in M^{(k)}, y\in S^{(k)} \, x\not\sim^{(k)} y}
V^{(k)}(x,y)\Eq (2.20)$$
$$t_{k+1} = e^{V_{k+1}\b + \delta \b}\Eq (2.21)$$
$$T_1=t_1$$
$$T_{k+1}=t_1t_2......t_kt_{k+1}\Eq (2.22)$$
$$S^{(k+1)}= M^{(k)}/_{\sim^{(k)}}\Eq (2.23)$$
$$\D^{(k+1)}(x,y) = \min_{t,\phi ; \phi_0=x, \phi_t=y,\atop \phi_s\not\in
M^{(k)}
\backslash [(x)_{\sim^{(k)}}\cup (y)_{\sim^{(k)}}]
\,
\forall s\in [0,t]}I_{[0,t]}^{(k)}(\phi) - V_{k+1}
\quad \forall x,y\in S^{(k+1)}\Eq (2.24)$$
\bigskip
The main results proved in [S1] can be summarized as follows:
we can estimate from above and from below the following quantities for the
original chain $X_t$:
\bigskip
\item{i)} the invariant measure of the chain: $\mu(.)$\par
\item{ii)} the expected value of the first hitting
time
of the process starting at x to a set $B\subset S$:
$E \t_B(x) \quad B\in S, \, x\in S$\par
\item{iii)} the spatial distribution of the first entry:
$P(X_{\t_B}(x)=y) \quad B\in S,\, x\in S,\, y\in B$\par
\bigskip
in terms of the same quantities for the
rescaled chains $X^{(i)}$.
Since $|S^{(i)}|\le |S^{(i-1)}|$ (actually one can prove that
$|S^{(i+1)}|<|S^{(i-1)}|$),
the above results provide a useful tool for the evaluation of these
quantities when $|S|$ is large, in fact one can consider
a time rescaling $T_n$ so large that
the corresponding state space $S^{(n)}$
is so small that explicit computations are easy
at this level.\par
\bigskip
In [S2] the easier case of reversible Markov chains
is analyzed. The construction of the renormalized chains is more explicit
providing, in that case, a sequence of
``smoothed'' Hamiltonian functions $H^{(k)}$.
\bigskip
It is possible to establish a connection between these results and the
graphical approach by Freidlin and Wentzell. The minimization problem involved
in the graph approach to estimate (2.1) and (2.2), turns out to be broken
by this renormalization procedure in a sequence of easier minimization
problems. (See [S3]).
\bigskip
We propose here a new application of this renormalization procedure.
We claim that we can use the renormalized chains $X^{(k)}_t$ to control
the large deviation phenomena, for
the Markov chain $X_t$,
taking place during exponentially long times $T_k$.\par
In fact the control on the chain $X^{(k)}_t$ over a small time $t>\sum_{y,z\in Q}\D(y,z)$ .\par
To this chain we apply the previously discussed analysis
by evaluating the sequence of renormalized chains
$X^{(1)}_t,......, X^{(i)}_t,...$ and the corresponding sequence of state
spaces $S^{(1)},......, S^{(i)},...$.
We warn the reader of an abuse of notation: we will omit from now on the
superscript $Q$.\par
Since $\D(\partial Q)>>\sum_{y,z\in Q}\D(y,z)$ it is immediate to show that
there exists a step N of the iteration such that in Q there are only unstable
states. More precisely
let $N=N(Q)=
\inf\{n;\, S^{(n+1)}\subset \partial Q\}$ then $X^{(N)}$ has stable
states only on the boundary of Q
and all the states in $\partial Q$ are stable. Thus the exit
from Q for the chain
$X^{(N)}_t$ is a trivial problem since it is a downhill exiting. This means
that for each $x\in S^{(N)}\cap Q$
there exists at least a time k and a sequence $x^{(N)}_0,....x^{(N)}_k$ of
states in $S^{(N)}$ such that $ x^{(N)}_0=x,\quad
x^{(N)}_i \in Q\quad \forall i<k$,
$$
\D(x,y) \; = \; [ H(y) - H(x)]_+
\Eq(3.1)
$$
where $[a]_+$ is the
positive part $(\equiv a \vee 0 )$ of the real number $a$.}\par
\vskip.5cm
The above choice corresponds to a {\it quasi-reversible Metropolis Markov chain
}
( here "Metropolis " is used in a slightly more general meaning than the common
Metropolis algorithm).
\par
The most general quasi-reversible Markov chain can be
defined in the following way:
we suppose given a function $ H : \G \cup S \to { \bf R}^+ $ , $\G $
being the subset of $S \times S$ of {\it communicating} pairs of states,
namely pairs $\{x,y\}$ where $P(x,y)$ is strictly positive, with
$$
H(\{ x,y\}) \geq H(x) \vee H(y)
$$
and
$$
\D(x,y) = H(\{ x,y\}) - H(x)\Eq(3.2b)
$$
It is not difficult to convince oneself that the particular Metropolis choice,
corresponding to $H(\{ x,y\}) = H(x) \vee H(y)$, does not represent a real loss
of generality : one can always enlarge the space $S$ to include a new point for
each "bond" $ \{x,y\} \in \G$ between communicating states assigning to it the
energy $H(\{ x,y\})$.\par
Moreover
the Metropolis form is used in many applications, like the usual stochastic
Ising models. \par
We will always assume that $P(x,y)>0$ implies $P(y,x)>0$ for any $x,y\in S$.\par
For most of our result we will assume not only the hypothesis H1 of
quasi-reversibility, but the stronger hypothesis of {\it reversibility}: \par
\vskip.5cm
{\bf Hypothesis H2} (Detailed balance condition)
\vskip.5cm
{\it There exists a strictly positive probability measure $\m$ on $S$ such that:
$$
\forall \; x, x' \; \in S \;:\; \m (x) P(x,x') \;=\; \m (x') P(x',x) \Eq(3.3)
$$}
\vskip.5cm
From \equ(3.3) one immediately deduces that $\m$ is the unique invariant measure
of the chain.\par
A {\it path} $\o $ is a
sequence $ \o \equiv x_1, \dots , x_N, \; N \in {\bf N}$,
with $ x_j,x_{j+1} ,\; j=1,\dots ,N-1$, communicating states
( i.e. $ P(x_j,x_{j+1}) \; > \; 0 $).\par
A set $Q \subset S$ is {\it connected} if $\forall \; x,x' \in Q $ there exists a
path $ \o : x \to x'$ all contained in $Q$ (we use the notation $ \o : x \to
x'$ to denote a path $\o$ joining $x$ to $x'$ ). \par
Given $ Q \subset S$ we denote by $U=U(Q)$ the set of all minima of the energy
on the boundary $\partial Q $ of $Q$ :
$$
U(Q) = \{ z \in \partial Q : \min _{ x \in \partial Q } H(x) = H(z) \} \Eq(3.4)
$$
Given $ Q \subset S$ we denote by $F=F(Q)$ the set of all minima of the energy
on $ Q $ :
$$
F(Q) = \{ y \in Q : \min _{ x \in Q } H(x) = H(y) \} \Eq(3.5)
$$
A connected set of equal energy states $\bar P\subset S$ is called
{\it plateau}.
It is easy to convince oneself that, in the framework of our asymptotic
estimates, exponential in the parameter $\b$, we can identify these plateaux
with single points. In other words, states which are equivalent with respect
to the relation (2.5) of the previous section, can be identified.
\par
Moreover it follows from \equ (3.3) the property: $P(x,y) >0$
{\it implies} $P(y,x) >0$.
\par
It is immediate to verify that in the quasi reversible case a state $x$ is
stable, in the sense of definition (2.6), if either it is a local minimum of
the function $H$ or it belongs to a plateau equivalent to a local minimum.
\bigskip
{\bf Definition 3.1} \vskip.5cm
{\it A connected set $A$ which satisfies : \par \noindent
$$
\max _ {x\in A} H(x) = \bar H < \min _ {z \in \partial A } H(z) \equiv H(U(A))
$$
is called {\it cycle}.} \par
\bigskip
It is easy to see that, under the quasi-reversibility hypothesis for our Markov
chain, the above definition is equivalent to the one given by Freidlin-Wentzell
(see
[FW] pag. 198).
In the following we will give some propositions (beside other definitions).
Most of them are intended to clarify the structural properties of the cycles.
For some of them the proof is immediate and we omit it.
The first non-immediate (though in a sense elementary) statement is contained in
Proposition 3.7 for which we provide a proof.\noindent \vskip.5cm
{\bf Proposition 3.1} \vskip.5cm
{\it Given a state $\bar x \in S $ and a real number $c$ the set of all $x$'s
connected to $\bar x $ by paths with energy always below a given $c$ either
coincides with $S$ or it is a cycle $A$ with
$$
H(U(A)) \geq c
$$}
{\bf Proposition 3.2 }
\vskip.5cm
{\it Given two cycles $A_1,\; A_2$, either \par
1) $ A_1 \cap A_2 = \emptyset$ \par
or \par
2) $A_1 \subset A_2 $ or, viceversa, $A_2 \subset A_1 $}
\par\vskip.5cm
{\it Proof.} \vskip.5cm
Let $ A_1 \cap A_2 \neq \emptyset$.\par
It is immediate to see that one cannot have that, at the same time,
$$ \exists x_1 \in \partial A_1 \cap A_2 ,\qquad
\exists x_2 \in \partial A_2 \cap A_1 ,$$
otherwise one would have, at the same time:
$$ H(x_1) < H(x_2) $$
and
$$ H(x_2) < H(x_1) $$
Then, either $A_1 \subset A_2 $ or, $A_2 \subset A_1$
\vskip.5cm
{\bf Definition 3.2 }
\vskip.5cm
{\it A cycle $A$ such that $\forall z \in U(A) $ all the states $x$
communicating with $z$ for which:
$$ H(x) < H(z)
$$
belong to $A$ is called {\it stable} or {\it attractive} cycle}.
\vskip.5cm
{\bf Definition 3.3 }
\vskip.5cm
{\it A cycle $A$ for which there exists $y^* \in U(A)$ {\it downhill connected}
to some
point $x$ in $A^c$ ( namely $\exists \;x \not \in A, $ communicating with $y^*$,
with $H(x) < H(y^*) \equiv H(U(A))$ ), is called {\it transient}; points like
$y^*$ are called {\it ( minimal ) saddles}.}
\par \vskip.5cm
{\bf Definition 3.4 }
\vskip.5cm
{\it A transient cycle $A$ such that $ \exists \; \bar x \not \in A $ with
$H(\bar x) < H(F(A))$, there exists $y^* \in {\cal S}(A)$ and a path
$ \o : y^* \to \bar x$ {\it below } $y^*$ ( namely $\forall x \in \o :
H(x) < H(y^*)$ ), is called {\it metastable}.}
\vskip.5cm
{\bf Remark }
\vskip.5cm
It can happen that there exist several points $z \in U(A)$ communicating
with each other and with the same energy: as far as asymptotic estimates,
exponential in $\b$, are concerned we can consider them as a unique point.
\vskip.5cm
{\bf Definition 3.5}
{\it For each couple of states $x,y\in S$ we define their minimal saddle
${\cal S}(x,y)$ as the set of states corresponding to the
solution of the following minimax problem:
$$
\min_{\o : x \to y} \max _{ z\in \o} H(z) \; =\; H({\cal S}(x,y))
$$
where $\o :x\to y$ is a generic path joining $x$ to $y$.
For any $z,z'\in {\cal S}(x,y)$ we have $H(z)=H(z')$
and we set $H({\cal S}(x,y))\equiv H(z) \quad \forall z\in {\cal S}(x,y)$.}
\bigskip
From our assumptions on the chain it immediately follows that ${\cal S}(x,y)=
{\cal S}(y,x)\; \forall x,y\in S$.\bigskip
{\bf Remark }\par
We will call $trivial$ the saddles corresponding to a trivial minimax
solution, i.e. if ${\cal S}(x,y)\ni x$ or $y$.
We want to note that saddles between stable states,
not equivalent with respect
to the relation $\sim$ introduced in (2.5),
are not trivial, since, by the definition of stable states,
the energy of the saddle
must be strictly larger than the energy of both the
stable states considered. The saddles between stable states will be called
{\it natural saddles} .
\bigskip
{\bf Proposition 3.3}\par
{\it If $A$ is a cycle then:\par
\item{i)} for each $x,y,z\in A$ and $w\not\in A$:
$$H({\cal S}(x,y)) H(y_i)$. By propositions 3.1 and 3.5 the maximal
connected components $A_1, \dots, A_k \subset A$ of states in $A$ with energy strictly
less than $H(y_i)$ are cycles contained in $A$ with $ H(U(A_i)) \; = \; H(y_i)$.\par
Since, by proposition 3.3 ${\cal S}(x_i,x_j)$ does not depend on $x_i$ and $x_j$,
then $\forall x_i \in A_i, \; x_j \in A_j\; :\; {\cal S}(x_i,x_j) \; = \;
{\cal S}(F(A_i),F(A_j ) )\subset \{ y_1, \dots y_m\}$.\par
The only remaining, not completely immediate point is iv): it easily follows from the fact
that
$$
\forall \; x \; \in \; A\cap M, \;\;\; H( {\cal S}(x,F(A ) ) \;
\leq \; H(y_i)
$$
since $\{ y_1, \dots , y_m \} $ are the internal natural saddles with maximal
energy.\par
\vskip.5cm
The results contained in the following Proposition 3.7 are less immediate than the
previous ones. As a matter of fact they are already known even in
a more general situation ( see
[FW] for a proof based on the F-W graphical technique). We provide here, in the
reversible case, a new, purely probabilistic, proof which, in our opinion, is much
more transparent since it is based on a simple intuitive argument in which the {\it
resistance times} play an important role. As we already remarked in the introduction
and it will appear clear in the following section this is a crucial mechanism that,
apparently, escaped previous research on the subject.\par
\vskip.5cm
{\bf Proposition 3.7}
\vskip.5cm
{\it Suppose hypotheses H1 and H2 are satisfied. Given a cycle $A$ \bigskip
\noindent
i) for all $ \epsilon >0 $ there exist $\b_0>0$ and
$k>0$ such that for any $\b>\b_0$ and $\forall x \in A$
$$
P_{x} (\;\t_{\partial A} \; <\; \exp (\b \;[\;H({\cal S}(A)) -
H(F(A))\; + \e\;])\;) \; \ge \; 1 - e^{-k\b}
$$
ii) there exist $\d >0 ,\, \b_0>0$ and $k'>0$ such that
for all $\b>\b_0$ and $\forall x,x' \;\in A$ :
$$
P_x( \t_{x'} \; <\; \t _{\partial A} \; ; \; \t_{x'} \;<\;
\exp (\b [ H({\cal S}(A)) - H(F(A))- \d] ) \;\ge\; 1 - e^{-k'\b}
$$
iii) $\forall x \; \in A ,\;\forall \epsilon >0 \;\; \hat y \;\in \; \partial A $
and $\b$ sufficiently large:
$$
P_{x} (\;X_{\t _{\partial A}} = \hat y ) \geq \exp (-\e \b )
\exp (-\b [H (\hat y ) - H( {\cal S}(A))]\;) $$}
\vskip.5cm
{\it Proof}
\vskip.5cm
The proof uses induction on the total number of the internal natural saddles $|C(A)|$.
We first assume that properties i), ii), iii), are verified for
all cycles $A$ with $|C(A)|$ less or equal to
an integer $n \geq 0$ and we prove them for all the cycles $A$ with
a number of internal natural saddles $|C(A)|$ less or equal to $n+1$; then we prove
these properties for the case $n = 0$, the basis of the induction, which
corresponds to $A$ being the strict domain of attraction of a "plateau" $F(A)$ of
communicating, equal energy, points (in particular $F(A)$ can reduce to a single
local minimum $x$).\par
Consider a generic cycle $A$ with a number of internal {\it maximal } saddles
$N(A) = m$. We can use the decomposition given by Proposition 3.6, namely :
$$
A \; =\; \{ y_1,\dots, y_{ m}\} \cup \tilde V \cup \tilde A
$$
where $\tilde A$, defined in \equ (3.13), is a union of cycles $A_j$ which,
beyond satisfying the properties specified in Proposition 3.6, have, $ \forall j
=1,\dots , m $, a number of
internal saddles $|C(A_j)|$ less or equal to $n$ and then satisfy the
recursive hypotheses i), ii), iii). \par
Let us start proving i) for our cycle $A$.
Given any sufficiently small $\e >0$ let
$$
T_1 = T_1 ( \e) \; \equiv \exp (\b [ H(y_j) - H(F(A)) + \e/2] ) \Eq(3.14)
$$
and
$$
T_2 = T_2 ( \e) \; \equiv \exp (\b [ H({\cal S}(A)) - H(F(A)) + \e] ). \Eq(3.15)
$$
Then the argument is the following one :
we will construct, for every state $x \;\in \;A$, an event $E_{x,T_1}$
starting at $x$
at time $t=0$, taking place in the interval of time $[0,T_1]$
and satisfying the
following conditions: \par
1) if $E_{x,T_1}$ takes place, our Markov chain $X_t$ hits $\partial A$ before
$T_1$ and \par
2)
$$
\inf_{x\in A} P(E_{x,T_1}) \; \geq \; \a _{T_1} \; > \; 0 \; \; \;\hbox
{with} \; \; \
\lim _{\b \to \infty} ( 1- \a _{T_1})^{ T_2/T_1} = 0
\Eq(3.16)
$$
In particular we will take
$$
\a _{T_1} = \exp (-\b [H({\cal S}(A)) - H(y_j) + \e /4] ) \Eq(3.17)
$$
Let us now divide the interval
$[0\;T_2] $ into $q = [T_2 /T_1]$ ( here $[\;]$ means
integer part) intervals of length $T_1$ ; by properties 1) and 2) above of
$E_{x,T_1}$, we easily get the proof of our
Proposition 3.7.
For, if $\t _{\partial A} > T_2 $, necessarily, by property 1), in
none of the
$q$ intervals of length $T_1$ the (translated of ) event $E_{x,T_1}$ can
have taken place;
by 2) and the strong Markov property , part i) of our Proposition
directly follows.
Then we are reduced to the construction of such an event $E_{x,T_1}$.\par
Let us first give a rapid description, in words, of $E_{x,T_1}$.\par
Let $y^*$
be a state in $U(A)$, by definition there exists a downhill trajectory
from $y^*$ to the set $\tilde A$:
$$\bar x_0=y^*, \bar x_1, \bar x_2,....,\bar x_k\in \tilde A;\qquad
\bar x_1,....,\bar
x_{k-1}\in V$$
with $H(\bar x_{i+1})\le H(\bar x_i)$.
We put $k=1$ if $y^*$ is communicating with $\tilde A$.
Let $A_{j^*}$ be the
particular component of $\tilde A$ hit by this trajectory
(i.e. $\bar x_k\in A_{j^*}$).
The event $E_{x,T_1}$ is then defined by requiring that the
process
hits the set $A_{j^*}$ in a time much shorter than $T_1$, and then follows
the trajectory obtained by $\bar x_0,....,\bar x_k$ by time reversal.\par
More precisely let $\epsilon'<\epsilon/2$
and let
$$\t_{\partial A_{j^*}}^{(>\t_{A_{j^*}})}\equiv
\min\{t>\t_{A_{j^*}};\, X_t\not\in
A_{j^*}\}$$
where, as before, $\t_{A_{j^*}}$ is the first hitting time to the set $A_{j^*}$.
Then:
$$E_{x,T_1}
\equiv
\{\t_{A_{j^*}} < T_1 e^{-\epsilon'\b}\}
\cap \{\t_{\partial A_{j^*}}^{(>\t_{A_{j^*}})}
< T_1 e^{-\epsilon'\b}\}
\cap \{X_{\t_{\partial A_{j^*}}^{(>\t_{A_{j^*}})}}=\bar x_{k-1}\}\cap$$
$$\cap\{X_{\t_{\partial A_{j^*}}^{(>\t_{A_{j^*}})}+s}=
\bar x_{k-1-s}\qquad \forall s=1,...,k-1\}$$
By using the strong Markov property we have:
$$P(E_{x,T_1})= P_x(\t_{A_{j^*}}t_k;\quad \o_t\not\in A_{j_k}\}$$
where $j_k=j_k(\o)$ is such that $\o_{t_k}\in A_{j_k}$ and $k=1,...,l(\o)$
so that $j_{l(\o)}=j^*$.\par
Let
$$\bar A\equiv \tilde A \cup \{x\in A:\,H(x)=H(y_i)\}$$
From Proposition 3.6 it is easy to see that there exists a set of paths
$\bar\O$ going from $x$ to $A_{j^*}$
with the following characteristics:\par
Given any $\bar\o\in\bar \O$, if $\bar\o_1=x$ with $x\not\in \bar A$, $\bar\o$
first enters $\bar A$, following a downhill sequence, and then it does not
leave $\bar A$ anymore. Subsequently $\bar\o$ follows a well specified sequence
of cycles $A_1, ...., A_{j^*}$ and saddles $y_1,....,y_l$,
spending a certain time
in each $A_j$ (typically of order of $\exp \{\b[H(y_j)-H(F(A_j))]\}$), exiting
from $A_j$ through the saddles $y_j\in \partial A_j$. Moreover
the path $\bar\o$ does
not visit more than once the same saddle $y_j$ and
it is downhill for each $t\in (s_k,t_k]$ for each
$k=1,...,l(\bar\o)$, $l(\bar \o)\le m$ and $H(\bar\o_{s_k})=H({\cal S}(A_{j_k})
)=H(y_i)$.
The existence of such an $\bar\O$ suggests the way to estimate the first factor
in the r.h.s. of \equ(3.agg). In order to simplify the notation we set:
$$A_{j_1(\bar\o)}, ...., A_{j_{l(\bar\o)}(\bar\o)}\equiv
A_1, ..., A_{j^*}$$
$$s_1(\bar\o),...., s_{l(\bar\o)}(\bar\o)\equiv s_1,....,s_l$$
We have:
$$P_x(\t_{A_{j^*}} < T_1 e^{-\epsilon'\b}) \ge e^{-\a''\b}$$
with $\a''\to 0$ as $\b\to\infty$, we have:
$$P_x(\t_{A_{j^*}} 0 $
and $\b $
large, with probability almost one, before leaving $A'$, and in a time
typically much
shorter than $T_1(\e)/3$ we touch $x'$. This concludes the proof of \equ
(3.25).\par
By using reversibility, we get the full condition ii) since with
probability
tending to one as $\b \to \infty$ for every $x \in A,\;\e >0$ one has:
$$
\t_{\partial A } >\; \exp (\b [H({\cal S}(A))-H(F(A)) -\e]) \Eq(3.26a)
$$
and choosing $\e$ sufficiently small :
$$
\exp (\b [ H({\cal S}(A)) - H(F(A))- \d] )
\;<\;\exp (\b [H({\cal S}(A))-H(F(A)) -\e])
$$
\bigskip
Let us now prove point iii). Given $y\in F(A)$ we can estimate from below the
probability
$P_{x} (\;X_{\t _{\partial A}} = \hat y )$ by imposing to the process to visit
the state $y$ before the exit time $\t_{\partial A}$ as follows:
$$P_{x} (\;X_{\t _{\partial A}} = \hat y )\ge
\sum_{z\in A}\sum_{s=0}^{\infty} P_x(X_{s'}\in A\quad\forall s'\le s,\, X_s=z)
P_z(\t_y<\t_{\partial A}).$$
$$.\sum_{t=1}^{\infty}\sum_{\bar x_1,...\bar x_{t-1}\in
A\backslash y}P_y(X_1=\bar x_1,.....,X_{t-1}=\bar x_{t-1},\, X_t=\hat y)\Eq(3.26b)$$
By using reversibility
and the already proved point ii), valid now for the whole cycle $A$,
the last term in the
r.h.s. of \equ(3.26b) can be estimated as follows:
$$\sum_{t=1}^{\infty}\sum_{\bar x_1,...\bar x_{t-1}\in
A\backslash y}P_y(X_1=\bar x_1,.....,X_{t-1}=\bar x_{t-1},\, X_t=\hat y)=$$
$$e^{-\b[H(\hat y)-H(y)]}.
\sum_{t=1}^{\infty}\sum_{\bar x_1,...\bar x_{t-1}\in
A\backslash y}P_{\hat y}(X_1=\bar x_{t-1},.....,X_{t-1}=\bar x_{1},\, X_t=y)=$$
$$= e^{-\b[H(\hat y)-H(y)]}.[P(\hat y,y)+$$
$$+
\sum_{t=2}^{\infty}\sum_{\bar x_{t-1}\in
A\backslash y}P(\hat y,\bar x_{t-1})
\sum_{\bar x_1,...\bar x_{t-2}\in
A\backslash y}P_{\bar x_{t-1}}(X_1=\bar x_{t-2},.....,X_{t-1}=y)]\ge$$
$$\ge e^{-\b[H(\hat y)-H(y)+\epsilon ']}\min_{z\in A\backslash y}
P_{z}(\t_y<\t_{\partial A})\ge
e^{-\b[H(\hat y)-H(y)+\epsilon]}.$$
Putting this estimate in \equ(3.26b)
and using again point ii) to estimate from below the quantity
$P_z(\t_y<\t_{\partial A})$
we obtain, for $\b$ large enough,
$$P_{x} (\;X_{\t _{\partial A}}=\hat y ) \ge e^{-\b[H(\hat y)-H(y)+\epsilon']}
\sum_{s=0}^{N} P_x(\t_{\partial A}\ge s)\ge
$$
$$\ge e^{-\b[H(\hat y)-H(y)+\epsilon']}.N.P_x(\t_{\partial A}\ge N)$$
It is easy to prove that there exists $\z$ going to zero as $\b\to\infty$
such that if we chose $N=e^{\b[H({\cal
S}(A))-H(F(A))-\z]}$ we have $P_x(\t_{\partial A}\ge N)>1/2$.
\par
This concludes the proof of iii).
\bigskip
To conclude the proof of our proposition we have to show that properties i), ii),
iii) are true for $A$ such that the number $N(A)$ of internal saddles is zero,
namely when $A$ is part of ( or coincides with ) the strict basin of attraction of
$F(A)$ , $F(A)$ being a plateau in the previously specified sense.\par
Suppose we are given such an $A$. Property i) easily follows by the same argument used
before:
we construct, for any $x \in A , \e >0 $ an event $E_{x,T}$ with $T = T(\e)=
\exp (\b \e /2)$ which consists in descending from $x$ to
$F(A)$ in a time at most $T/2$ following a downhill path $\o $ from $x$ to $F(A)$;
then in following an uphill path $\o' $ from $F(A)$ up to $U(A)$
in a time shorter
than $T/2$. This path $\o' $ is the time-reversed of a path going downhill from
$U(A)$ to $F(A)$. The paths $\o $ and $\o' $ certainly exist as $A$ is
the strict basin of
attraction of $F(A)$.\par
With $T_2 $ given by \equ (3.15) we easily verify, in the present case,
\equ (3.16) and \equ (3.17) with $T_1 \equiv T(\e)$ since : 1) for every
$\e >0$ the
descent to $F(A)$, along a downhill path, takes place
in a suitable finite time, much smaller than $T(\e)$, with a
probability approaching one as $\b \to \infty$ and : 2) the ascent from
$F(A)$ to
$U(A)$, along an uphill path, in a suitable finite time , much smaller
than $T(\e) \;
\forall \e >0$ and $\b$ sufficiently large takes place with a probability
larger than
$\exp (-\b [H(U(A))-H(F(A)) +\e/4])$. Then property i) easily follows.\par
Combining the
previously discussed methods, that we used before to prove ii) and iii)
starting from
the inductive hypothesis, with the idea leading to
the construction of the above
event $E_{x,T}$ we easily get,
in our present case of $A \;=\bar B(F(A))$, ii) and iii)
(we leave the details to the reader).\par
This concludes the proof of Proposition 3.7.\par
\bigskip
We will analyze now, in more detail, the first exit from a cycle $A$.
In particular, following the
ideas developed in the framework of the so called "pathwise approach to
metastability",
(see [CGOV]), we will prove asymptotic exponentiality of the properly
renormalized first
exit time from any cycle in the limit $\b \to \infty$ . Then we
will deduce the asymptotic behaviour of the expectation
of this exit time; notice
that the methods
developed in the previous sections naturally lead only to
estimates in probability
of the typical exit times but, as we will see, we can even
get a good control on the tails of the distribution of these random
variables and
this will be important to get the asymptotics of the averages.
\par
Suppose given, once for ever, a cycle $A$.
Given a point $x \in F(A)$ let the time $T_{\b}= T_{\b}(x)$ be defined by
$$
P_x ( \t _{ \partial A} > T_{\b}) \; =\; e^{-1}. \Eq(3.27)
$$
The above definition is interesting since, as it easily follows from
Proposition 3.7,
the asymptotic distribution,
( in the
sense of the most probable behaviour), of
the first exit time from $A$ , does not depend on $x \;\in \;A$
in the sense of logarithmic
equivalence;
thus, as we will see, also $T_{\b}$ will not depend on $x \;\in \;A$,
always in the sense of
logarithmic equivalence.\par
Proposition 3.7, i) and the usual estimate, (see Lemma 1 in [KO1] ) based on
reversibility, give, $\forall \; \e >0, \; \forall \; x \; \in \; A$,
$$
\lim _{\b \to \infty} P_{x} (\exp (\b \;[H({\cal S}(A)) -
H(F(A))- \e]) < \t_{\partial A} < \exp (\b [H({\cal S}(A)) -
H(F(A)) + \e]))
$$
$$
= 1 \Eq(3.28)
$$
From \equ (3.27), \equ (3.28) it easily follows that,
$\forall \; \e >0, \; \forall \; x \; \in \; A$,
$$
\exp (\b \;[H({\cal S}(A)) -
H(F(A))- \e]) < T_{\b} < \exp (\b [H({\cal S}(A)) -
H(F(A)) + \e])
$$
\vskip.5cm
{\bf Proposition 3.8}
{\it \vskip.5cm
$\forall \; x \;\in \;A, \;\forall \; s\;\in \;{\bf R}^+:$
$$
\lim _{\b \to \infty }
P_x ( {\t _{ \partial A} \;\over T_{\b}} > \;s\;) =\; e^{-s}. \Eq(3.29)
$$}
\vskip.5cm
{\it Proof.}
\vskip.5cm
Given $ s,\; t\; \in \;{\bf R}^+$ we write:
$$
P_x ( \t _{ \partial A} \;> \;(t+s)\; T_{\b})=
\sum _{\bar x \in A} P_x ( \t _{ \partial A} \;> \;(t+s)\; T_{\b}\; ; \;X_t
= \bar x)
$$
$$ =
\sum _{\bar x \in A} P_x ( \t _{ \partial A} \;> \;t \; T_{\b}\;; \;X_t
= \bar x)\;\;
P_{\bar x} ( \t _{ \partial A} \;> \;s\; T_{\b})
\Eq(3.30)
$$
We can write, $\forall T < s T_{\b}$ :
$$
P_{\bar x} ( \t _{ \partial A} \;> \;s\; T_{\b}\;) =
P_{\bar x} ( \t _{ \partial A} \;> \;s\; T_{\b}\;; \t_x < T)
+ P_{\bar x} ( \t _{ \partial A} \;> \;s\; T_{\b}\;; \t_x \geq T).
\Eq(3.31)
$$
We have, from Proposition 3.7 ii),
that there exists a $\d >0$ such that for the time
$\bar T_1$, defined as
$$\bar T_1 =
\exp (\b [H({\cal S}(A)) -
H(F(A))- \d]) \Eq(3.32)
$$
moreover,
denoting by $\hbox{o}(\b)$ an infinitesimal quantity as $\b \to \infty$,
$$ P_{ x} ( \t _{ \partial A} \;> \;s\; T_{\b}) \;
- \hbox {o}(\b)
\leq P_{\bar x} ( \t _{ \partial A} \;> \;s\; T_{\b}\;; \t_x < \bar T_1)\; \leq
$$
$$
\leq P_{ x} ( \t _{ \partial A} \;> \;s\; (T_{\b}- \bar T_1/s)) \Eq(3.33).
$$
since, for $\b$ sufficiently large, $\bar T_1 < s T_{\b}$.
On the other hand, from \equ (3.31) and again from Proposition 3.7 ii), we get:
$$
P_{\bar x} ( \t _{ \partial A} \;> \;s\; T_{\b}\;) - \hbox {o}(\b) \leq
P_{\bar x} ( \t _{ \partial A} \;> \;s\; T_{\b}\;; \t_x < T) \leq
P_{\bar x} ( \t _{ \partial A} \;> \;s\; T_{\b}\;) \Eq(3.34).
$$
From \equ (3.27), \equ (3.30), \equ (3.33), \equ (3.34), we get the result; for, \equ
(3.30) together with
\equ (3.33), \equ (3.34),
since $\lim _ {\b \to \infty}\bar T_1 /T _{\b} = 0$,
imply the convergence of
$P_x ( {\t _{ \partial A} \;\over T_{\b}} > \;s\;)$
to a (possibly degenerate) exponential; \equ (3.27) fixes the average
( $\equiv 1$) of the limiting exponential.\par
\vskip.5cm
{\bf Proposition 3.9 }
\vskip.5cm
{\it For every $x \;\in \; A$ and $\e>0$, if ${\bf E} _x$ denotes average over the
trajectories of the process starting, at $t=0$, from $x$, we have:
$$
\exp (\b \;[H({\cal S}(A)) -
H(F(A))- \e]) < {\bf E} _x (\t _{ \partial A}) < \exp (\b [H({\cal S}(A)) -
H(F(A)) + \e]) \Eq(3.35)
$$}
\vskip.5cm
{\it Proof.}
\vskip.5cm
For every integer valued random variable $\x$ we have:
$$
{\bf E}( \x) \; =\; \sum _{m=1} ^{\infty} {\bf P}( \x \geq m) \Eq(3.36)
$$
Now, following the argument of proof of Proposition 3.7 i), based on the introduction
of the set $E_{x, T_1}$, using estimate \equ (3.15) with $ n T_2 $ in place of $T_2=
T_2 (\e)$ and the strong Markov property, it is easy to get the following estimate :
$$
P_x ( \t _{ \partial A} \;> \;n T_2) \; < \; \exp (-n c) \Eq(3.37)
$$
valid for every $x \; \in \; A $, $\e>0$, for a suitable positive constant $c$
independent of $\b$ ; we
recall that
$$
T_2 = T_2 ( \e) \; \equiv \exp (\b [ H({\cal S}(A)) - H(F(A)) + \e] )
$$
Applying formula \equ (3.36) we get:
$$
{\bf E^{(\b)}}_x ( \t _{ \partial A}) \; =
\; \sum _{m=1} ^{\infty} P^{(\b)}_x ( \t _{ \partial A} \geq m) \Eq(3.38)
$$
where we have put in evidence, with a superscript, the dependence on $\b$ of the
distribution of our process.\par
Now the result of Proposition 3.9 follows,
via \equ (3.37), from standard arguments ( see, for instance,
[CGOV]).
\par
\vskip.5cm
\numsec=4\numfor=1
{ \bf Section 4. The exit tube.}
\vskip.5cm
In this section we will analyze the typical trajectories of the first excursion
outside a cycle $A$. \par
We will start analyzing the first descent from any point $y_0$ in $A$ to $F(A)$.
For every $y_0\;\in A$ we will define a tube of trajectories ${\cal T} _ {y_0} $
(it will be called " standard");
then we will
prove that with high probability, during the first excursion from $y_0$ to $F(A)$, we
will follow this tube. As we will see this tube will be , in general, very widely
defined.
This means that both possible multiple bifurcations and resistance times will be
considered in its definition.
\par
Roughly speaking ${\cal T} _ {y_0} $ will be characterized by a set of sequences
(cascades) of minimaxes $y_1, \dots , y_n$ towards $ F(A)$,
decreasing in energy, intercalated by sequences of
downhill paths $\o_1, \dots , \o _n$ and sets
$ Q_1, \dots , Q_n$ which are a sort of generalized cycles.\par
Given any point $y_0$ in $A$ and a downhill path $\o _1 $ starting from $y_0$,
we will
define a set $Q_1\;=\; Q_1(y_0, \o_1)$. This set $Q_1$ is a union of cycles having
common minimal saddles of the same height. It will have the meaning of the first set
where our process, during its first excursion to $F(A)$, is captured if it follows the
path $\o_1$; after entering into $Q_1$ it will spend some time inside it before
leaving it to enter, after another downhill path $\o_2$ , into another similar set
$Q_2$ and so on untill it enters a cycle containing part of $F(A)$.\par
Finally we will analyze the problem of the typical tube of trajectories during the
first excursion outside a cycle $A$. As already observed by R. Schonmann for the
case of stochastic Ising models ( see [S2] ), it will turn out, using
reversibility, that this tube is simply related, via a time reversal transformation,
to the typical tube followed by the process during the first descent to the bottom
$F(A)$ of $A$.\par
\vskip.5cm
{\bf The construction of $Q_1$.}\par
\vskip.5cm
Given $y_0$ let us consider a downhill path $\o _1 $ starting from $y_0$.
We stress, once more, that this path is not in general unique. This means that
the whole construction we are defining must be repeated for each path.\par
Let $ x_1$ be
the first stable state in $ \o_1$ (see fig.1 as an example).
If such a point $ x_1$ is in $F(A)$ then
$y_0$ belongs to the wide basin of attraction of a connected component $G
=G(A,y_0,\o_1)$ of $F(A)$, i.e. $ y_0 \;\in \bar B( G) $.\par
In this degenerate case we set $Q_1 \equiv
G(A,y_0,\o_1)$ and the {\it cascade of saddles}
$y_0, y_1,$ $ \dots , y_n$ reduces to $y_0$. \par
Let us now suppose that
$ x_1\not\in F(A)$. Let $H_1$ be the energy of the saddle (not necessarily
unique) between $x_1$ and $F(A)$:
$$H({\cal S}(x_1,F(A)))=H_1$$
We denote by $A^{(1)}$ the cycle containing $x_1$ with energy less than $H_1$.
By definition of $H_1$ we have that $A^{(1)}\cap F(A)=\emptyset$. We define
$${\cal S}^{(1)}\equiv {\cal S} (A^{(1)})$$
and
$$\tilde{\cal S}^{(1)}\equiv \cup_{z\in {\cal S}^{(1)}}\bar P(z)$$
Let us now consider the cycles:
$ A^{(2)}_1, \dots,A^{(2)}_{k_2}$
with energy less than $H_1$
not coinciding with
$A^{(1)}$, with which $\tilde {\cal S}^{(1)} $
is downhill
communicating and such that
$ A^{(2)}_j \cap F(A) \;=\; \emptyset
\; \forall \; j=1,\dots ,k_2$. If there are no such cycles we
define $N=1$ otherwise we continue, by iteration, the construction
as follows.
We call $ A^{(2)}$ the union of all the cycles $A^{(2)}_j$'s
(which do not contain
points in $F(A)$),
$$A^{(2)} = \cup_j A^{(2)}_j$$
and $\tilde {\cal S}^{(2)}$ the union of all the plateaux
containing minimal saddles of the $ A^{(2)}_j$'s
which are not contained in $\tilde {\cal S}^{(1)}$. Now consider,
similarly to before, the cycles
$ A^{(3)}_1, \dots,A^{(3)}_{k_3}$
with energy less than $H_1$, not coinciding with any of the previous
$A^{(2)}_j$'s,
with which $\tilde {\cal S}^{(2)}$ is downhill communicating and such that
$ A^{(3)}_j \cap F(A) \;=\; \emptyset
\; \forall \; j=1,\dots , k_3$.
If there are no such cycles we
define $N=2$ otherwise we iterate the construction.
\par
This procedure stops at a
given finite index $ N = N(x_1 , A)$.\par
It is easy to convince oneself that
$$ \forall \;j=2, \dots , N : A^{(j)} \cap A^{(l)} = \emptyset \; \forall \;l=1, \dots
, j-1$$
and
$$ \forall \; j=2, \dots , N-1 : {\cal S}^{(j)} \cap {\cal S}^{(l)} =
\emptyset \; \forall \;l=1,
\dots , j-1$$
We set
$$ Q_1 = Q_1 ( y_0, \o_1) \equiv ( \cup _{j =1}^N A^{(j)}) \cup (\cup _{j =1}^{N}
\cup _{z \in U(A^{(j)}) } \bar P(z) \;)$$.\par
It is easily seen that $ Q_1$ is the maximal connected set, containing
$ x_1$, of
points $x$ such that
$$
H({\cal S} ( x, F(A)) ) = H_1
$$
The boundary of $ Q_1$ is given by
$$\partial Q_1 = \partial^u Q_1 \cup \partial^d Q_1\;\;\;\;
\partial^u Q_1 \cap \partial^d Q_1 = \emptyset
$$
where $\partial^u Q_1$ is
made of points $z$ with energy larger than
$H_1$ and so, trivially, with
$$
H({\cal S} (z, F(A)) ) > H_1
$$
whereas $\partial^d Q_1$ is made of points $z$ with energy smaller than $ H_1$ ,
belonging to some cycle containing points of $F(A)$ and then such
that
$$
H({\cal S} (x, F(A)) ) < H_1 .
$$
Let us call $\bar {\cal S}_1 $ the subset of $ \tilde {\cal S}^{(1)} \cup \dots \cup
\tilde {\cal S}^{(N-1)}$ which is downhill communicating with
$\partial^d Q_1$.
Chose a point $y_1$ in $\bar {\cal S}_1 $ and a downhill path $\o_2$
starting from $y_1$ not belonging to $Q_1$.
Start again from $y_1$, $\o_2$ a hierarchical construction totally analogous to
the previous one. Denote by $x_2$ the first stable state in $\o_2$ and by $H_2$
the energy of the saddle between $x_2$ and $F(A)$. By definition $H_20 \;\; \hbox {such that}\;\;
\lim _{ \b \to \infty}
P_{y_0} (\t _{ F(A)} <
\exp ( \b [ H(y_1) - H(F(A)) - \d] )=1
$$
ii)\par
$$
\lim _{ \b \to \infty}
P_{y_0} (x_t \; \in \; {\cal T} _ {y_0} \; \forall \; t \leq \t _{ F(A)} )\;
=\; 1
$$
\indent moreover :
\par \noindent
iii)\par
$$
\lim _{ \b \to \infty}
P_{y_0} (\forall \; t \leq \t _{ F(A)} \;:\;
x_t \; \in \;{\cal T} ( y_0, \o_1, y_1,\o_2, \dots , y_{N-1}, \o_M)
$$
$$
\hbox { for some}\; y_0, \o_1, y_1,\o_2, \dots , y_{N-1}, \o_M ) \; =\; 1,
$$
\indent more precisely :\par \noindent
iv)\par
with a probability tending to one as $\b \to \infty$, there exists
a sequence $y_0, \o_1, y_1,\o_2,$ $ \dots , y_{N-1}, \o_M $ such that
our process
starting at $t=0$ from $y_0$, between
$t=0$ and $t= \t _{ F(A)}$,
after having followed the initial downhill path
$\o_1$, visits, sequentially, the sets $Q_1, Q_2, \dots , Q_{M-1}$
exiting from $Q_j$ through $y_j$ and then following the path $\o_{j+1}$ before
entering $Q_{j+1}$.\par
For every $\e >0$
with a probability tending to one as $\b \to \infty$ the process
spends inside each $Q_j$ a time less than
$ \exp ( \b [ H(y_j) - H(F(Q_j)) + \e] )$\par
Finally: before exiting from $Q_j$ it can perform an arbitrary sequence
of passages through the cycles $A^{(j)}$ belonging to $Q_j$ . Each passage is
made through a minimal saddle $z_j$ in the boundary of $A^{(j)}$ ;
for every $\e >0$
with a probability tending to one as $\b \to \infty$, once the system
enters into a particular $A^{(j)}$, it spends there, a time $T$ :
$ \exp ( \b [ H(z_j) - H(F(A^{(j)})) - \e] ) \; < \; T \;< \;
\exp ( \b [ H(z_j) - H(F(A^{(j)})) + \e] )
$}
\vskip.5cm
{\it Proof }
\vskip.5cm
By
construction, using
Proposition 3.7 applied either directly to our original cycle $A$ if $y_0 \in
\bar B(F(A))$ or to
the cycles in $Q_1$
otherwise. The rest of the theorem also easily follows from
Proposition 3.7 applied to the cycles contained in the $Q_j$'s.
We leave the details to
the reader. \par
\vskip.5cm
Now, given any cycle $A$, we want to describe, in
the maximal possible detail, the
first excursion from $F(A)$ to $\partial A $.\par
Following Schonmann ([Sch2]), we first give some simple general definitions.\par
Given $x, y \;\in \;S$, we denote by $ \O ^* (x,y)$ the set of all paths $\o$
starting in $x$,
visiting $y$ at some finite time $t$ and never visiting $ x$
and $y$ in between :
$$
\O ^* (x,y) \equiv \{ \o = x_1, \dots , x_t
\hbox{ for some } t \; : \; x_1 = x, \; x_t = y\;;\;
x_2, \dots , x_{t-1} \ne x,y\} \Eq(4.4)
$$
We denote by $R$ the time reversal operator defined on finite paths:
$$
\forall\; \o \equiv ( x_1, \dots ,
x_t )\; :\; R \o \equiv \bar \o \equiv
( x_t, \dots , x_1) \Eq(4.5)
$$
We naturally define, for every set of paths $\D $,
$$
R\D = \{ \bar \o = R \o \; ; \; \o \; \in \; \D\} .
$$
Let us call $\bar \t _x$ the last time our process visits the state $x$
before touching, for the first time, $y$, namely :
$$
\bar \t _x \; \equiv \; \max \{ t < \t_y : X_t = x\} \Eq(4.6)
$$
Given a finite path $\tilde\o = \tilde x_1, \dots , \tilde x_t \;$, we say that
our process $\{X_t\}_{t>0}$
{\it starts as} $\tilde\o$ if $X_1 = \tilde x_1, \dots ,
X_t = \tilde x_t $.\par
For any $x,y\in S$ we define a measure on the (infinite) paths
$\o = x_1, \dots , x_t, ... \;$ starting at $x$ ($x_1\equiv x$),
as follows:\par
if $\o\not\in \O^*(x,y)$ then $\r (\o)=0$\par
if $\o\in \O^*(x,y)$ we set
$\tilde\o_i=\o_i$ for all $i<\inf\{t>0;\, \o_t=y\}$, that is $\tilde\o$
is the finite path given by the first segment of $\o$ before hitting
$y$. Then
$$\r (\o) = P_{x}
( \; \{X_t\}_{t\ge 0} \; \hbox {starts as } \tilde\o \; |
\o \in \O ^* (x,y ))$$
and by $\bar \r (\o)$ the measure on the paths
$\o = x_1, \dots , x_t, ... \;$ given by :
$$\bar \r (\o) = P_{y}
( \; \{X_t\}_{t\ge 0} \; \hbox {starts as } R\tilde\o \; |
\o \in\; \O ^* (y,x)\;)
\;\;\;, \hbox {if}\;\;
\o \; \in \; \O ^* (y,x)
$$
$$
\bar \r (\o)= 0\;\;\;\hbox {otherwise}
$$
In [Sch2] it is proven that,
for every $x,y \; \in \;S $, every $\L \; \in \; \O ^* (x,y)$,
$$
P_x ( X_t \; \in \; \L \; \forall t
\; \in [\bar \t _x, \t_y] ) = \r (\L) = \bar \r (R\L)=
P_y ( X_t \; \in \; R\L \; \forall t
\; \in [\bar\t _y, \t_x])\Eq(4.7)
$$
Let us now denote by $\partial ^- F(A)$ the set of all $\hat x \;\in \; F(A)$, uphill
communicating with $A\setminus F(A)$.
Given a point $\hat x $ in
$\partial ^- F(A)$, consider the set $ V( \hat x )$ of all the
points $x\;\in \;A$ uphill communicating with some point $\hat y \; \in \; U(A)$ and
such that there exists a standard cascade
${\cal T} ( y_0 = \hat y, \o_1, y_1,\o_2, \dots , y_{N-1}, \o_M)\;$
starting from $\hat y$ and ending in $\hat x$: $\hat x$ will belong to some component
$G^*$ of $F(A)$ ; $G^* \equiv Q_M $ and $\o_M$ will end entering into
$Q_M $ at $\hat x$.\par
Now we are able to state our main result about the typical trajectories
realizing the escape from a cycle $A$.\par
\vskip.5cm
{\bf Theorem 2}\par
\vskip.5cm
{\it Let
$$
\bar \t _{F(A)}= \max \{ t < \t_{S\setminus A} : X_t\; \in F(A)\}
$$
Call $\bar \partial ^- A $ the set of all the points $x\; \in A$ uphill connected to
$U(A)$ and
$$
{\cal T}= \cup _{y_0\;\in\;\bar \partial ^- A }{\cal T} _ {y_0}
\Eq(4.8)$$
the set of all possible standard tubes starting from points in
$\bar \partial ^- A $ and ending in $F(A)$.
Then:\par\noindent
i)\par
$$
P_{F(A)} ( X_{\t_{S\setminus A}} \;\in U(A) \; ;
\; X_t \; \in \; R{\cal T}
\; \forall \; t
\; \in [\bar \t _{F(A)}, \t_{S\setminus A} - 1])
\to\; 1\; \hbox {as }\;\; \b \; \to \;\; \infty \Eq(4.9)
$$
\noindent
ii) \par
\indent
Given $\hat x \;\in \;\partial ^- F(A)$ and any $x \; \in \; A$,
$$
P_{ x} ( \exists \;\hat y\;\in \; V(\hat x)\;:\; X_t \in R{\cal T}
( y_0 = \hat y, \o_1, y_1,\o_2, \dots , y_{N-1}, \o_M)
\forall t
\in [\bar \t _{\hat x}, \t_{\hat y} - 1 ]
| X_{\bar \t _{F(A)}} = \hat x )
$$
$$\to\; 1\; \hbox {as }\;\; \b \; \to \;\; \infty
\Eq(4.10)
$$
\noindent
iii) \par
\indent
During the first excursion from $F(A)$ to $S\setminus A$, conditioning to
$X_{\bar \t _{F(A)}} = \hat x$ (for some $\hat x \;\in \;\partial ^- F(A)$),
to $ X_{ \t _{S \setminus A} -1} = \hat y$
(for some $\hat y \;\in \;V(\hat x)$)
and to follow a particular ``anticascade''
$R{\cal T}
( y_0 = \hat y, \o_1, y_1,\o_2, \dots , y_{N-1}, \o_M)$
between $ \bar \t _{\hat x}$ and $ \t_{\hat y} $, all the ``time reversed'' of the
properties specified in point iv) of Theorem 1 hold true ; namely $\forall \; \e
>0,\;$ with probability tending to one as $\b \to \infty$ our process , during the
above mentioned first excursion, visits the time reversed of the sequence specified in
point iv) of Theorem 1 spending in each set the {\it same } typical times given
there.} \par
\vskip.5cm
{\bf Remark}
\vskip.5cm
In the particular case ( relevant for the applications to stochastic Ising models)
where the sets $Q_i$ always coincide with a single cycle $A_i$, it immediately follows
from Theorem 2, that the typical tube of trajectories during the first excursion from
$F(A)$ to $S \setminus A$ is an anticascade starting
from $\hat x \;\in \;\partial ^- F(A)$ and ending in some $y^* \;\in \;U(A)$
given by a sequence:
$ \bar A_1, \bar \o _1, \bar y_1,\bar A_2,\bar \o _2, \bar y_2, \dots ,
\bar A_M, \bar \o _M, y^*$ with the properties:\par
\noindent
i) \par
\indent $ H(\bar y_i)< H(\bar y_{i+1}), \; i = 1, \dots, M-1$
\par
\noindent
ii) \par
\indent
$ \bar y_i \; \in \; S(\bar A_{i+1})$ .\par \noindent
Some $\o_i $ can be empty;
in that case : $ \bar y_i $ is also a saddle point in $ \partial \bar A_i$
\par
\bigskip
{\bf Acknowledgements.}\par
We want to thank Roman Kotecky, Fabio Martinelli and Roberto Schonmann for
very interesting and stimulating discussions.\par
We want to express thanks for the kind hospitality to the Isaac Newton
Institute in Cambridge where part of this work has been done.\par
\bigskip
After the completion of the present paper we became aware that concepts and
arguments, somehow related to the ones developed in the present note, have
been considered
by O.
Catoni [Ca] in the framework of the theory of simulated annealing.\par
We want to thank G. Ben Arous who gave us the information about that.\par\bigskip
This work has been partially supported by the grant
SC1-CT91-0695 of the Commission of European Communities.
\bigskip
\bigskip
{\bf References}\par
\item{[CGOV]}M.Cassandro, A.Galves, E.Olivieri, M.E.Vares,
`` Metastable behaviour of stochastic dynamics: A pathwise approach'',
J.Stat.Phys. {\bf 35}, 603-634 (1984)
\item{[Ca]} O.Catoni, ``Sharp large deviations estimates for simulated
annealing algorithms '', Ann. Inst. H. Poincar\'e {\bf 27}, 291-383 (1991)
\item{[FW]} M.I.Freidlin, A.D.Wentzell, Random Perturbations of Dynamical
Systems, Springer-Verlag 1984
\item{ [KO1]} R.Kotecky, E.Olivieri ``Droplet dynamics for asymmetric
Ising model'' Preprint (1992).
\item{ [KO2]} R.Kotecky, E.Olivieri ``Shapes of growing droplets -- a model of
escape from a metastable phase'' in preparation.
\item{[MOS]} F. Martinelli, E. Olivieri, E. Scoppola,
``Metastability and exponential approach to equilibrium for
low temperature stochastic Ising models'' Journal of Stat.
Phys.
{\bf 61}, N.5/6 1105 1990.
\item{[NSch1]} E.J. Neves, R.H. Schonmann, ``Behaviour
of droplets for
a class of Glauber dynamics
at very low temperatures'' Comm. Math.Phys. {\bf 137} 209
(1991).
\item{[NSch2]} E.J. Neves, R.H. Schonmann, ``Critical
Droplets and Metastability for a Glauber Dynamics
at Very Low Temperatures'', Prob.Theor.Rel.Fields {\bf 91}, 331 (1992)
\item{[OS]} E. Olivieri, E. Scoppola, `` Markov chains with exponentially small
transition probabilities: First exit problem from a general domain -II. The
general case'' In preparation.
\item{[Sch2]} R. H. Schonmann,
`` The pattern of escape from metastability of a stochastic Ising model '',
Comm. Math. Phys. {\bf 147} (1992) 231-240.
\item{[S1]} E.Scoppola `` Renormalization group for Markov chains and
application to metastability'', Jour. Stat. Phys. {\bf 73}, 83 (1993)
\item{[S2]} E.Scoppola `` Metastability for Markov chains: a general
procedure based on renormalization group ideas'', in ``Probability and
Phase Transition'', Ed. G.Grimmett, NATO ASI Series, Kluwer Ac. Publ.
\item{[S3]} E.Scoppola `` Renormalization and graph methods for Markov
chains'', Proceedings of the Conference ``Advances in Dynamical Systems
and Quantum Physics'' - Capri 1993. In press
\end
ENDBODY