Content-Type: multipart/mixed; boundary="-------------0410121701147"
This is a multi-part message in MIME format.
---------------0410121701147
Content-Type: text/plain; name="04-323.comments"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="04-323.comments"
To appear in the Annals of Probability, Vol. 33, no. 1, January 2005.
---------------0410121701147
Content-Type: text/plain; name="04-323.keywords"
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="04-323.keywords"
quasi-stationarity, point process, edge distribution, REM states, spin glass models
---------------0410121701147
Content-Type: application/x-tex; name="particlesarchive.tex"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline; filename="particlesarchive.tex"
\documentclass[12pt, twoside]{article}
\usepackage{amsmath,amsfonts,amsthm,amssymb}
\pagestyle{myheadings} \markboth{A. RUZMAIKINA and M. AIZENMAN}
{INVARIANT MEASURES AT THE LEADING EDGE}
\marginparwidth 0pt
\oddsidemargin 0pt
\evensidemargin 0pt
\marginparsep 0pt
\topmargin 0pt
\textwidth 6.5in
\textheight 8.5 in
\renewcommand{\baselinestretch}{1.2}
\date {{\small February 25, 2003 (revised Feb. 6, 2004)}}
\setlength{\oddsidemargin}{.3in}
\setlength{\evensidemargin}{.3in}
\setlength{\textwidth}{6.2in}
\setlength{\textheight}{8.3in}
\setlength{\topmargin}{0in}
\setlength{\leftmargin}{-0.3in}
\parskip=7pt
\parindent 0.4in
\def\abst#1{\begin{minipage}{5.25in}
{\noindent \normalsize
{\bf Abstract} #1} \\ \end{minipage} }
\def \be{\begin{equation}}
\def \ee{ \end{equation} }
\def \bea{\begin{eqnarray}}
\def \eea{\end{eqnarray}}
\newcommand{\eq}[1]{eq.~(\ref{#1})} %% to invoke write: \eq{...}
\def\proof{ \noindent {\bf Proof :} }
\def\qed{\hbox{\hskip 6pt\vrule width6pt height7pt depth1pt
\hskip1pt}\bigskip}
\def\too#1{\parbox[t]{.4in} {$\longrightarrow\\[-9pt]
{\scriptstyle #1}$}}
\def\dim{\overline{dim}_{B}}
\def\liminf{\mathop{\underline{\rm lim}}}
\def\limsup{\mathop{\overline{\rm lim}}}
\def\lg{\stackrel{\scriptstyle <}{_{_{\scriptstyle >} }} }
\def\blackbox{{\vrule height 1.3ex width 1.0ex depth -.2ex}
\hskip 1.5truecm}
\def \L{{\mathcal L}}
\def \B{{\mathcal B}}
\def \E {{\mathbb E}}
\def \F {{\mathcal F}}
\def \I {\mathrm I}
\def \Var {{\rm Var}}
\def \C {{\mathcal C}}
\def \F {{\mathcal F}}
\def \O {{\Omega}}
\def \o {{\omega}}
\def \t {{\tau}}
\def \NN {{\mathbb N}}
\def \N {{\mathcal N}}
\def \M {{\mathcal M}}
\def \Chi {{\cal X}}
\def \d {{\rm d}}
\def \eps {\varepsilon}
\def \ve {\varepsilon}
\def \R {{\mathbb R}}
\def \Z {{\mathbb Z}}
\def\abs#1{\left\vert #1 \right\vert}
\def\norm#1{\left\| #1 \right\|}
\newcommand{\CC}{\mathbb C}
\newcounter{masectionnumber}
\setcounter{masectionnumber}{0}
\newcommand{\masect}[1]{\setcounter{equation}{0}
\refstepcounter{masectionnumber} \vspace{1truecm plus 1cm}
\noindent
{\large\bf \arabic{masectionnumber}. #1}\par \vspace{.2cm}
\addcontentsline{toc}{section}{\arabic{masectionnumber}. #1}
}
\renewcommand{\theequation}
{\mbox{\arabic{masectionnumber}.\arabic{equation}}}
\newcounter{masubsectionnumber}[masectionnumber]
\setcounter{masubsectionnumber}{0}
\newcommand{\masubsect}[1]{
\refstepcounter{masubsectionnumber} \vspace{.5cm} \noindent
{\large\em \arabic{masectionnumber}.\alph{masubsectionnumber} #1}
\par\vspace*{.2truecm}
\addcontentsline{toc}{subsection}
{\arabic{masectionnumber}.\alph{masubsectionnumber}\hspace{.1cm}
#1}
}
%%%%%%%%%%%% appendix sections:
\newcommand{\startappendix}{ \setcounter{masectionnumber}{0} }
%%resetsection counter
\newcommand{\maappendix}[1]{
\setcounter{equation}{0}
\refstepcounter{masectionnumber} \vspace{1truecm plus 1cm}
\noindent
{\large\bf \Alph{masectionnumber}. #1}\par \vspace{.2cm}
\renewcommand{\theequation}
{\mbox{\Alph{masectionnumber}.\arabic{equation}}}
\addcontentsline{toc}{section}{\Alph{masectionnumber}. #1}
}
\newtheorem{Thm}{Theorem}
\newtheorem{lem}{Lemma}[masectionnumber]
\newtheorem{thm}[lem]{Theorem}
\newtheorem{prop}[lem]{Proposition}
\newtheorem{cor}[lem]{Corollary}
\newtheorem{df}[lem]{Definition}
\newtheorem{rem}[lem]{Remark}
\newtheorem{stt}[lem]{Statement}
\newtheorem {Definition}{Definition}
\newtheorem {Lemma}{Lemma}
\newtheorem {Theorem}{Theorem}
\newtheorem {Proposition}{Proposition}
\newtheorem {Corollary}{Corollary}
\newtheorem {Remark}{Remark}
\newtheorem {Statement}{Statement}
\begin{document}
\title{\vspace*{-.35in}
Characterization of invariant measures at the
leading edge for competing particle systems}
\author{A. Ruzmaikina ${}^{(a)}$
\qquad
M. Aizenman ${}^{(b)}$ \\ \hskip 1cm
\vspace*{-0.05truein} \\
\normalsize \it ${}^{(a)}$
Department of Mathematics, University of Virginia, Charlottesville, VA
22903 \\ (\normalsize \it present address: Departments of Statistics and Mathematics, Purdue University, \\
\normalsize \it West Lafayette, IN 47905.) \\
\normalsize \it ${}^{(b)}$ Departments of Physics and
Mathematics, Jadwin Hall \\
\normalsize \it
Princeton University, P. O. Box 708, Princeton, NJ 08544. }
\maketitle
\thispagestyle{empty} %removes # on p.1
\begin{abstract}
We study systems of particles on a line which have
a maximum, are locally finite, and evolve with independent
increments. `Quasi-stationary states' are defined as probability
measures, on the $\sigma$ algebra generated by the
gap variables, for which the joint distribution of
the gaps is invariant under the time evolution. Examples are
provided by Poisson processes with densities of the form,
$\rho(dx) \ =\ e^{- s x} \, s\, dx$, with $ s > 0$, and linear
superpositions of such measures. We show that conversely:
any quasi-stationary state for the independent dynamics, with
an exponentially bounded integrated density of particles,
corresponds to a superposition of the above described probability
measures, restricted to the relevant $\sigma$-algebra. Among the
systems for which this question is of some relevance are
spin-glass models of statistical mechanics, where the point process
represents the collection of the free energies of distinct ``pure
states'', the time evolution corresponds to the addition of a spin
variable, and the Poisson measures described above correspond to the
so-called REM states.
\end{abstract}
%\vfill
%\noindent {\bf PACS numbers:} \\
%\noindent {\bf Key words:}
\vskip .25truecm
\newpage
\vskip .25truecm
\masect{Introduction}
\label{sect:intro}
Competitions involving large number of contestants
are the object of interest in various fields.
One could list here the energy levels of complex systems and the free
energies of competing extremal states of spin glass
models~\cite{MPV}. We are particularly interested in dynamical
situations where the competition continues in ``time'',
though time may be interpreted loosely. E.g., in
the motivating example of spin glass models (\cite{MPV}),
a point process on the line represents
the collection of the free energies of distinct ``pure states''
of a system of many spin variables,
and the ``time evolution'' corresponds to the incorporation
in the system of yet another spin variable.
Influenced by the
terminology of statistical mechanics, we use here the
term {\em state} to mean a probability measure on the relevant
$\sigma$-algebra of subsets of the space of the point process
configurations. For much of the discussion which follows, the
relevance would be limited to the information concerning only the
relative positions of the points, relative to the one which leads at the
given instant.
As in the pictures seen in marathon races,
often the point process describing the
relative positions appears to be time invariant.
We refer to such states as quasi-stationary.
In this paper we characterize the quasi-stationary states for
the class of systems in which
the evolution occurs by independent identically distributed
increments of the individual contestants.
The main result is that any such state, of a point process
with locally finite configurations with more than one
point and exponentially bounded density, corresponds to
a linear superposition of Poisson processes with densities of the form
\be
\rho(dx) \ =\ e^{- s x} \, s\, dx
\label{rem}
\end{equation}
with $s > 0$. This may be rephrased by saying, in the
terminology coined by D. Ruelle~\cite{R} (who invokes the work of B.
Derrida~\cite{D}), that all quasi-stationary states correspond to
superpositions of the Random Energy Model (REM) states.
\noindent {\bf Remarks:} \\
1) Our main result may have a familiar ring to it, since
the above distributions are known to describe
the ``Type-I'' case of
the Extremal Statistics~\cite{Ex}. \\
2) It would be of interest to
see an extension of the classification of the quasi-stationarity
to a broader class of dynamics where the evolution may exhibit
correlations. One may note that the REM states have an extension,
based on a hierarchical construction, to the family of the
GREM states~\cite{R,BS},
which exhibit quasi-stationarity under a broad class of
correlated dynamics. Is that structure singled out in some way by
its broader quasi-stationary properties?
In the following section we introduce the concepts more
explicitly. We refer to the system as the
Indy-500 model, ignoring the fact that for a number of
obvious reasons this is not a faithful description of the
dynamics in that well known car race.
\masect{The Indy-500 model}
\nopagebreak
The configuration space of the Indy-500 model
is the space, $\Omega$, of
infinite configurations of points on the line, which are locally
finite and have a maximum (in the order of $\R$). Its elements,
$\omega \in \Omega$, can also be described as sequences, $\omega = \{
x_n\}_{n=1,2,\ldots}$, with \be x_1 \ge x_2 \ge ... \quad \;, \quad
\mbox{and $x_n \to -\infty$} \; .
\label{order}
\end{equation}
(Variables written as $x_n$ should by default be
understood to be ordered).
In the time evolution considered here the points evolve
by independent increments.
As is generally the case with stochastic evolutions, the dynamics can be
presented in two ways: as a stochastic map, in which the configuration
$\o\in \Omega$ is changed in a random way - through the independent
increments, or as a reversible transformation taking place in a larger
space, which encompasses the full information about both the future and
the past dynamics. Our terminology is based on the former view, however
the second perspective provides a useful basis for the intuition
guiding the analysis.
Thus, the time evolution
is given by a stochastic map determined by the collection of random
variables $\eta=\{ h_n \}_{n=1,2,..}$: \be T_{\eta}: \{x_n \}
\longmapsto \{\tilde x_n \} \qquad \mbox{with} \quad \tilde
x_n=x_{\Pi_n} + h_{\Pi_n} \; .
\label{evolution}
\end{equation}
where $h_n$ are independent random variables with a
common probability distribution $g(dh)$ on $\R$, and $\Pi$ is
a permutation of $\NN$, which depends on both $\o$ and $\eta$,
aimed at recovering the monotonicity for $\tilde x_n$.
In other words, $\Pi=\Pi(\o, \eta)$ is a relabeling of
the moving particles according to the new order.
For a given probability measure $\mu(d\omega)$ on $\Omega$, we
denote by $T\mu$ the corresponding probability distribution of the
one step evolved configuration $\{\tilde x_{\alpha} \}$. To be
explicit: the average over $T\mu$ corresponds to
averaging over both $\mu$ and $\eta$.
One needs to pay some attention to the $\sigma$-algebras on which the
measures $\mu$ and $T\mu$ are to be defined. Since we are interested
in the classification of states which are only {\em quasi-stationary},
we allow those to correspond to probability measures defined on a
smaller $\sigma$-algebra than the one usually used for point processes
on a line. (Such a change makes the result only stronger.)
The standard $\sigma$-algebra, which is natural for the state space of
particle configurations, is generated by the occupation numbers of
finite intervals (see, e.g., \cite{DaVe}). Let us denote it by $\cal B$.
Measurable functions include all $\psi : \Omega \to \R $ of the form
\be \psi_f(\omega) \
:= \ \sum_{n} f(x_n)
\end{equation}
with bounded measurable functions $f: \R \to \R $, of compact support.
However, in this work we are interested in probability measures on the smaller
$\sigma$-algebra $\widetilde{\cal B}$ generated by functions
which are invariant, $\psi(S_{b}\omega)=\psi(\omega)$,
under the uniform shifts
\be S_{b}: \{x_n \} \longmapsto \{\tilde x_n
\} \qquad \mbox{with} \quad \tilde x_{n}=x_n + b \; .
\label{shift}
\end{equation}
Functions which are measurable with respect to $\widetilde{\cal B}$
depend on the configuration only through the sequence
of the distances of the particles from the leading one:
\be u_n = x_1 - x_n \; .
\label{gaps}
\end{equation}
Thus, a probability measure $\mu$ on $(\O, \widetilde \B)$
is uniquely determined by the ``modified probability generating
functional'' (MPGFL)
\begin{equation}\label{notationQ}
\widetilde G_\mu (f) = \E_{\mu}\left(
e^{-\sum_n f (x_{1} - x_{n})} \right) \, .
\end{equation}
with $f(\cdot)$ ranging over smooth positive functions of compact
support. (The regular ``probability generating
functional'' is defined without $x_{1}$ in
(\ref{notationQ}).)
One can now formulate a number of distinct ``steady state'' conditions,
where the term {\em state} refers to a probability measure on a
suitable $\sigma$-algebra, which is not always the same.
\noindent {\bf Definition:} {\em
A \underline{stationary state} is a probability measure $\mu(d\omega)$ on
$(\Omega, {\cal B})$ which is invariant under the stochastic map
$T$, i.e., $T \mu = \mu$, or more explicitly:
\be \E_{\mu}\left( \psi(T
\omega) \right) \ = \ \E_{\mu}\left( \psi(\omega) \right)
\label{stationary}
\end{equation}
for any {\cal B}-measurable $\psi$, where the expectation
functional $\E_{\mu}$ includes an average over both $\omega$ (distributed by
$\mu$) and $T$ (determined through $\{ h_n \}$, as in \eq{evolution} ).
A \underline{steady state} is a probability measure
$\mu(d\omega)$ on $(\Omega, {\cal B})$
for which there is a non-random $V$ ( = the ``front velocity'')
such that
$T\mu = S_{V} \mu$, i.e.,
\be
\E_{\mu}\left( \psi(T \{ x_n \}) \right) \ = \
\E_{\mu}\left( \psi(\{ x_n +V \}) \right)
\label{steady-state}
\end{equation}
for all ${\cal B}$- measurable functions $\psi$.
A \underline{quasi stationary} state is a probability measure
$\mu(d\omega)$ on the $\sigma$-algebra
$\widetilde{\cal B}$ (sub-$\sigma$-algebra of $\cal B$)
such that (\ref{stationary})
restricted to {\underline{shift invariant}} functions $\psi$ holds, i.e.,
for which \be \E_{\mu}\left( \psi(\{ u_n \})
\right) \ = \ \E_{\mu}\left( \psi(\{ \tilde u_n \}) \right) \end{equation} with
$\{ u_n \}$ the gaps defined by (\ref{gaps}), and $\{ \tilde u_n \}$ the
gaps for the configuration $\tilde \omega = T\omega$. }
For an alternative characterization of quasi-stationary measures,
in terms which are more standard for point processes, let us note that
each configuration is shift-equivalent to a unique element of the set
\be
\Omega_o = \left\{ \{ x_n \} \ | \ x_1 = 0 \right\} \; . \end{equation}
The ``normalizing shift'' $S: \, \o \mapsto S_{-x_1(\omega)}\o $,
induces a measurable map from $(\Omega, \widetilde {\cal B})$ to $
(\Omega_o, {\cal B}) \subset (\Omega, {\cal B}) $, and thus also a map
(for which we keep the symbol $S$) which associates to each
probability measure $\mu$ on $(\Omega, \widetilde {\cal B})$ a
probability measure $S \mu$ on $(\Omega, {\cal B})$, supported on $\Omega_o$.
The measure $\mu$ is quasi-stationary if and only if the corresponding
measure $S\mu $ is invariant under $ST$ --- the time evolution followed
by the normalizing shift.
Stationarity is a special case of the steady state, and the latter
reduces to it when viewed from a frame moving at a fixed speed.
Quasi-stationarity is the less demanding property of the three
mentioned above, and is the condition of interest if one follows
only the relative positions.
Through a combination of the results in ~\cite{L} and
~\cite{CD} one may conclude that any {\em steady}
state of the Indy-500 model, whose
jump distribution satisfies the {\em non-lattice} condition (meaning
that its support is not contained in any set of the form $a+b\Z\subset
\R$), is a Poisson process with a density of the form $\rho(d x) = s
e^{-sx} dx$. These are the REM states which are discussed in the next
section.
Our main result is that for the infinite systems discussed here
{\em quasi-stationary} probability measures can only be linear
superpositions (as probability measures) of the above steady states
restricted to $\widetilde{\cal B}$.
\noindent {\bf Remarks:}
{\em i.} The restriction, in the above statement, to infinite number of
particles excludes the trivial example of a {\em quasi-stationary}
state which is not the projection of any {\em steady} state, which is
provided by a single point moving on the line by independent
increments. In this case the state looks stationary from the
perspective of the ``leader'': there is always just one point, at the
origin. There is, however, no steady velocity $V$ such that
(\ref{steady-state}) holds.
{\em ii.} Linear superpositions (of measures on the suitable $\sigma$
algebras) preserve the property of {\em quasi stationarity} though not
that of {\em steady state} -- due, in the latter case, to the possible
variation in the front velocities.
\masect{The REM states}
We recall that for a probability measure $\rho(dx)$ on $\R$,
a Poisson process with the density $\rho$, is a probability measure on
$(\Omega, {\cal B}) $ for which the occupation numbers for disjoint
sets $A\subset \R$ form independent random variables,
$\xi(A;\omega) \ \equiv \xi(A)$, with the Poisson distributions \be
\mbox{Prob\/} \left( \xi(A) =k \, \right) = \frac{\rho(A)^k}
{k!}e^{-\rho(A)} \qquad \mbox{and mean} \qquad \ E(\xi(A) ) \ = \ \rho(A)
\; .
\label{independ}
\end{equation}
We denote by $\mu_{s,z}(d\omega) $ the Poisson process with density
$ \rho_{s,z} (dx) = se^{-s(x-z)} dx$ on $\R$.
The special role of the above states in the present context
is already seen in the following statement, which is
based on known results.
\begin{prop} (Based on \cite{L,CD,MPV,R}.)
For any non-lattice single step probability
distribution $g(dx)$ the collection of
the {\underline {steady states}} corresponding to the evolution by
IID increments $\{ h_n \}$ with the distribution $g(dh)$, as described by
(\ref{evolution}), consists exactly of the probability measures
$\mu_{s,z}(d\omega)$ (on $(\Omega, {\cal B})$), with $s>0$, $z\in \R$.
For each of these states, the corresponding {\underline {front
velocity}} $V$ is the solution of:
\begin{equation}
\label{V}
e^{sV} = \int e^{sx} g(dx) \, .
\end{equation}
Furthermore, with respect to $\mu_{s,z}(d\omega)$, the past increments
also form an IID sequence, however with a modified distribution:
conditioned on $\{ \tilde x_n \} $, the variables $\{ h_{\Pi_n } \}$
form a sequence of IID variables with the probability distribution
\begin{equation} \label{conditional}
\tilde g(dh) \ = \ \frac{e^{sh} \, g(dh)}{\int_{\R} e^{sy} \,
g(dy) } \; .
\end{equation}
\end{prop}
Thus for these steady states the distribution of the increments changes depending on
whether one looks forward or backward in time (!). In other words,
the permutation $\Pi_n (\omega)$ transforms the sequence of IID
variables $\{ h_n \}$ into an IID sequence ($\{ h_{\Pi_n} \}$) with a
different distribution. (Of course this is possible only in infinite
systems.)
\begin{proof}
The evolution by independent increments is well known, and easily seen,
to take a Poisson point process into another such process with the
density modified through convolution ($\rho \mapsto \rho*g$). Therefore
just the steady state property of the states $\mu_{s,z}$ is an
elementary consequence of the behavior of the exponential density
under convolutions. However, for the more complete statement made
above it is useful to appreciate the following observation, concerning
two possible ways of viewing the collection of variables $\widetilde
\o =\{(x_n, h_n)\}$. The following are equivalent constructions of a
point process in $\R\times \R$: \\
{\em i.\/} A collection of points $\{x_n\}$ is generated via a
Poisson process on $\R$, with the density $\rho(dx)$, and then to each
point is attached, by its order on $\R$, a random variable $\{h_n\}$,
taken from an IID sequence with the distribution $g(dh)$. \\
{\em ii.\/} The configuration is generated directly as a Poisson point
process in $\R \times \R$, with the two-dimensional density $\rho(dx)
g(dh)$. \\
The transition of the perspective from {\em ii.} to {\em i.}
requires only the second factor in the product measure on $\R \times \R$
to be normalized $\int_{\R} g(dh) = 1$.
Now, the map $(x,h) \mapsto \ (x+h,h)\equiv (\tilde x, h) $ takes the Poisson
process describing $\widetilde \o$ into another Poisson process on
$ \R\times \R $, which yields the joint distribution of the ``new''
positions paired with the steps ``just taken''. In case of
$\mu_{s,z}(dx)\times g(dh)$, the density of the new process is:
$s e^{-s x} dx \,g(dh)\ = \ s e^{-s (\tilde x -h)} d\tilde x \,g(dh)$.
This can also be
written as a product $\left[\int e^{sy} g(dy) \right] \ s e^{-s
\tilde x} d\tilde x \times \frac{e^{sh}g(dh)}{\int e^{sy} g(dy) } $, where
now the second factor is properly normalized. By the previous
observation it immediately follows that:
\noindent {\em i.\/} The positions after the jump $\{ \tilde x_n\} $ are
distributed as a Poisson process on $\R$ with the modified density
$\tilde \xi(dx) \ = \ \left[\int e^{sy} g(dy) \right] \ s e^{-s
\tilde x} d\tilde x = s e^{-s (\tilde x-V)} d\tilde x$. I.e., $\{\tilde x_n\}$
have the same distribution as $\{x_n\}+V$ with $V$ satisfying (\ref{V}). \\
\noindent {\em ii.\/} When conditioned on the configuration
$\{\tilde x_n\}$, the jumps just taken
are generated by an independent process on $\R$ with
the probability density given by (\ref{conditional}), as claimed.
For the converse statement, i.e., to prove that all steady states are of the
REM type, one may first note that if $\mu(d\omega)$ is a {\em steady
state} for the dynamics corresponding to $g(dx)$, with the front
velocity $V$, then $\mu$ is {\em stationary} under the dynamics
corresponding to a shifted single step distribution: $g_V(dx) = g(\, d
(x -V) \, )$. The classification of stationary states, and hence also
steady states, is found in~\cite{L}, where it is implied that
any stationary measure is a superposition of Poisson processes whose
random density solves the equation $\rho=\rho * g$. As established
in~\cite{CD}, for non-lattice $g(dh)$ the only solutions in the space
of positive measures are measures of the form $\xi(dh)= [A e^{-sh}+ B]
dh$. The condition that the typical configurations be bounded on the
positive side implies that $s>0$ and $B=0$.
\end{proof}
Having introduced the REM states, we are ready to formulate the main
result.
\masect{Classification of quasi-stationary states.}
\begin{df}\label{gregular}
A probability measure $\mu$ on $\O$ is g-regular if for almost
every $T \o = \{ \o , \{ h_n\}_n \} $, with respect to $\mu(d \omega)
\, \prod_{n \in \NN} g (d h_{n})$, the point configuration $\{
x_n+h_n\}_n $ is locally finite, with a finite maximum.
\end{df}
The g-regularity of $\mu$ means that with probability one
the configuration obtained through the independent increments
has a maximum and can be ordered. This is a preliminary
requirement for the possible quasi-stationarity of $\mu$.
It is
easy to see that a sufficient condition for g-regularity
is met in
the situation discussed next.
The general sufficient condition is
the finiteness, for all $x\in \R$,
of:
\be
\E\left(\mbox{card}\{
n\, : x_n + h_n \ge x \} \right) \ = \
\E_\mu\left(\sum_n
\mbox{Prob}_g ( h_n \ge x - x_n) \right) \, .
\end{equation}
In the following pages, to simplify the exposition and to avoid
confusion, we will always assume that at $t = 0$ the rightmost particle
in the configuration is at $x_1 = 0$ (we can do this without loss of generality);
we will denote by $x_n$ the
positions of the particles at $t = 0$ and by $y_n$ the positions of the
particles at $t = \t$.
Following is our main result:
\begin{thm}\label{th:main}
Let $g$ be a probability measure with a density on $\R$ and
$\mu$ a
probability measure on $(\O, \widetilde \B)$, satisfying
\be \int
e^{s x} g (x) dx~<~\infty \qquad , \qquad
\forall s \in \R \; ,
\label{exp-condition}
\end{equation}
and
\be \label{expbound}
\E_\mu\left( \{\# {\rm ~ of~ particles~ within~distance ~
}~y ~{\rm of~the ~leading ~particle}\} \right)
\ \le \ A \ e^{\lambda y} \qquad , \qquad
\forall y \geq 0
\end{equation}
for some $\lambda > 0$, and $A
< \infty$.
If $\mu$ is quasi-stationary with respect to
the dynamics corresponding to independent increments with the
distribution $g$, then it is supported on
Poisson processes with densities $s e^{-sx} dx$, $s > 0$.
\end{thm}
The meaning of the theorem is that the probability space $\Omega$ can be split into pieces and the process on each piece of $\Omega$ is a Poisson process with a density $s e^{-sx} dx$ for a particular $s$.
In the proof we shall use the fact that point processes are uniquely
determined by their probability generating functionals (as
discussed
in \cite{DaVe}). Our derivation of Theorem~\ref{th:main}
proceeds
along the following steps.
\begin{enumerate}
\item
First we note that any quasi-stationary state can be presented as the
result of evolution of arbitrary duration ($\t$) which starts from a
random initial configuration,
distributed by the given quasi-stationary state, and evolves
through independent increments.
\item Analyzing the above dynamics, we show that for large $\t $
the resulting distribution is asymptotic
to Poisson processes with the corresponding (evolving) densities.
Thus, it is shown that the
quasi-stationary measure $\mu$ can be presented as the limit
of a superposition of
{\em random} Poisson processes, where the randomness is in both the
Poisson measure and in the resulting particle configuration
(Theorem~\ref{th:approxpoisson}).
\item Applying a result from the theory of large
deviations (Theorem~A.1),
and some compactness bounds which are derived from
quasi-stationarity,
we show that the quasi-stationary measure admits a
representation as a random Poisson process, whose Poisson
densities ($F$)
are the Laplace transforms of (random) positive measures
(Theorem~\ref{th:accpts}).
Furthermore, in this integral representation of $\mu$,
$F$ may be replaced by its convolution with $g$, followed
by a normalizing shift.
\item For the last step we develop some monotonicity tools
(Section~\ref{sect:monotonicity}), for which the underlying
fact is that under the
convolution dynamics the Laplace measures increase their relative
concentration on the higher values of the Laplace parameter
(Theorem~\ref{th:steeper}). This corresponds to the
statement that unless the function $F$ is a strict exponential,
under the convolution dynamics the function $F$
becomes steeper, and the distribution of the gaps is
shifted down.
Using a strict monotonicity argument, we show that
quasi-stationarity requires the measures in the above
superposition to be supported on pure exponential functions
(or, alternatively stated, functions whose
Laplace measure is concentrated at a point).
\end{enumerate}
The final implication is that the quasi-stationary measure is a
superposition of REM measures, as asserted in Theorem~\ref{th:main}.
Let us remark that Section~\ref{sect:monotonicity} may be of
independent interest. It is noted there that within the space of
decreasing functions which are the Laplace transforms of positive
measures on $[0,\infty)$, convolution with a probability measure
makes any function steeper, in the sense presented below, except for
the pure exponentials on which the effect of such a convolution is
only a shift.
\newpage
\masect{Representation of $\mu$ as a random Poisson processes}
\masubsect{`Poissonization' -- the statement}
Let $\F$ be the space of monotone decreasing, continuous
functions
$F:\, \R \rightarrow [0, \infty]$, with $F(x)\rightarrow 0 $ for $x
\to \infty$ and $F(x) \rightarrow \infty$ for $x \rightarrow -\infty$.
We regard a function $F\in \F$ as normalized if
\be \label{eq:norm}
F(0) \ = \ 1 \, ,
\end{equation}
and denote by $\N$ the normalizing shift: $\N : \, F(\cdot)
\mapsto F(\cdot + z_F)$, with $z_F = \sup \{ z\in \R \, : \,
F(z) \ge 1 \, \}$ .
For each $F\in \F$, the Poisson process on $\R$ which corresponds to
the
measure $(-)d\, F$ will almost surely exhibit a configuration which
can be ranked in the decreasing order of $\R$. The probability
that there is no particle above $x\in \R$ is $\exp(-F(x))$.
Conditioned on the location
of the leading particle ($x$), the rest are distributed by a Poisson
process on $(-\infty, \, x]$ with the density $d\,(- F)$. Thus, the
MPGFL (defined in (\ref{notationQ}) ) of the Poisson process with density
$F$, which we shall denote by $\widehat G_{F}(f)$, is given
by:
\begin{equation}\label{eq:poisson}
\widehat G_F(f) = \int
_{-\infty}^{\infty} d \left[ e^{-F(x)}\right]\, e^{-\int
_{-\infty}^{x} (1-e^{-f (x-y)}) d (- F(y))}
\, .
\end{equation}
Let us note that
\be \label{eq:Norm}
\widehat G_F (f) \ = \ \widehat G_{\N F}(f) \, ,
\end{equation}
since the
probability distribution of the gaps is not affected by uniform shifts.
For the purpose of the following theorem let $S_{\t}$
be a random variable with the probability distribution
$ P(S_{\t} \geq y) = \int I\left[ \sum y_j \ge y \right]
\, g(y_1)\cdot \ldots \cdot g(y_\t) d y_1 \ldots d y_{\t}$.
We associate with each
configuration $\o$, and $\t \in {\NN } $, the function
\begin{equation}\label{defofF}
F_{\o;\t} (x) = \sum_{m} P(S_{\t} \geq x - x_m),
\end{equation}
and denote by $z_{\o,\t}$ the position at which:
\be
F_{\o;\t} (\, z_{\o,\t}\, ) \ = \ 1 \, .
\end{equation}
One may note that
$F_{\o;\t}(x)$ is the expected number of particles on
$[x, \infty)$ for the configuration which will be obtained
from $\o$ after
$\t$ steps of evolution with independent increments.
If the support of $g(y)$ is not bounded one may easily find
configurations
for which $F_{\o;\t}(\cdot)$ diverges. However,
if the measure $\mu$ is $g$-regular
then a.s. $F_{\o;\t}(\cdot) < \infty $. Furthermore, we
shall see that if $\mu$ is quasi-stationary then the position of the
front after $\t$ steps can be predicted to be in the vicinity of
$z_{\o;\t}$ --
up to a fluctuation whose distribution remains stochastically bounded
(i.e., forms a `tight' sequence) as $\t\to \infty$.
The main result of this section is:
\begin{thm}\label{th:approxpoisson}
Let $\mu$ be a $g$-regular quasi-stationary measure, for the
independent evolution by steps with some common probability
distribution which has a density $g(u)$.
Then for every positive function $f$ of compact support in $\R$,
\bea \label{eq:approxpoisson}
\widetilde G_\mu (f) & = & \lim_{\t \rightarrow \infty }
\int_{\Omega} \mu(d \o) \, \widehat G_{\N F_{\o;\t}} (f)
\nonumber \\ \\
& = & \lim_{\t \rightarrow \infty }
\int_{\Omega} \mu(d \o) \, \widehat G_{g* \N F_{\o;\t}}(f) \;
\nonumber
\end{eqnarray}
(where $\widetilde G_\mu (f)$ is the modified probability generating
functional
defined in (\ref{notationQ})).
\end{thm}
This statement implies that
the measure $\mu$ is, in the ``weak sense'', a limit of random
Poisson processes, with intensity measures corresponding to the random functions
$\N F_{\o;\t}(\cdot)$, whose probability distribution
is induced from $\mu$ through their dependence on $\o$.
Let us note that this result is related to - but not covered
by - the known statement that
any limit of a sequence of point processes which is derived through
successive random independent increments is a mixed
Poisson process (e.g., \cite{DaVe}, Theorem 9.4.2). Unlike in that
case, the time evolution considered here incorporates shifts according to
the position of the leading point (and the limiting process is not
stationary under translations).
The rest of this section is devoted to the proof of this assertion,
for which we need some preparatory estimates.
First let us make the following observation:
\begin{lem}
Any quasi-stationary measure is supported on configurations with
either exactly one particle, or infinitely many.
\end{lem}
\begin{proof}
The statement is a simple consequence of the
spreading of the probability distribution of the sum of independent
increments, i.e., of the variable $S_\t$. E.g., one may
consider the function:
\be \label{eq:Y}
Y_{\mu}^{(2)}(y) \ = \ \mu( \, \{ y_1 - y_2 \ge y \, \} ).
\end{equation}
By the dominated-convergence theorem:
$Y_{\mu}^{(2)}(y) \too{y \to \infty} 0$. However, for any finite
number of particles, the probability that after $\t$ steps
the smallest gap will exceed $y$ tends to one as $\t \to \infty$.
Thus finite configurations of more than one particle can carry only zero
probability in any quasi-stationary measure. Of course a measure with
exactly one particle is quasi-stationary.
\end{proof}
\masubsect{Some auxiliary estimates}
Given an initial configuration $\o=\{ x_n\}$, the probability
distribution of the position of the leading particle after $\t>0$
steps
is $\, d\, P_{\o}^{(\t)}(x)$, with
\bea
P_{\o}^{(\t)}(x) \ &=& \ {\rm Prob}\left( \left\{ \mbox{at time $\t$ all particles are on $(- \infty, x]$} \right\} \right) \nonumber \\
&=& \ \prod_{n} \left[ 1 - P(S_{\tau} \geq x - x_n) \right] \, .
\end{eqnarray}
We shall need to compare $\, d\, P_{\o}^{(\t)}(x)$
with the probability distribution
associated with the function
\be
\widetilde P_{\o}^ {(\t)}(x) \ = \
e^ {- \sum_n P(S_{\tau} \geq x - x_n) }
\ = \ e^{-F_{\o;\t} (x) } \, .
\end{equation}
\noindent {\bf Remark:}
It is instructive to note that
$\, d\, \widetilde P_{\o}^{(\t)}(x)$ is the probability
distribution
of the maximum of a modified process, in which at first each
particle is replaced by a random number of descendents,
with the Poisson distribution $p_n=e^{-1}/n!$, and then
each particle evolves by $\t$ independent increments, as in the
Indy-500 model. Conditioned on the starting configuration,
the modified process is (instantaneously) a Poisson process.
The probability that its maximum is in $(-\infty,x]$ is given by:
\be
\prod_{n} \left [ \sum_{k=0}^{\infty}\frac{ e^{-1} }{k!}
(\, 1 - P(S_{\tau} \geq x - x_n) \, )^k \right ]
= \ e^{ - \sum_n P(S_{\tau} \geq x - x_n) } \ = \ \widetilde
P_{\o}^{(\t)}(x) \ \, . \qquad \qquad
\end{equation}
Our first goal is to show that the probability
measures $\, d\, P_{\o}^{(\t)}(x)$ and
$\, d\, \widetilde P_{\o}^{(\t)}(x)$
are ``typically'' -- in a suitable stochastic sense -- asymptotic to
each other as $\t\to \infty $.
This statement is not true for some $\o$, and it is not difficult
to construct examples of configurations for which it doesn't hold.
We note that it is easy to show that the step described by the graph of
$P_{\o}^{(\t)}(\cdot)$ remains tight, in the sense that
the width of the
intervals $\{ x\, : \delta \le P_{\o}^{(\t)}(x) \le 1-\delta \} $
does not spread indefinitely, as $\t \to \infty$.
\begin{lem} \label{lem:maxG}
For any quasi-stationary measure $\mu$:
\be \label{eq:maxG}
\E_{\mu}\left( \int_{-\infty}^{\infty}
\sup_{n}P(S_{\tau} \geq x - x_n) \ d\, P_{\o}^{(\t)}(x)
\right) \ \too{\t \to \infty} \ 0
\end{equation}
Furthermore,
\be
\E_{\mu}\left( \sup_{x} | \widetilde P_{\o}^{(\t)}(x) -
P_{\o}^{(\t)}(x) | \ \right) \
\too{\t \to \infty} \ 0 \, . \label{eq:PtoP}
\end{equation}
\end{lem}
\noindent{\bf Remark:}
The supremum in (\ref{eq:maxG}) is clearly attained at $n=1$
(by monotonicity). Since $d\, P_{\o}^{(\t)} (x) $ is a probability
measure, and the c.d.f. of $S_{\t}$ is a bounded function,
the statement means that the maximum typically occurs in a
region whose a priori probability of being reached by
any specific point is asymptotically zero.
\begin{proof}
Due to the spreading property of convolutions of probability measures,
(see \cite{DaVe} Lemma 9.4.1), for any $D < \infty$
$$ b(\t, D) = \sup_x P(x \leq S_{\t} < x+D) \too{\t \to \infty} \ 0.$$
Observe that $P_{\o}^{(\t)}(x) \leq \tilde P_{\o}^{(\t)}(x) \leq 1$ for
all $x$.
Let us pick $\lambda > 0$ such that
$$ e^{-x(1+\lambda x) } \leq 1 -x ~ ~ \forall x \in [0,
\frac{1}{2}].$$
Thus if $P(S_{\t} \geq x) \leq \frac{1}{2}$ we have
\be\label{PandtildeP} \tilde P_{\o}^{(\t)} (x) \leq P_{\o}^{(\t)} (x)^{1/[1 + \lambda
P(S_{\t} \geq x)]}.\ee
Suppose that $x$ is such that $P(S_{\t} \geq x) \leq \ve.$
Then
$$ \tilde P_{\o}^{(\t)}(x) - P_{\o}^{(\t)}(x) \leq \sup_{u \in [0,1]}
|u^{1/(1+\lambda \ve)} - u|.$$
Suppose that $x$ is such that $P(S_{\t} \geq x- x_1) \geq \ve.$
Let $n_0 = \frac{2}{\ve} \ln
\frac{1}{\ve}$. Then for all $\t$ large enough and for all $n \leq n_0$
$$ b(\t, - x_n) \leq \frac{\ve}{2}.$$ Consequently $$ P(S_{\t} \geq x-x_n) \geq
P(S_{\t} \geq x) - b(\t, - x_n) \geq \frac{\ve}{2}.$$
Then
$$ - \sum_n P(S_{\t} \geq x - x_n) \leq - \sum_{n = 1}^{n_0}
\frac{\ve}{2} \leq - \ln \frac{1}{\ve},$$
and therefore
\be\label{epsilonestimate} \tilde P_{\o}^{(\t)}(x) \leq e^{- \ln \frac{1}{\ve}} \leq \ve. \ee
So in this case we obtain
$$ P_{\o}^{(\t)} (x) \leq \ve.$$
\qed
\end{proof}
Putting the above together we have:
\begin{lem} \label{lem:PtildeP2}
If $\mu$ is a quasi-stationary measure, then for each $\eps>0$
\be \label{eq:PtildeP}
\mu\left( \left\{ \o \, : \,
\mbox{{\rm dist}}(d\, P_{\o}^{(\t)}, d\, \widetilde P_{\o}^{(\t)} ) \,
\ge
\eps
\right \} \right) \ \too{\t \to \infty} \ 0 \, ,
\end{equation}
where {{\rm dist}} is the distance between the two measures, defined
as
\be
\mbox{dist}(d\, P, d\, \widetilde P) \ = \
\sup_{h} \{ |\int h(x) d\, P(x) - \int h(x) d\, \widetilde P(x)| /
\|
h
\|_{\infty} \} \, .
\end{equation}
\end{lem}
\begin{proof}
The distributions $d\, P_{\o}^{(\t)}(x) $ and $d\,
\widetilde P_{\o}^{(\t)}(x) $ can be written as:
\bea
d\, P_{\o}^{(\t)}(x) \ & =& \ \sum_{k}
\frac{d\, P(S_{\tau} \geq x-x_k) }{1- P(S_{\tau} \geq x-x_k) } \, \,
\Pi_{n} [1- P( S_{\tau} \geq x-x_n) ] \nonumber \\
\\
d\, \widetilde P_{\o}^{(\t)}(x) \ & =& \ \sum_{k}
d\, P(S_{\tau} \geq x-x_k) \, \times
\exp \{ -\sum_{n} P(S_{\tau} \geq x-x_n) \} \nonumber \, .
\end{eqnarray}
By Lemma ~\ref{lem:maxG} we obtain that
$$ | P_{\o}^{(\t)}(x) - \tilde P_{\o}^{(\t)}(x) | \leq \ve ~~ \forall x.$$
If $P(S_{\t} \geq x) \leq \ve$, then we obtain by the same arguments as in the previous Lemma that $$| d \tilde P_{\o}^{(\t)} (x) - d P_{\o}^{(\t)} (x) | \leq \ve d \tilde P_{\o}^{(\t)}(x).$$ Integrating with respect to $\frac{h}{||h||_{\infty}}$ over the $x$ such that $P(S_{\t} \geq x) \leq \ve$, we conclude that the contribution of this region to the distance is at most $O(\ve)$.
If $ P(S_{\t} \geq x) > \ve$ then
$$ \Pi_{n \neq k} [1 - P(S_{\t} \geq x-x_n)] \leq \ve, ~{\rm and}
~e^{-\sum_n P(S_{\t} \geq x-x_n)} \leq \ve.$$
Consequently for such $x$ $$\int \frac{h(x)}{||h||_{\infty}} d \tilde P_{\o}^{(\t)} (x) \leq
\sqrt{\ve} \int \sum_n d P(S_{\t} \geq x -x_n) e^{-\frac{1}{2} \sum_n P(S_{\t} \geq x -x_n)} \leq {\rm const} \sqrt{\ve},$$
and also using (\ref{PandtildeP})
$$ \int \frac{h(x)}{||h||_{\infty}} d P_{\o}^{(\t)} (x) \leq \sqrt{\ve} \int d\sum_n P(S_{\t} \geq x -x_n) e^{-\frac{1}{2} \sum_n P(S_{\t} \geq x-x_n)} \leq {\rm const} \sqrt{\ve}.$$
\qed
\end{proof}
\masubsect{`Poissonization' -- the proof}
We are now ready to prove the main result of this section.
% \begin{proof}
\noindent {\bf Proof of Theorem ~\ref{th:approxpoisson} }
Due to the quasi-stationarity of the measure $\mu$, one may evaluate
$\widetilde G_\mu (f)$ by taking the average of the future expectation
value of $ e^{- \sum_n f(y_1-y_n)}$, corresponding to the configuration
$\o$ as it appears at time $t=0$.
In the following argument we fix the (non-negative)
``test function'' $f $, and take $D<\infty$ such that ${\rm supp} f
\subset [-D,0]$.
In the approximations which follow we use the fact that
$e^{-\sum f(y_1-y_n)}$ is a bounded function ($\le 1$),
which is integrated against a probability measure. As before,
$\o$-dependent quantities are denoted $o(1)$ if in the limit
$\t \to \infty$ they tend to $0$ `in law', i.e., the probability
distribution which they inherit from $\o$ becomes asymptotically
concentrated on $[0, \eps]$, for any $\eps>0$.
The conditional expectation of the future value of $e^{-\sum f(y_1-y_n)}$, conditioned
on the initial configuration $\o$ is:
\be\label{integral}
\begin{array}{l}
\E_{\o}\left( \, e^{-\sum f(y_1-y_n)} \right) \ = \\ \\
\quad = \int_{-\infty}^{\infty} e^{-f(0)}\, \sum_k d P(S_{\t} \geq x -x_k) \Pi_{n\not= k}
[1 - P(S_{\t} \geq x - x_n)] \Pi_{n \neq k} \frac{ \int_{-\infty}^x
e^{-f(x-y)} d P(S_{\t} \geq y - x_n)}{ (1 - P(S_{\t} \geq x -x_n))} ,
\end{array}
\end{equation}
where $d P(S_{\t} \geq x - x_k)$ is the probability that the $k$th
particle is at $x$ at time $\t$,
$\Pi_{n \neq k} [1 - P(S_{\t} \geq x-x_n)]$ is the probability that
other particles are at $(- \infty, x]$ at time $\t$,
$\frac{ \int_{-\infty}^x
e^{-f(x-y)} d P(S_{\t} \geq y - x_n)}{ (1 - P(S_{\t} \geq x -x_n))}$
is the expectation of $e^{- f(x-y_n)}$ given that the particle which is at $x_n$ at $t=0$ is
at $(- \infty, x]$ at time $\t$.
As in the previous discussion, the contribution of $x$ such that
$P(S_{\t} \geq x) \geq \ve$ to the integral in (\ref{integral}) is
negligible.
Consider $x$ such that $P(S_{\t} \geq x) \leq \ve$.
We can write
\begin{eqnarray} \Pi_{n \neq k} \frac{\int_{-\infty}^{x} e^{-f(x-y)} d P(S_{\t} \geq y -x_n)}{ 1
- P(S_{\t} \geq x -x_n)}& =&
\Pi_{n \neq k} \left[ 1 - \frac{\int_{-\infty}^x (1 - e^{-f(x-y)}) d P
(S_{\t} \geq y -x_n)}{1 - P(S_{\t} \geq x -x_n)} \right] \nonumber \\ & = &
(1 + o(1))\, e^{- \int_{-\infty}^x (1 - e^{-f(x-y)}) \, d (\sum_n P(S_{\t}
\geq y - x_n))}.\nonumber \\ && \qquad
\end{eqnarray}
Integrating over $x$, and using Lemma~\ref{lem:PtildeP2} to replace
$d\, P_{\o}^{(\t)}$ by $d\, \widetilde P_{\o}^{(\t)} = d\, [e^{-F_{\o;\t}}]$,
we recognize the right side of (\ref{eq:poisson}).
As noted in~(\ref{eq:Norm}), the normalizing shift has no
effect on $\widehat G_F(f)$. The result is the first of the two equations
in~(\ref{eq:approxpoisson}). The second equation is an
immediate corollary of the first one, since
\be
g*F_{\o;\t} \ = \ F_{\o;\t+1} \, \, .
\end{equation}
\qed
For a later use, let us note that the arguments used in the above
discussion readily imply the following two bounds.
\begin{cor} \label{cor:tail2} For any $\eps >0$, there is
$W(\eps) < \infty$ such that
\be \label{eq:tail2}
\E_{\mu} \left( \int_{|x|> W(\eps)} d\,
[e^{-\N\, F_{\o;\t}(x) }] \right)
\ \le \ \eps
\end{equation}
and
\be \label{tailgF}
\E_{\mu} \left ( \int_{|x| > W(\ve)} d\ [e^{- g* \N F_{\o;\t}(x)} ] \right) \leq \ve.
\ee
\end{cor}
\begin{proof}
Let $f = \I_{[0, W(\ve)]}.$ Denote
\begin{equation}\label{defofphi}
\phi(W(\ve)) = \E_{\mu}[e^{-\sum_n \I_{[0, W(\ve)]}(y_1-y_n)}].
\end{equation}
Since $\I_{[0, W(\ve)]} (x) \too{W(\ve) \to \infty} 1$ for $x \in \R$ and
since, in a typical configuration, the
number of particles within distance $W(\ve)$ behind the leader
increases to $\infty$ as $W(\ve)$ increases,
$\phi(W(\ve))$ must decay monotonically to $0$ as $W(\ve)$ increases. By taking
$f = \I _{[0, W(\ve)]}$, we see that
\begin{eqnarray}\label{compact6}
&& \phi(W(\ve)) = \int \mu (d\o) \int_{-\infty}^{\infty} d e^{-\N
F_{\o;\t}(x)} \times e^{-(1-e^{-
1})(\N F_{\o;\t}(x-W(\ve))-\N F_{\o;\t}(x))} + O (\ve_{\t}).\nonumber \\ && \qquad
\end{eqnarray}
We can get an estimate on $\N F_{\o;\t}(W(\ve))$ from~(\ref{compact6}) by
restricting the range of integration from $W(\ve)$ to $\infty$ and using
that $\N F_{\o;\t}(x-W(\ve))-\N F_{\o;\t}(x) \leq 1.$ Then, for $x \geq W(\ve)$ we obtain
\bea\label{compact7}
\phi(W(\ve)) &\geq& \int \mu(d\o) \int_{W(\ve)}^{\infty} d e^{-\N
F_{\o;\t}(x)} \times e^{-(1-e^{-1})(\N
F_{\o;\t}(x-W(\ve))-\N F_{\o;\t}(x))}+ O (\ve_{\t}) \nonumber \\
&\geq& e^{-(1-e^{-1})}\int \mu(d\o) (1-e^{-\N F_{\o;\t}(W(\ve))})+ O
(\ve_{\t}).
\eea
Similarly, by restricting the range of integration from $0$ to
$\infty$ and using that $\N F_{\o;\t}(x-
W(\ve))-\N F_{\o;\t}(x) \leq \N F_{\o;\t} (-W(\ve))$ for $x \geq 0$ we obtain
\bea\label{compact8}
\phi(W(\ve)) &\geq& \int \mu(d\o) \int_{0}^{\infty} d e^{-\N
F_{\o;\t}(x)} \times e^{-(1-e^{-1})(\N
F_{\o;\t}(x-W(\ve))-\N F_{\o;\t}(x))}+ O (\ve_{\t}) \nonumber \\
&\geq& (1-e^{-1}) \int \mu(d\o) e^{-(1-e^{-1})\N F_{\o;\t} (-W(\ve))}+ O
(\ve_{\t}).
\eea
(\ref{compact7}) and (\ref{compact8}) prove the first part of the Corollary.
To prove (\ref{tailgF}) we observe that from the previous part it follows that for all $\t$ large enough, and sufficiently large $W(\ve)$
$$\E_{\mu} \int_{|x| \geq W(\ve)/2} d e^{-\N F_{\o; \t+1}} (x) \leq \frac{ \ve}{2}.$$
Since for sufficiently large $W(\ve)$ and $\o$ in a set of measure $1 - \frac{\ve}{2}$
$$ z_{\o,\t+1} -z_{\o,\t} \leq \frac{W(\ve)}{2}, $$ we obtain that
$$\E_{\mu} \int_{|x| \geq W(\ve)} d e^{- g*\N F_{\o;\t} (x)} \leq \ve.$$
\qed
\end{proof}
Corollary~\ref{cor:tail2}
will be used for an approximation of
$\widehat G_{\N F_{\o;\t} }(f)$
by a quantity which has better continuity properties as a functional
of $F$.
\masect{The Poisson density as a Laplace transform of a random
positive measure} \label{sec:Laplace}
We shall next show that the quasi-stationary measure $\mu$ can
be presented as equivalent to a random Poisson process whose density
is the
Laplace transform of a random positive measure on $\R$. (Due to
the invariance of $\widetilde{\cal B}$ under uniform shifts,
with no additional restriction the measures
may be adjusted so that $\rho(\R)=1$).
Let $\M$ be the space of finite measures on
$[0,\, \infty)$.
To each $\rho \in \M$ we associate the
Laplace transform function:
\begin{equation}
R_{\rho}(x) \ = \ \int_{0}^{\infty} e^{-xu} \, \rho(du) \, .
\end{equation}
We denote by $\F_L$ the space of such functions, i.e.,
$\F_{L} = \{ R_{\rho}(\cdot) \, | \, \rho\in \M \}$ .
We shall need to consider ``ensemble averages'' over randomly chosen
elements
of $\M$. These are described by probability measures on $\M$,
which
would always be understood to be defined on the natural
$\sigma$-algebra on $\M$, for which the measures of intervals,
$\rho([a,b]) $, are
measurable functions of $\rho$. Our goal in this section is to
prove the following statement.
\begin{thm}\label{th:accpts} Under the assumptions of
Theorem~\ref{th:main}, there exists a probability measure,
$\nu(d\rho)$, on $\M$ such that for any compactly supported
positive function $f$ on $\R$:
\begin{equation} \label{eq:lapoisson1}
\widetilde G_\mu(f) \ = \ \int_{\M} \nu(d\rho)
\, \widehat G_{R_{\rho}}( f ) \, ,
\end{equation}
and furthermore
\begin{equation} \label{eq:lapoisson2}
\widetilde G_\mu(f) \ = \ \int_{\M} \nu(d\rho)
\, \widehat G_{R_{\rho} * g}( f ) \, .
\end{equation}
\end{thm}
For Laplace transform functions $F= R_{\rho}$,
shifts correspond to transformations of the form:
\begin{equation}
\rho(du) \Longrightarrow e^{-\alpha u} \, \rho(du) \, ,
\end{equation}
and the normalization condition (\ref{eq:norm}) corresponds
to $\rho(\R)=1$, i.e., $\rho \in \M$ being a {\em probability
measure}.
In view of the invariance (\ref{eq:Norm}), this normalization
condition may be freely added as a restriction of the support
of $\nu(d\rho)$ in the statement of Theorem~\ref{th:accpts}.
While the result presented in the previous section required only
quasi-stationarity, we shall now make use of the additional
assumptions listed in the main Theorem (Thm.~\ref{th:main}).
In the derivation of Theorem~\ref {th:accpts} we shall apply
what may be regarded as the principle of the equivalence of
ensembles, in the language of statistical mechanics. Specifically,
we need the following result, which, as is explained in the Appendix,
is a refinement of the `Bahadur-Rao theorem' of large deviation
theory.
\noindent {\bf Theorem A.$1$}
{\it Let $u_{1}, u_{2}, \ldots $ be IID random
variables with expectation $E_g u$ and a common probability
distribution $g(u)$, which has a density and a finite moment generating function,
$\int e^{\eta\, u } \, g(u) du \equiv e^{\Lambda(\eta)} < \infty$ for all $\eta$.
Then, for any $0< K < \Lambda'(\infty)$ and $0 < \beta < \frac{1}{2}$
there is $\eps_{\t;K,\beta} \too{\t \to \infty } 0$ such that
for all \ $q\in [E_g u \, , \, K ] $ and $|x| \leq
\t^{\beta}$:
\begin{equation}
\frac{\mbox{Prob}\left( \left\{ u_{1}+u_{2}+\ldots+u_{\t} \ge
x\, +\, q \, \t \right\} \, \right) \, }
{\mbox{Prob}\left( \left\{
u_{1}+u_{2}+\ldots+u_{\t} \ge q \, \t \right\} \,
\right)} \ = \
e^{-\eta \, x } \, [1 + O(\, \eps_{\t;K,\beta} ) \, ]
\, ,
\label{eq:ratio}
\end{equation}
with $\eta= \eta(q) $ determined by the condition
\be
q \ = \ \frac{\int u \, e^{ \eta\, u } \, g(u) du }
{ \int \, e^{\eta\, y } \, g(y) dy } \, .
\end{equation}
}
In our analysis we shall need a bound on the front velocity, and
on the possible propagation of particles from the far tail.
\begin{lem}\label{lem:cutoff}
Let $\mu$ be a quasi-stationary $g$-regular measure with a density satisfying
the assumptions (\ref{exp-condition}) and (\ref{expbound}) of
Theorem~\ref{th:main}. Then: \\
1. For any $\t$ large enough, for $\omega$ in a set of
measure $1- \ve$
\begin{equation}\label{eq:z}
z_{\o;\t} \leq \frac{S}{2 \lambda} \t + {\rm const}, ~{\rm where}~ S= \ln \int e^{
2 \lambda x} g(x) dx.
\end{equation}
2. There exist $\alpha_{\mu}(M)$ and $\beta_g(\t)$
such that the probability of the complement of the event
\begin{eqnarray}\label{notationA}
A_{\tau; D,K,M}=\{\omega: \,
\begin{array}[t]{l}
\mbox{the configuration obtained after $\tau$ steps will have not
more than $M$ } \\
\mbox{particles with $y_n \ge y_1-D \, $, and all of them made a total jump} \\ \mbox {less than
$K \t + z_{\o,\t} - x_n$ in time from $0$ to $\t$. } \}
\nonumber
\end{array}
\end{eqnarray}
satisfies
\be \label{eq:cutoff}
\mbox{Prob}(A_{\tau; D,K,M}^{c}) \ \le \ \alpha_{\mu}(M,D) \, + \,
\beta_g(\t) \, + \,
C_{g,\mu} \, e^{-\delta (K -K_0) \t }\end{equation}
with $\alpha_{\mu}(M,D) \too{M \to \infty} 0$ for each $D<\infty$,
$\beta_g(\t) \too{\tau \to \infty} 0$, and $\delta >0$.
\end{lem}
\noindent {\bf Remark:} In the proof below we shall apply the last
bound in the double limit: $\lim_{K\to \infty} \lim_{\t \to \infty}
$,
with $M$ chosen so that $1 \ll M \ll K$.
\begin{proof}
\noindent {\em 1.\/} By (\ref{expbound}) and the Markov inequality
$$ P_{\mu} ( \sharp \{ n \, : \, - x_n \leq m \} \geq e^{2 \lambda m} ) \leq
e^{- \lambda m}.$$
Therefore by the Borel-Cantelli lemma
$$P_{\mu} \{ \sharp \{ n \, : \, - x_n \leq m \} > e^{2 \lambda m } ~{\rm
i.o.}\} = 0.$$
This implies that for any $\ve$ there exists $m_0$ such that
on a set of $\omega$ of measure $1 - \ve$, $\sharp \{ n \, : \, - x_n \leq m
\} \leq e^{2 \lambda m}$ for all $m \geq m_0$.
Using the definition of $F_{\o;0}$ we obtain
\be\label{boundF} F_{\o;0}(x) \leq e^{-2 \lambda \min(x, -m_0)}, ~\forall ~x < 0.\ee
Therefore
\be\label{boundF1} F_{\o;\t} (x) \leq F_{\o;0}*g^{(*\t)} (x) \leq {\rm const} \int e^{-2 \lambda (x-y)}
g^{(*\t)} (y) dy \leq {\rm const}\ e^{-2 \lambda x + \t S}.
\ee
For $x = \frac{S \t}{2 \lambda} + {\rm const}$ we thus obtain $F_{\o;\t}(x) \leq 1$. It
follows by definition that $z_{\o,\t} \leq \frac{S \t}{2 \lambda} + {\rm const}$.
\noindent {\em 2.\/}
The probability that the first condition does not hold in the
definition of $A_{\tau; D,K,M}$ is, by the quasi-stationarity of $\mu$,
\be
\alpha_{\mu}(M,D) \ = \
\mu( \omega:{\rm more~than~M~ particles~ are~ within~ distance~ D
~of~ the~ leader} {\rm ~at}~ t=0) \, .
\end{equation}
This quantity vanishes for $M\to \infty$ because the number of
particles in $[y_1-D, y_1]$ is almost surely finite.
To estimate the remaining probability of the complement of the event
$A_{\tau ; D,K,M}$ we split it into two cases, based
on the distance which the front advances in time $\t$.
That distance is at least the total displacement of the particle
which is initially at $0$. The probability that this displacement is
by less
than $(E_g u - 1) \, \t$ is dominated by the quantity:
$$ {\rm Prob} (z_{\o,\t} \leq (E_g u -1) \t) \leq {\rm Prob} (S_{\t} \leq (E_g u-1) \t) = \beta_g (\t).$$
The choice of $1$ is somewhat arbitrary, but even so, standard
large deviation arguments which are applicable under the assumption
(\ref{exp-condition}) imply that $\beta_g(\t)$ decays
exponentially.
%%%
The contribution of the other case is bounded by the probability of the following event:
\bea
&& \mbox{Prob}\left(
\begin{array}{l}
\mbox{at least one of the particles of $\o$ will advance in $\tau$ steps } \\
\mbox{a distance greater than $[- x_n + (E_g u \, -1
+ K) \tau ]$ } \\
\end{array}
\right) \ \le \nonumber \\
&& \le \ \E_\mu \left(
\sum_n \mbox{Prob}\left( \, S_\tau \ge - x_n + (E_g u
\, -1 + K) \tau \, \right ) \, \right) \ \le \nonumber \\
&& \le \ \E_\mu \left( \sum_n \E_g( e^{\alpha S_\tau}) \, e^{-\alpha [
- x_n + (E_g u \, -1 + K) \tau ]} \, \right) \ \le
\nonumber \\
&& \le \ \left[ \int_\R \ e^{\alpha (u - E_g u
)} \, g(u) du
\, e^{-\alpha (K-1)}\right]^\tau \,
\E_\mu\left(\sum_n e^{-\alpha [
- x_n] }\right),
\eea
where
$\alpha >0$ is an adjustable constant. The last factor is finite
for $ \alpha > \lambda $ since
under the assumed exponential
bound (\ref{expbound}):
\bea
\E_\mu\left(\sum_n e^{-\alpha [
- x_n] }\right) \ & =& \ \E_\mu\left( \alpha \int dy \,
e^{-\alpha y }
\sum_n \I[ y\ge -x_n ] \, \right) \ \le \nonumber
\\
&\le & \ \alpha \int dy \, e^{-\alpha y } \, A \, e^{\lambda y}\
= \ \frac{A \, \alpha}{\alpha- \lambda} \, .
\eea
The claimed
estimate readily follows (choosing $\lambda < \alpha $, and
defining $\delta >0$ correspondingly).
\end{proof}
\noindent {\bf Proof of Theorem~\ref {th:accpts}: }
Applying Theorem~A.$1$ to the function defined by
(\ref{defofF}), we find that
\bea\label{definerho1}
&& \N \, F_{\o;\t}(x) = \sum_n
P(S_{\tau} \geq x+z_{\o;\t} -x_n) \nonumber \\
&=& \ \sum_{- K(\ve) \t \leq x_n \leq 0}
P(S_{\tau} \geq z_{\o;\t} -x_n) \,
\frac{ P(S_{\tau} \geq x+ z_{\o;\t} -x_n) }
{ P(S_{\tau} \geq z_{\o;\t} -x_n) } +
\sum_{x_n \leq - K(\ve) \t} P(S_{\t} \geq z_{\o,\t} - x_n + x )\nonumber \\
\ &=& \ \sum_{-K(\ve) \t \leq x_n \leq 0}
P(S_{\tau} \geq z_{\o;\t} -x_n) \ e^{-\eta(\frac{z_{\o;\t}
-x_n}{\t} )\, \cdot \, x } \ [1+ O(\ve_\t)] +
\sum_{x_n \leq - K(\ve) \t} P(S_{\t} \geq z_{\o,\t} - x_n + x ) \nonumber \\
&=& \ \int_{0}^{\infty} \rho_{\o;\t}(du) \ e^{-u \, x} \ [1+ O(\ve_{\t})] +
\sum_{x_n \leq - K(\ve) \t} P(S_{\t} \geq z_{\o,\t} - x_n + x ) \,
,
\end{eqnarray}
with $\rho_{\o;\t}(du) $
defined as the probability measure with weights
$ P(S_{\tau} \geq z_{\o;\t} -x_n) $ at the points
$\eta(\frac{z_{\o;\t} -x_n}{\t} )$.
We will now estimate the remainder term $\sum_{x_n \leq - K(\ve) \t} P(S_{\t} \geq z_{\o,\t} - x_n + x ).$
In the case when $\lim_{\eta \to \infty} \Lambda'(\eta) < \infty$ (i.e., when the supremum of the support of $g(x)$ is finite) the remainder term is zero for large $K(\ve)$ (for example if $K(\ve) \geq \Lambda'(\infty)$ and $x = O(\t^{\beta})$).
In the case when $\lim_{\eta \to \infty} \Lambda'(\eta) = \infty$ the remainder term can be estimated using large deviation arguments.
By using (\ref{eq:br2a}) in Theorem A.$1$ and (\ref{boundF}) we obtain
\bea\label{tailestimate}
&&\sum_{x_n \leq - K(\ve) \t} P(S_{\t} \geq z_{\o,\t} - x_n + x )\leq
\int_{K(\ve) \t}^{\infty} P( u_{1} + \ldots + u_{\t} \geq y + z_{\o,\t} + x) e^{2 \lambda y} dy \nonumber \\ && = \int_{K(\ve) \t}^{\infty}
e^{- \t {\Lambda^*} (\frac{y + z_{\o,\t} + x}{\t})}
\left[ \int _{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{y + z_{\o,\t} + x}{\t})) t}
d Q^{(\eta(\frac{y}{\t}))}_{\t} (t) \right] e^{2 \lambda y} dy = O(\ve_{\t})
.\eea
The last equality in (\ref{tailestimate}) follows because by convexity of $\Lambda^*$
$$ \Lambda^*(\frac{y + z_{\o,\t} + x}{\t}) \gg \frac{2 \lambda y}{\t} ~{\rm for~all}~ y \geq K(\ve) \t,$$ and because the factor in the square brackets in (\ref{tailestimate}) is small.
Therefore
\be\label{definerho} \N F_{\o;\t} (x) = \int_{0}^{\infty} e^{ - u x} \rho_{\o;\t} (d u) (1 + O(\ve_{\t})).\ee
We observe that \bea\label{FandR}
&& |\N F_{\o;\t} (x) - R_{\rho} (x) | \leq \ve R_{\rho}(x) ~{\rm and }~ |\N F'_{\o;\t} (x) - R'_{\rho}(x)| \leq \ve R'_{\rho} (x), \nonumber \\ && |g*\N F_{\o;\t} (x) - g* R_{\rho} (x) | \leq \ve g* R_{\rho}(x) ~{\rm and }~ |(g*\N F_{\o;\t})' (x) - (g*R_{\rho})'(x)| \leq \ve (g * R_{\rho})' (x) . \nonumber \\ && \qquad \eea
Using (\ref{FandR}) and Corollary~\ref{cor:tail2} we obtain \bea\label{modifiedG}
\tilde G_{\mu}(f) &=& \int_{-W(\ve)}^{W(\ve)} d e^{-R_{\rho}(x)} e^{-\int_{-\infty}^{x} (1 - e^{-f(x - y)} ) (-d R_{\rho}(y))} + \ve = \widehat G_{W(\ve), R_{\rho}}(f) + \ve \\ &=&
\int_{-W(\ve)}^{W(\ve)} de^{-g*R_{\rho}(x)} e^{-\int_{-\infty}^{x} (1 - e^{-f(x-y)}) (-d g*R_{\rho}(y))} + \ve = \widehat G_{W(\ve), g*R_{\rho}}(f) + \ve \nonumber.\eea
((\ref{modifiedG}) will serve as a definition of $\widehat G_{W(\ve), R_{\rho}}(f)$.)
From (\ref{definerho}) we observe that for all $\o$ in a set of
measure $1 - \ve$ and for every $K \gg 1$ there exists a $K_1 \gg 1$ depending on $K$ such that
\begin{equation}\label{defineK1}
\int_{\eta(E_g u +K)}^{\infty} e^{D\, u} \
\rho_{\o;\t}(du) \leq \sum_{x_n \leq - K_1 \tau}
P(S_{\tau} \geq x+z_{\o;\t} -x_n).
\end{equation}
We can choose $K_1$ by requiring that for all $x_n \geq - K_1 \t$, $$\frac{ z_{\o,\t} -
x_{n}}{\t} \leq E_g u + K;$$ for example, using (\ref{eq:z}), one may take $K_1 = E_g u + K -\frac{S}{2 \lambda}$.
From
Lemma~\ref{lem:cutoff} (see equation (\ref{eq:cutoff})),
applied with $M=\sqrt{K}$ (or any other choice with $1 << M << K$),
we find that under the assumptions listed above, for any
$D< \infty$ there exist \
$\eps_{D}(K)$ \ with which:
\bea \label{eq:tailbound}
&&\limsup_{\t \rightarrow \infty }
\ \E_{\mu} \left( \
\int_{\eta(E_g u +K)}^{\infty} e^{D\, u} \
\rho_{\o;\t}(du) \
\right ) \ \leq \\
&& \leq \limsup_{\t \rightarrow \infty }
\ \E_{\mu} \left(
\sum_{x_n \le -K_1\, \t } P(S_{\tau} \geq z_{\o ; \t} -x_n -D ) \
\right ) + \eps_{D}(K) \ \equiv \
\tilde \eps_{D}(K) \ \too{K\to \infty} 0
\nonumber \, .
\end{eqnarray}
The correspondence $\o \longmapsto \rho_{\o;\t} $ defines
a mapping from the space of configurations $\O$ into the space
$\M$, of measures on $\R$, with values restricted
to the subset of probability measures. Corresponding to this
map is one which takes the measure $\mu$ on $\O$ into a
probability measure on $\M$ which we shall denote by
$\nu_{\t}$.
By this definition, for any measurable function
$ \ \Chi: \ \M \rightarrow \R$
\be
\int_{\M} \Chi(\rho) \ \nu_{\t}(d \rho) \ =
\ \int_{\O} \Chi(\rho_{\o;\t})\mu(d\o) \ \, .
\end{equation}
The space of probability measures on compact subsets of $\R$ is
compact, and
so is the space of probability measures on this space.
While we do not have
such compactness (since the measures of $\M$ are defined over the
non-compact $\R$), equation (\ref{eq:tailbound}) with any fixed
$D>0$ implies, that the
sequence of measures $\nu_{\t}$ is {\em tight} and that it has a
subsequence $\nu_{\t_n}$ which converges in the corresponding
`weak topology' as $\t_n \to \infty$.
Let $\nu$ be a limit of such a subsequence.
(To prove the tightness of $\nu_{\t_n}$ we observe that it is possible to show that for all $\t$, $R_{\rho_{\o;\t}}(x) \leq M(x)$ for some function $M(x)$ except for $\o$ in a set of measure $\ve$. The set of $\rho$ for which $R_{\rho}(x) \leq M(x)$ is compact.)
We claim that for
every positive $f$ of compact support, ${\rm supp} f \subset
[-D,0]$,
\bea \label{eq:tata!}
\int_{\M} \left[ \widehat G_{W(\ve), R_{\rho}}( \, f\, ) \right] \
\nu(d \rho) \ +\ve &=& \lim_{n\to \infty}
\int_{\M} \left[ \widehat G_{W(\ve), R_{\rho}}( \, f\, ) \right] \
\nu_{\t_n}(d \rho) +\ve
\ = \ \widetilde G_\mu ( f ) ,
\nonumber \\
\\
\int_{\M} \left[ \widehat G_{W(\ve), g*R_{\rho}}( \, f\, ) \right] \
\nu(d \rho) + \ve &=& \lim_{n \to \infty}
\int_{\M} \left[ \widehat G_{W(\ve), g*R_{\rho}}( \, f\, ) \right] \
\nu_{\t_n}(d\rho) +\ve
\ = \ \widetilde G_\mu( f ). \nonumber
\end{eqnarray}
The weak convergence means that for any {\em continuous} function
$ \ \Chi: \ \M \rightarrow \R$ :
\bea
\int_{\M} \Chi(\rho) \ \nu(d \rho) &=&
\ \lim_{n\to \infty} \int_{\M} \Chi(\rho) \ \nu_{\t_n}(d
\rho) \nonumber \\
&=& \lim_{n\to \infty}
\int_{\O} \Chi(\rho_{\o;\t_n})\ \mu(d\o) \ \, .
\end{eqnarray}
The continuity argument does not apply immediately to the
function which we are interested in:
\be \label{eq:psi2}
\ \widehat G_{W(\ve), R_{\rho}}( f ) \ = \
\int_{-W(\ve)}^{W(\ve)} d\, [e^{-R_\rho (x)}] \
e^{-\int_{x-D}^{x}[1-e^{-f(x-y)}] d\,(- R_\rho (y)) } \ ,
\end{equation}
which is not continuous in $\rho$.
However, $\widehat G_{W(\ve),R_{\rho}} (f) $
can be approximated arbitrarily well, in the appropriate $L_{1}$
sense,
by functionals which are continuous.
The function
$ \widehat G_{W(\eps), R_{\rho}}(f) $ is not continuous in $\rho$. The
difficulty is that $R_{\rho}(x) $ can be affected by
small changes in the measure $\rho$ if those occur at high values of
the Laplace
variable $u$. However, we do obtain a continuous function by
replacing $R_{\rho}$ in (\ref{eq:psi2}) by $R_{\I_K \rho}$
with
\be
\I_K \rho(du) \ = \ \I_{[0, \eta(E_g u +K)]}(u) \
\cdot\,
\rho(du) \, .
\end{equation}
It is easy to see that
\bea \label{eq:limitail}
\int_{\M} \left( \
\int_{\eta(E_g u +K)}^{\infty} e^{-x u} \
\rho(du) \
\right ) \ \nu(d\rho) & \le &
\limsup_{\t \rightarrow \infty }
\ \int_{\O} \left( \
\int_{\eta(E_g u +K)}^{\infty} e^{-x u} \
\rho_{\o;\t}(du) \
\right ) \, \mu(d \o) \nonumber \\
&& \\
&\le & \ \tilde \eps_{x}(K) \ \too{K\to \infty} 0
\nonumber \, ,
\end{eqnarray}
where the first inequality is by a generalized version of Fatou's
lemma, and the second is by (\ref{eq:tailbound}).
Due to the fact that $f$ is compactly supported and the integration in $x$ is over $[-W(\ve), W(\ve)]$, the difference
$$ \int_{\M} |\widehat G_{W(\eps), R_{\rho}}(f) - \widehat
G_{W(\eps),R_{\I_K
\rho}}(f) | \ \d\nu(\rho) $$ is affected only by values of $x \in [-W(\ve)-D, W(\ve)]$.
Taking $x$ in this interval, we observe that the equation (\ref{eq:limitail}) implies that $$ \int_{\eta(E_g u + K)}^{\infty} e^{-u x} \rho(du) \leq \ve, $$ except on the set of $\omega$ of measure $\ve$.
The difference $ \int_{\M} |\widehat G_{W(\eps), R_{\rho}}(f) - \widehat
G_{W(\eps),R_{\I_K
\rho}}(f) | \ \d\nu(\rho) $ is controlled by $|R_{\rho}(x) - R_{I_K \rho}(x)| $ and by $|R'_{\rho}(x) - R'_{I_K \rho}(x)|$, which are small for $x \in [-W(\ve) -D, W(\ve)]$ except on the set of $\rho$ of measure $\ve$, since $ \int_{\eta(E_g u + K)}^{\infty} e^{-u x} \rho(du) $ is small.
One can verify by standard arguments that for $K$ finite $ \widehat
G_{W(\eps), R_{I_{K}\, \rho}} (f)$ is
continuous in $\rho$, and that this continuity and the approximation
bounds listed above imply (\ref{eq:tata!}), thereby proving the first part of
Theorem~\ref{th:accpts}. The second part is proved via similar arguments.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\masect{Monotonicity arguments} \label{sect:monotonicity}
In this section we develop some monotonicity tools, which will be
applied to prove that if a measure $\mu$ has the properties
listed in Theorem~\ref{th:accpts} then
the corresponding measures $\rho$ are $\nu$-almost surely
concentrated on points, i.e., the Poisson densities
$R_\rho$ are almost surely pure exponential.
\masubsect{The contraction property of convolutions within $\F_L$}
The space $\F$, whose elements are positive decreasing continuous functions on
$\R$, is partially ordered by the following relation.
\begin{df} For $F, G \in \F$ we say that $G$ is \underline{steeper}
than $F$ if the level intervals of $G$ are {\em shorter} than those
of $F$, in the sense that for any $0\le a \le b \le \infty$,
\begin{equation}\label{eq:steeper}
(0 \le ) \ G^{-1}(a) - G^{-1}(b) \ \le \ F^{-1}(a) - F^{-1}(b)
\, .
\end{equation}
\end{df}
We adopt the convention
that for the (monotone) functions $G\in \F$ the inverse is defined
(for $a \ge 0$) by:
\begin{equation}
G^{-1}(a) \ = \ \inf \{ \, x \in \R\, : \, G(x) \le a \} \, .
\label{eq:inverse}
\end{equation}
It is easy to see that, within the class of monotone functions
$\F$, an equivalent formulation of the relation
``$G$ is steeper than $F$'' is that for any $u>0$:
\be \label{eq:strict2}
G(x) \ = \ F(y) \qquad \Longrightarrow \qquad G(x+u) \ \le
\ F(y+u).
\end{equation}
Also equivalent is such a principle with the reversed
inequality and $u<0$.
Of particular interest for us is the subspace $\F_L$ of Laplace
transforms of positive measures. We shall show that within this
space, the convolution with a probability
measure $g(x) dx$ makes a function steeper. (It is shown below that
the appropriately shifted $R_\rho * g$ is in $\F_L$.)
A key step towards this result, which is also of independent
interest,
is the following lemma.
\begin{lem} \label{lem:mono2}
Let $F=R_{\rho^F } \in \F_{L}$ satisfy the normalization condition:
$F(0) = 1$ (i.e., $F = \N F$), and let $G=R_{\rho^G }\in \F_{L} $ be
related to it by:
\be \label{eq:GNFg}
G \ = \ \N \, ( F * g)
\end{equation}
for some probability measure $g(x) dx$. Then, for all
$\lambda \ge 0$,
\be \label{eq:concentrate}
\int_{0}^{\lambda} \rho^{G}(du) \ \le \
\int_{0}^{\lambda} \rho^{F}(du)
\end{equation}
\end{lem}
\begin{proof}
The relation between $F$ and $G$ is such that for some normalizing
constant $z\in \R$
\be
G(x) \ = \ \int_{-\infty}^{\infty} \left[ \int_{0}^{\infty}
e^{-(x-y+z) u} \, \rho^F (du)
\right] \, g(y) dy \ = \
\int_{0}^{\infty} e^{-x u} \, e^{S(u)}\, \rho^F (du)
\end{equation}
with $S(\cdot)$ defined by:
\be \label{eq:S}
e^{S(u)} \ = \ \int_{-\infty}^{\infty} e^{(y-z) u} \, g(y) dy \, .
\end{equation}
Thus
\be
\rho^{G}(d u) \ = \ e^{S(u)}\, \rho^F (du) \ \ ,
\end{equation}
and the normalization conditions $F(0)=G(0) = 1$ imply:
\begin{equation}
\int_{0}^{\infty} \, e^{S(u)} \, \rho^{F}(du) \ = \
\int_{0}^{\infty}
\, \rho^{F}(du) \ \ .
\label{eq:Snorm}
\end{equation}
The function $S(\cdot)$ is convex, which is easily verified by showing
that $S '' > 0$, by general properties of integrals
of the form (\ref{eq:S}), and satisfies $S(0)=0$ (since $g(x)dx$ is a
probability measure).
It has, therefore, to be the case that either $\rho^{F}(du)$ is
concentrated at a
point (where $S=0$), or else $S(\cdot)$ is
negative on $[0, \bar u)$ and positive on $(\bar u, \infty)$ for some $\bar u > 0$. The claimed
concentration statement (\ref{eq:concentrate}) is obviously
true for all $ \lambda \in [0, \bar u]$. For $ \lambda \ge \bar u$,
we
note that
\begin{equation}
\int_{\lambda}^{\infty} e^{S(u)} \, \rho^{F}(du) \ \ge \
\int_{\lambda}^{\infty} \, \rho^{F}(du) \, .
\label{eq:large}
\end{equation}
By subtracting ~(\ref{eq:Snorm}) from ~(\ref{eq:large}), we
find that the
claimed ~(\ref{eq:concentrate}) is valid also for $\lambda > \bar
u$.
\end{proof}
\begin{thm}\label{th:steeper}
For any $F =R_{\rho} \in \F_L$ and a probability measure $g(x)dx$ on
$\R$,
the function $\N(F*g)$ is steeper than $F$.
\end{thm}
\begin{proof}
Our goal is to derive the inequality~(\ref{eq:steeper}) for $G=\N(F*g)$
(and any $0 \le a \le b \le \infty$), using the representation:
\bea \label{eq:mono}
{\rm for} \quad x>0: \qquad
\N(F* g)\, (x) & = & x \int_{0}^{\infty} d \lambda \, e^{-\lambda x}
\, \left[ \int_{0}^{\lambda} e^{S(u)} \rho(du)\right] \ \ ,
\\
{\rm for} \quad x<0: \qquad
\N(F* g)\, (x) & = & x \int_{0}^{\infty} d \lambda \, e^{-\lambda x}
\, \left[ \int_{\lambda}^{\infty} e^{S(u)} \rho(du)\right] \ + \
\int_{0}^{\infty} e^{S(u)} \rho(du) , \nonumber
\end{eqnarray}
with the corresponding relations holding for $F$ without the factors
$ e^{S(u)} $ .
The inequalities (\ref{eq:mono}) follow now by inserting here the
relations~(\ref{eq:concentrate}), (\ref{eq:large}) and ~(\ref{eq:Snorm}).
We note that if $F$ and $\N (F*g)$ were shifted so as to be equal at a different value of $x$, then the argument above would also go through. Therefore we obtain that
$\N (F*g)$ is steeper than $F$.
\end{proof}
%%%%%%
The partial order ``$G$ is steeper than $F$'' is preserved when any
of the functions is modified by a uniform shift, and also when each
is
replaced by a common monotone function of itself, e.g., $\{ F,\, G\}
$
replaced by $\{ 1-e^{-F},\, 1-e^{-G} \} $.
Following is a useful property of this partial order (another one is
presented in Appendix~\ref{sec:monoappendix}).
\begin{lem} \label{lem:u}
Let $F, G \in \F$, be continuous and strictly monotone decreasing
functions with
$\lim_{x \to \ -\infty} F(x) =\lim_{x \to \ -\infty} G(x) = \infty$.
If $G$ is {\em steeper} than $F$ then, for any $u>0$,
\be
\int e^{-[G(x-u)-G(x)]} \, d \, e^{-G(x)} \ \le \
\int e^{-[F(x-u)-F(x)]} \, d \, e^{-F(x)} \, .
\end{equation}
Furthermore, the inequality
is strict unless $G$ is a translate of $F$ (and vice versa).
\end{lem}
\begin{proof}
The statement is a simple consequence of the following
formula, and ~(\ref{eq:strict2}):
\be
\int e^{-[F(x-u)-F(x)]} \, d \, e^{-F(x)} \, -
\int e^{-[G(x-u)-G(x)]} \, d \, e^{-G(x)} \ = \
\int_{0}^{\infty} dz \, [ e^{-F( F^{-1}(z) -u) } - e^{-G( G^{-1}(z) -u) } ]
\end{equation}
\end{proof}
An additional result related to this notion, which may be of
independent interest, is presented in Appendix~\ref{sec:monoappendix}.
\masect{Proof of the main result}
We shall now apply the monotonicity arguments for the last leg of the
proof of our main result. Theorem~\ref{th:main} is clearly implied
by the following statement (see Theorem~\ref{th:accpts}).
\begin{thm}
Let $\mu$ be a measure on the space of configurations $\O$, which
admits a representation as a random Poisson process, described by
a probability measure $\nu(d\rho)$ on $\M$ as in
Theorem~\ref{th:accpts}, for which both (\ref{eq:lapoisson1}) and
(\ref{eq:lapoisson2}) hold. Then the support of the Laplace
measure
$d\rho$ is $\nu$-almost surely a point, i.e., the
functions $R_{\rho}$ are almost-surely pure exponentials.
\end{thm}
\begin{proof} Let us consider the probability that the first gap
exceeds some $u>0$.
For a Poisson process, a simple calculation
yields:
\be \label{eq:uPoisson}
\E_{F}^{(Poisson)}( x_1 - x_2 \ge u) \ =\
\int_{-\infty}^{\infty} e^{-F(x-u) } \, (-d F(x)).
\end{equation}
Therefore
$$ \E_{\mu} (x_1 -x_2 \geq u) = \int \mu(d \omega) \int_{-\infty}^{\infty}
e^{- F(x-u)} ( - d F(x)).$$
Substituting this in (\ref{eq:lapoisson1}), or
in (\ref{eq:lapoisson2}), one obtains the corresponding
expectation
for the measure $\mu$. Subtracting the two expressions we find
that
\be \label{eq:1diff}
0 \ = \ \int_{\M} \nu(d\rho) \,
\left[ \int_{-\infty}^{\infty} e^{-R_{\rho}(x-u) } \, d
R_{\rho}(x) -
\int_{-\infty}^{\infty} e^{-R_{\rho}*g\, (x-u) } \, d
R_{\rho}*g\,(x) \right] \, .
\end{equation}
By the analysis in the previous section (Theorem~\ref{th:steeper} and
Lemma~\ref{lem:u}), the difference in the square brackets in
(\ref{eq:1diff}) is non-negative. Thus, this relation implies
that :
\begin{equation}
\int e^{-R_{\rho}(x-u) } \, d R_{\rho}(x) -
\int e^{-R_{\rho}*g\, (x-u) } \, d R_{\rho}*g\,(x) \ = \ 0 \,
\quad \mbox{ for $\nu$-almost every $\rho$.}
\end{equation}
Furthermore, by Lemma~\ref{lem:u} the equality yields that
$\nu$-almost
surely $R_{\rho}* g$ coincides with one of the
translates of $R_{\rho}$.
The only functions ($F=R_{\rho}$) with this property in $\F_{L}$
(or for that matter in $\F$, see ref.\cite{CD}) are pure
exponentials,
which correspond to $\rho$ concentrated at a point.
\end{proof}
\startappendix
\noindent {\bf \Large Appendix}
\maappendix{Useful statements from the theory of large deviations}
Our goal here is to derive Theorem~A.$1$ which was used in
Section~\ref{sec:Laplace}. Its statement may be read as an
expression of the `equivalence of ensembles' -- in statistical
mechanical terms. The following notation will be used in the Theorem.
$$ \Lambda(\lambda) \equiv \ln \E [e^{\lambda u_{1}}], \qquad
{\Lambda^*}(y) \equiv \sup_{\lambda}(\lambda y -
\Lambda(\lambda)).$$
The result we used in Section~\ref{sec:Laplace} is:
\noindent {\bf Theorem A.$1$}
{\it Let $u_{1}, u_{2}, \ldots $ be IID random
variables with a common probability distribution $g$, which has a density and
a finite everywhere moment generating function.
Then, for any $0< K < \Lambda'(\infty) $ and $0< \beta < 1/2$
there is $\eps_{\t;K,\beta} \too{\t \to \infty } 0$ such that
for all \ $q\in [E_g u \, , \, K ] $ and $|x| \leq
\t^{\beta}$:
\begin{equation}
\frac{\mbox{Prob}\left( \left\{ u_{1}+u_{2}+\ldots+u_{\t} \ge
x\, +\, q \, \t \right\} \, \right) \, }
{\mbox{Prob}\left( \left\{
u_{1}+u_{2}+\ldots+u_{\t} \ge q \, \t \right\} \,
\right)} \ = \
e^{-\eta x } \, [1 + O(\, \eps_{\t;K,\beta} ) \, ] \, ,
\label{eq:ratio1}
\end{equation}
with $\eta= \eta(q) $ determined by the condition $\eta(q) = {\Lambda^*}'(q)$.
}
\begin{proof}
We will assume that $E_g u = 0$, since we can replace the random variables $u_i$
by $u_i - E_g u$.
We will use the same notation as in the proof of the Bahadur-Rao
Theorem (see \cite{DZ}). We denote
\begin{eqnarray}
\eta(\frac{y}{\t}) &\equiv& {\Lambda^*}'(\frac{y}{\t}), \qquad
\Lambda'(\eta(\frac{y}{\t})) =
\frac{y}{\t}, \qquad
\psi_{\t} (\eta(\frac{y}{\t})) \equiv \eta(\frac{y}{\t})
\sqrt{\t \Lambda''(\eta(\frac{y}{\t}))}, \nonumber \\
Y_{i} &\equiv&
\frac{u_{i}-\frac{y}{\t}}{\sqrt[]{\Lambda''(\eta(\frac{y}{\t}))}}, \qquad
W_{\t} \equiv \frac{ Y_{1} + \ldots + Y_{\t}}
{\sqrt{\t}}, \nonumber
\end{eqnarray}
and consider a new measure $\tilde{P}$ defined by its Radon-Nikodym derivative
\begin{equation}\label{defoftildeP}
\frac{d \tilde{P}^{(\eta(\frac{y}{\t}))}}{d P} (x) = e^{x
\eta(\frac{y}{\t}) -
\Lambda(\eta(\frac{y}{\t}))}.\nonumber
\end{equation}
Let also $Q^{(\eta(\frac{y}{\t}))}_{\t}$ denote the distribution
function of $W_{\t}$ with
respect to
$\tilde{P}^{(\eta(\frac{y}{\t}))}$. It is easy to show then that
$Y_{i}$ are i.i.d. with mean $0$
and variance $1$ with respect to $\tilde{P}^{(\eta(\frac{y}{\t}))}$.
Therefore $W_{\t}$ is mean
$0$ and variance $1$ with respect to $Q^{(\eta(\frac{y}{\t}))}_{\t}$.
By analogy with the proof of Bahadur-Rao Theorem (see \cite{DZ}), we can write
\begin{equation}\label{eq:br2a}
P( u_{1} + \ldots + u_{\t} \geq y) =
e^{- \t {\Lambda^*} (\frac{y}{\t})}
\int _{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{y}{\t})) t} d
Q^{(\eta(\frac{y}{\t}))}_{\t} (t).
\end{equation}
For the further consideration, we need to estimate the ratio
\begin{equation}\label{eq:br3}
\frac{P(u_{1}+u_{2}+\ldots+u_{\t} \geq x+y)}
{P(u_{1}+u_{2}+\ldots+u_{\t} \geq y)} =
e^{{- \t {\Lambda^*} (\frac{x+y}{\t})+\t {\Lambda^*} (\frac{y}{\t})}}
\frac{\int _{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{x+y}{\t}))t}
d Q_{\t}^{(\eta (\frac{x+y}{\t}))} (t)}
{\int _{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{y}{\t})) t} d
Q_{\t}^{^{(\eta (\frac{y}{\t}))}} (t)}.
\end{equation}
By using Taylor's expansion we can estimate the exponent in equation (\ref{eq:br3})
\begin{equation}\label{eq:br4}
- {\Lambda^*} (\frac{x+y}{\t}) + {\Lambda^*} (\frac{y}{\t}) =
- \eta(\frac{y}{\t}) \left(\frac{x}{\t}\right)
+ O(\frac{1}{\t^{1-2\beta}}).
\end{equation}
To estimate the remainder term in (\ref{eq:br4}) we use the integral form of the remainder in the Taylor series, together with the bounds $\frac{y}{\t} \leq K$, $|x| \leq \t^{\beta}$, ${\Lambda^{*}}'' = \frac{1}{\Lambda''} < \infty$, the convexity of $\Lambda$, and the assumption that the Laplace transform of $g$ is finite.
It remains to show that the prefactor in the equation (\ref{eq:br3}) is
\begin{equation}\label{defofrxy}
r (x,y) = \frac{\int _{0}^{\infty} e^{-\psi_{\t}
(\eta(\frac{x+y}{\t})) t} d Q^{(\eta
(\frac{x+y}{\t}))}_{\t} (t)}
{\int _{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{y}{\t})) t} d
Q^{(\eta (\frac{y}{\t}))}_{\t} (t)} = 1 +
O (\ve _{\t}).
\end{equation}
By the Berry--Esseen Theorem (see~\cite{Fl}),
\[
\sup_x |Q^{(\eta)}_{\t} (x) - \int _{-\infty}^{x}
\frac{e^{-t^{2}/2}}{\sqrt{2 \pi}} dt|
\leq \frac{33}{4} \frac{\E u_{1}^{3}}{(\Var u_{1})^{3/2}}
\frac{1}{\sqrt{\t}} = O (\frac{1}{\sqrt{\t}}).
\]
Therefore
\begin{equation}\label{proofasymptotic2}
\int_{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{x+y}{\t})) t} d Q^{(\eta
(\frac{x+y}{\t}))}_{\t} (t) = \int
_{0}^{\infty}\frac{e^{-\psi_{\t} (\eta(\frac{x+y}{\t}))
t-t^{2}/2}}{\sqrt[]{2 \pi}}
dt + O (\frac{1}{\sqrt{\t}})\left(\psi_{\t}\left(\eta(\frac{x+y}{\t})\right) + O(1)\right).
\end{equation}
This formula is especially useful when $\psi_{\t} \leq O (1)$
(i.e. when $\eta$ is small) and the first term on the
right hand side of~(\ref{proofasymptotic2}) is much larger than the
second term.
In this case we obtain
\begin{equation}\label{proofasymptotic3}
r (x,y) = \frac{\int _{0}^{\infty}
e^{-\psi_ {\t}(\eta(\frac{x+y}{\t})) t-t^{2}/2} dt + O
(\frac{1}{\sqrt{\t}})}
{\int _{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{y}{\t})) t-t^{2}/2} dt + O
(\frac{1}{\sqrt{\t}})}.
\end{equation}
If $y$ is such that $O (1)
\leq \psi_{\t} \leq O ({\t}^{\frac{1}{2}})$, we write the integral
as $${\int _{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{y}{\t})) t} d
Q^{(\eta (\frac{y}{\t}))}_{\t} (t)} = {\int _{0}^{\infty}
e^{-\psi_{\t} (\eta(\frac{y}{\t})) t} q^{(\eta (\frac{y}{\t}))}_{\t}
(t)} dt, $$ where $q_{\t}$ is the density of
$Q_{\t}$. By the analog of
Berry-Esseen Theorem for densities (see \cite{Fl}):
\begin{equation}\label{proofasymptotic100}
\sup_x |q_{\t} (x) - \frac{1}{\sqrt{2 \pi}}e^{-x^{2}/2}|
= O (\frac{1}{\sqrt{\t}}), \quad \mbox{as } \t \rightarrow \infty.
\end{equation}
From
(\ref{proofasymptotic100}) we obtain
\begin{equation}\label{proofasymptotic101}
r (x,y) = \frac{ \int _{0}^{\infty}
e^{-\psi_{\t} (\eta(\frac{x+y}{\t})) t-t^{2}/2} dt +
\frac{1}{\psi_{\t}(\eta(\frac{x+y}{\t}))} O (\frac{1}{\sqrt{\t}})}
{\int _{0}^{\infty} e^{-\psi_{\t} (\eta(\frac{y}{\t})) t-t^{2}/2} dt
+ \frac{1}{\psi_{\t} (\eta(\frac{y}{\t}))} O (\frac{1}{\sqrt{\t}})}.
\end{equation}
The proof of (\ref{defofrxy}) now consists of showing that
$$ \int_{0}^{\infty} e^{-\psi_{\t} (\eta (\frac{x+y}{\t}))t - t^2/2} dt -
\int_{0}^{\infty} e^{-\psi_{\t} (\eta (\frac{y}{\t})) t - t^2/2} dt \leq
O(\t^{-\ve}) \int_{0}^{\infty} e^{-\psi_{\t} (\eta (\frac{y}{\t}))t - t^2/2} dt.$$
\end{proof}
\maappendix{A class of monotone functionals over $\F_L$}
\label{sec:monoappendix}
Since the notion introduced in Section~\ref{sect:monotonicity}
may be of
independent interest, let us present here a related result, which
offers another instructive insight on the contraction
properties of convolutions in $\F$.
\noindent{\bf Theorem B.1 } {\em % \begin{thm}\label{th:U}
Let $F, G \in \F$ with $G$ {\em steeper} than $F$. Then,
for any positive and continuous function $\Psi$ on $[0,\infty)$
which vanishes at $0$ and $\infty$
\begin{equation}
\int_{-\infty}^{\infty} dt \ \Psi(\, G(t) \, ) \ \le \
\int_{-\infty}^{\infty} dt \ \Psi(\, F(t) \, )
\label{eq:Psi}
\end{equation}
Furthermore, if $\Psi$ is strictly positive on $(0, \infty)$,
and $G$ and $F$ are both left-continuous, then the inequality
is strict unless $G$ is a translate of $F$.
} % \end{thm}
\begin{proof}
By standard approximation arguments (e.g., using local
approximations by polynomials), it suffices to establish
eq.~(\ref{eq:Psi}) under the assumption that $\Psi$ is piecewise
strictly monotone.
Employing Fubini's lemma, or the Lebesgue's `layered cake'
formula for the integral,
\begin{equation}
\int_{-\infty}^{\infty} dt \, \Psi(\, F(t) \, ) \
= \ \int_{0}^{\infty} d \lambda \,
\int_{-\infty}^{\infty} dt \, I[ \Psi(\, F(t) \, )
\ge \lambda] \, .
\label{eq:Psi2}
\end{equation}
Under the added assumption on $\Psi$, the
set $\{ t \in \R\, : \, \Psi(\, F(t) \, ) \ge \lambda \} $
is a union of level-intervals of $F$, of the form
$\{ t \in \R\, : \, a_j(\lambda) \le F(t) \le b_j(\lambda) \} $
(with $\{ [ a_j(\lambda), b_j(\lambda)]\}_j$ determined as
the level sets of $\{\Psi(\cdot ) \ge \lambda\}$).
The integral over $t$ on the right side of
~(\ref{eq:Psi2}) produces the sum of
the lengths of the level-intervals of $F$.
When $F$ is replaced by $G$ the
corresponding intervals can only get shorter,
since $G$ is assumed to be steeper than $F$, and thus
equation~(\ref{eq:Psi}) holds.
In view of the above, the conditions for the {\em strict}
monotonicity sound reasonable. However, since the strict
monotonicity is very significant it may be instructive
to make the argument explicit.
(What follows makes the argument given just above
redundant; however, we keep it because of its simplicity.)
It is convenient to rearrange the above argument as
follows. Using our convention for the inverse function
\begin{equation}
\int_{-\infty}^{\infty} dt \, \Psi(\, F(t) \, ) \
= \int_{-\infty}^{\infty} dt(F) \, \Psi(F) \
= \int_{-\infty}^{\infty} d\, F^{-1}(a) \, \Psi(a) \, ,
\end{equation}
and thus
\begin{equation}
\int_{-\infty}^{\infty} dt \, \Psi(\, F(t) \, ) \ - \
\int_{-\infty}^{\infty} dt \, \Psi(\, G(t) \, )\
= \int_{-\infty}^{\infty} [d\, F^{-1}(a) - d G^{-1}(a)]\, \Psi(a)
\, ,
\label{eq:Psi3}
\end{equation}
where $d\, F^{-1}(a)$ and $d\, G^{-1}(a)$ are measures on $\R$.
The assumed relation (\ref{eq:steeper})
implies that the difference $d\, F^{-1}(a) - d\, G^{-1}(a)$ is
itself a positive measure. The vanishing of its integral
against $\Psi$ is therefore possible only if this measure is
supported in the set $\Psi^{-1}(0)$, but that set
(viewed as set of values of the functions $F$ and $G$)
contains at most the boundary point $a=0$. It follows that
if the inequality (\ref{eq:Psi}) is saturated then the two
Stieltjes measures are equal in $(0,\infty)$, and thus
\begin{equation}
F^{-1}(a) - G^{-1}(a) \ = \ \mathrm{Const},
\label{eq:const}
\end{equation}
which means that $F$ and $G$ differ by a shift.
\end{proof}
This implies another monotonicity principle, which expresses
the fact that convolutions make functions in $\F_L$ steeper.
\noindent{\bf Corollary B.2 } {\em
For any function $F\in \F_L$ and a probability measure $g(x)dx$ on
$\R$,
\begin{equation}
\E_{g*F}^{(Poisson)}( x_{n}-x_{n+1} ) \ \le \
\E_{F}^{(Poisson)}( x_{n}-x_{n+1} ) \, , \quad \mbox{for all
$n \ge
1$}\, ,
\label{eq:0diff}
\end{equation}
and the inequality is strict unless either both quantities are
infinite, or
$F(x) = e^{-s(x-z)} $ for some $s>0$ and $z\in \R$.
}
\begin{proof}
The mean value of the gap may be computed with the help of
the expression:
\begin{equation}
x_{n}-x_{n+1} \ = \ \int_{-\infty}^{\infty}
\left\{I[ t>x_{n+1}] - I[ t>x_n] \right\} dt \, .
\label{eq:x-x}
\end{equation}
A simple calculation yields:
\begin{equation}
\E_{F}^{(Poisson)}( x_{n}-x_{n+1} ) \ = \
\int_{-\infty}^{\infty}
dt \, \Psi_{n}(F(t)) \,
\label{eq:3diff}
\end{equation}
with $\Psi_{n}(u) \equiv \frac{u^{n}}{n!}\, e^{-u} $.
Theorem~B.1 applies to such quantities.
\end{proof}
We did not base the proof of Theorem~\ref{th:accpts}
on this observation (i.e., use in Section~\ref{sect:monotonicity}
(\ref{eq:0diff})
instead of (\ref{eq:uPoisson}) )
since this argument is conclusive only when the above expected
value is known to be finite for some $n< \infty$,
and we preferred not to limit the proof by such an assumption
(and had no need to).
\bigskip
\bigskip
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%
\noindent {\large \bf Acknowledgments\/}
We thank Pierluigi Contucci for many stimulating discussions in the
early part of this project.
A.R. would like to express her gratitude to Loren Pitt for his
invaluable help and for the financial support from
his grant during the course of this work while a Whyburn Research
Instructor at the University of Virginia, Larry
Thomas for the reading of an earlier draft of the manuscript and to
Almut Burchard, Holly Carley and Etienne DePoortere for useful
discussions. The work of M.A. was supported in part by NSF grant PHY-9971149.
\addcontentsline{toc}{section}{References}
\begin{thebibliography}{10}
\bibitem{MPV}
Mezard, M., Parisi, G., and Virasoro, M.A. (1987). {\em Spin Glass
Theory and Beyond}.
\newblock World Scientific Lecture Notes in Physics, Vol.9,
(Singapore).
\bibitem{D}
Derrida, B. (1980). ``Random-energy model: Limit of a family of
disordered models,''
{\em Phys. Rev. Lett.} {\bf 45}, 79--82.
\bibitem{DaVe} D.J.Daley, D.Vere-Jones, ``An introduction to the
Theory of Point Processes'', Springer Series in Statistics,
Springer-Verlag, 1998.
\bibitem{R}
Ruelle, D. (1987). ``A mathematical reformulation of Derrida's {REM}
and {GREM},'' {\em
Commun. Math. Phys.} {\bf 108}, 225--239.
\bibitem{Ex}
Leadbetter, M.R., Lindgren, G. and Rootz\'{e}n, H. (1983).
{\em Extremes, and Related
Properties of Random Sequences and Processes}.
\newblock (Springer-Verlag).
\bibitem{BS}
Bolthausen, E. and Sznitman, A.-S. (1998).
``On {R}uelle's probability cascades and an
abstract cavity method,'' {\em Commun. Math. Phys.} {\bf 197},
247.
\bibitem{L}
Liggett, T. (1979). ``Random invariant measures for Markov chains,
and independent particle systems,''
{\em Z. Wahrscheinlichkeitstheorie verw. Gebiete} {\bf 45}, 297--313.
\bibitem{CD}
Choquet, G. and Deny, J. (1960). ``Sur l'\'equation de convolution
$\mu * \sigma = \mu$,''
{\em C.R. Acad. Sci. Paris} {\bf 250}, 799--801.
\bibitem{BR} Bahadur, R.R., and Rao, Ranga R. (1960): ``On
deviations of the
sample mean,'' {\it Ann. Math. Statist.} {\bf 31}, 1015--1027.
\bibitem{Fl} Feller, W. (1971): {\it An introduction to Probability
Theory and Its Applications}, v. 2 (Sect. XVI.4).
(2nd ed., Wiley series in Probability and Mathematical Statistics).
\bibitem{DZ} Dembo, A., Zeitouni, O. (1998). {\it Large deviations
techniques and applications}.
Applications of Mathematics (2nd ed., Springer-Verlag).
\end{thebibliography}
\end{document}
---------------0410121701147--
**