\documentstyle[12pt]{article}
\oddsidemargin -3mm % Remember this is 1
% inch less than actual
%\evensidemargin 7mm
\textwidth 17cm
\topmargin -9mm % Remember this is 1 inch less than actual
%\headsep 0.9in % Between head and body of text
\headsep 20pt % Between head and body of text
\textheight 23cm
\scrollmode
\begin{document}
\baselineskip=14pt
\parskip 2mm
%\input mssymb
\newcommand{\Bbb}{\bf}
\newcommand{\Z}{{\Bbb Z}}
\newcommand{\z}{{\Bbb Z}}
\newcommand{\zz}{{\Bbb Z}}
\renewcommand{\r}{{\Bbb R}}
\newcommand{\R}{{\Bbb R}}
\newcommand{\E}{{\Bbb E}}
\newcommand{\C}{{\Bbb C}}
\renewcommand{\P}{{\Bbb P}}
\newcommand{\N}{{\Bbb N}}
\newcommand{\var}{{\Bbb V}}
%\newcommand{\z}{{Z\kern-0.45emZ}}
%\newcommand{\r}{{I\kern-0.25emR}}
%\newcommand{\zz}{{Z\kern-0.30emZ}}
%\newcommand{\E}{{I\kern-0.25emE}}
%\newcommand{\Z}{{Z\kern-0.30emZ}}
%\renewcommand{\P}{{I\kern-0.25emP}}
%\newcommand{\R}{{I\kern-0.25emR}}
%\newcommand{\N}{{I\kern-0.25emN}}
%\newcommand{\var}{\mbox{Var}}
\title{Fluctuations of a surface submitted to \\ a random average process}
\author{{P.A. Ferrari \hspace{10 mm} L. R. G. Fontes}\\
Instituto de Matem\' atica e Estat\'\i stica,
USP }
\maketitle
\begin{abstract}
We consider a hyper surface of dimension $d$ embedded in a $(d+1)$-dimensional
space. For each $x\in\z^d$, let $\eta_t(x)\in \R$ be the height of the surface
at site $x$ at time $t$. At rate $1$ the $x$-th height is updated to a random
convex combination of the heights of the `neighbors' of $x$. The distribution of
the convex combination is translation invariant and does not depend on the
heights. This motion, named random average process (RAP), is one of the
linear processes introduced by Liggett (1985). Special cases of RAP are a
type of smoothing process (when the convex combination is deterministic) and
the voter model (when the convex combination concentrates on one of the
neighbors chosen at random). We start the heights located on a
hyperplane passing through the origin but different from the trivial one
$\eta(x)\equiv 0$. We show that when the convex combination is neither
deterministic nor concentrating on one of the neighbors the variance of the
height at the origin at time $t$ is proportional to the number of returns to
the origin of a symmetric random walk of dimension $d$. Under mild
conditions on the distribution of the random convex combination, this gives
variance of the order of $t^{1/2}$ in dimension $d=1$, $\log t$ in dimension
$d=2$ and uniformly bounded in $t$ in dimensions $d\ge 3$. We also show that
for each initial hyperplane the process as seen from the height at the
origin converges to an invariant measure on the hyper surfaces conserving
the initial asymptotic slope. The height at the origin satisfies a weak law
of large numbers and a central limit theorem. To obtain the results we use a
corresponding probabilistic cellular automaton, for which similar results
are derived. This automaton corresponds to the product of (infinite
dimensional) independent random matrices whose rows are independent.
\end{abstract}
\vskip 3mm
\noindent{\bf Keywords:}
random average process, random surfaces, product of random
matrices, linear process, voter model, smoothing process.
\vskip 3mm
\noindent{\bf AMS Classification numbers: } 60K35, 82C
\newtheorem{defin}{Definition}[section]
\newtheorem{Prop}{Proposition}
\newtheorem{teo}{Theorem}
\newtheorem{prop}{Proposition}[section]
\newtheorem{lem}{Lemma}[section]
\newtheorem{rmk}{Remark}[section]
\newtheorem{cor}{Corollary}[section]
\renewcommand{\theequation}{\thesection .\arabic{equation}}
\newcommand{\bprop}{\begin{prop}}
\newcommand{\eprop}{\end{prop}}
\newcommand{\bteo}{\begin{teo}}
\newcommand{\bcor}{\begin{cor}}
\newcommand{\ecor}{\end{cor}}
\newcommand{\eteo}{\end{teo}}
\newcommand{\brm}{\begin{rmk}}
\newcommand{\erm}{\end{rmk}}
\newcommand{\blem}{\begin{lem}}
\newcommand{\elem}{\end{lem}}
\newcommand\be{\beta}
\newcommand\vep{{\varepsilon}}
\newcommand\de{{\delta}}
\newcommand\la{{\lambda}}
\newcommand\ga{{\gamma}}
\newcommand\Ga{{\Gamma}}
\newcommand\si{{\sigma}}
\newcommand\ze{{\zeta}}
\newcommand\li{1/\alpha}
\newcommand{\cov}{\mbox{Cov}}
\newcommand{\uni}{u_{n,i}}
\newcommand{\ukn}{u_{n,k}}
\newcommand{\wnk}{\theta_{n}(k)}
\newcommand{\wnl}{\theta_{n}(l)}
\newcommand{\ukmn}{u_{n,k-2}}
\newcommand{\ukpn}{u_{n,k+2}}
\newcommand{\unl}{u_{n,l}}
\newcommand{\wn}{D_n}
\newcommand{\wnm}{D_{n-1}}
\newcommand{\twnm}{\tilde D_{n-1}}
\newcommand{\wz}{D_0}
\newcommand{\twz}{\tilde D_0}
\newcommand{\wu}{D_1}
\newcommand{\twu}{\tilde D_1}
\newcommand{\twn}{\tilde D_n}
\newcommand{\tw}{\tilde D_0}
\newcommand{\txo}{\bar X_0}
\newcommand{\xo}{X_0}
\newcommand{\yi}{y_i}
\newcommand{\yn}{Y_n}
\newcommand{\ynu}{Y_{n-1}}
\newcommand{\tyi}{\bar y_i}
\newcommand{\tyj}{\bar y_j}
\newcommand{\tyju}{\tilde Y_{j-1}}
\newcommand{\tyiu}{\bar y_{i-1}}
\newcommand{\twi}{\tilde D_i}
\newcommand{\twnk}{\tilde D_{n-k}}
\newcommand{\twk}{\tilde D_k}
\newcommand{\twij}{\tilde D_{i-j}}
\newcommand{\twj}{\tilde D_j}
\newcommand{\twnu}{\tilde D_{n-1}}
\newcommand{\twnp}{\tilde D_{n+1}}
\newcommand{\tdn}{\tilde D_n}
\newcommand{\wi}{D_i}
\newcommand{\wnu}{D_{n-1}}
\newcommand{\vki}{v_{k,i}}
\newcommand{\unk}{u_{n-k,i}}
\newcommand{\uny}{v_{n,Y_{n-1}}}
\newcommand{\an}{\alpha_n}
\newcommand{\anu}{\alpha_{n-1}}
\newcommand{\tun}{{\cal W}_n}
\newcommand{\fn}{{\cal F}_n}
\newcommand{\tui}{{\cal W}_i}
\newcommand{\tzn}{\tilde z_n}
\newcommand{\tezn}{\tilde Z_n}
\newcommand{\tczn}{\tilde {\cal Z}_n}
\newcommand{\tyn}{\tilde Y_n}
\newcommand{\hyn}{\hat Y_n}
\newcommand{\tynu}{\tilde Y_{n-1}}
\newcommand{\hynu}{\hat Y_{n-1}}
\newcommand{\hyju}{\hat Y_{j-1}}
\newcommand{\teznu}{\tilde Z_{n-1}}
\newcommand{\beq}{\begin{equation}}
\newcommand{\eeq}{\end{equation}}
\newcommand{\beqn}{\begin{eqnarray}}
\newcommand{\beqnn}{\begin{eqnarray*}}
\newcommand{\eeqn}{\end{eqnarray}}
\newcommand{\eeqnn}{\end{eqnarray*}}
\newcommand{\e}{{\cal E}}
\renewcommand{\k}{(k_1,k_2)}
\newcommand{\one}{{\bf 1}}
\renewcommand{\+}{&&+}
\renewcommand{\=}{&=&}
\renewcommand{\le}{&\leq&}
\renewcommand{\ge}{&\geq&}
\newcommand\munk{\mu_{n,k}}
\newcommand\muik{\mu_{i,k}}
\newcommand\uonk{u_{n,k}^1}
\newcommand\vonk{v_{n,k}^1}
\newcommand\utnk{u_{n,k}^2}
\newcommand\vtnk{v_{n,k}^2}
\newcommand\eo{e_1}
\newcommand\et{e_2}
\newcommand\muo{\mu_1}
\newcommand\mut{\mu_2}
\newcommand\xn{X_n}
\newcommand\xiu{X_{i-1}}
\newcommand\vnk{v_{n,k}}
\newcommand\uiy{u_{i,\yiu}}
\newcommand\viy{v_{i,\yiu}}
\newcommand\yiu{Y_{i-1}}
\newcommand\wiu{W_{i-1}}
\newcommand\uik{u_{i,k}}
\newcommand\vik{v_{i,k}}
\newcommand\uil{u_{i,l}}
\newcommand\vil{v_{i,l}}
\newcommand{\Fi}{{\cal F}_i}
\newcommand{\fiu}{{\cal F}_{i-1}}
\newcommand{\wik}{\theta_{i,k}}
\newcommand{\wjl}{\theta_{j,l}}
%\newcommand{\tyiu}{\tilde y_{i-1}}
\newcommand{\tcyiu}{\tilde Y_{i-1}}
\newcommand{\tyjux}{\tilde Y_{j-1}^x}
\newcommand{\tyiux}{\tilde Y_{i-1}^x}
%\newcommand{\wi}{D_i}
\newcommand{\wis}{D_i^x}
\newcommand{\wius}{D_{i-1}^x}
\newcommand{\tynx}{\tilde Y_n^x}
\newcommand{\tynux}{\tilde Y_{n-1}^x}
\newcommand{\hzn}{\hat Z_n}
\newcommand{\hznu}{\hat Z_{n+1}}
\newcommand{\hzf}{\hat Z_\infty}
\newcommand{\bzn}{\bar Z_n}
\newcommand{\bznu}{\bar Z_{n+1}}
\newcommand{\txui}{{\cal W}^x_i}
\newcommand{\tcui}{\tilde{\cal W}_i}
\newcommand{\tcun}{\tilde{\cal W}_n}
\newcommand{\fju}{{\cal F}_{j-1}}
\newcommand{\pn}{{\cal P}_n}
\newcommand{\p}{{\cal P}}
\newcommand{\ajl}{A_{j,l}^n}
\newcommand{\ajk}{A_{j,k}^n}
\newcommand{\pj}{p_j}
\newcommand\proof{\noindent{\bf Proof. }}
\newcommand\square{\ifmmode\sqr\else{$\sqr$}\fi}
\newcommand\sqr{\vcenter{
\hrule height.1mm
\hbox{\vrule width.1mm height2.2mm\kern2.18mm\vrule width.1mm}
\hrule height.1mm}} % This is a slimmer sqr.
\newcommand\given{\ \vert\ }
\section{Introduction}
\setcounter{equation}{0}
\label{sec:int}
We consider a stochastic process $\eta_t$ in $\r^{\z^d}$. At each site
$i\in \z^d$ at each time $t$ corresponds a height $\eta_t(i)$. These
heights evolve according to the following rule. For each $i\in \z^d$
let $U(i,\cdot)$ be a random probability distribution on $\z^d$. Each
height has an independent Poisson clock of parameter $1$. When the
clock rings for site $i$ at time $t$, a (independent of everything)
realization $u(i,\cdot)$ of the random probability is chosen and then
the height at site $i$ moves to the position
$$
\sum_{j\in \z^d} u(i,j) \eta_t(j).
$$
In other words, at rate one each height moves to a random convex
combination of the other heights. The weights of this convex
combination are chosen independently at each time. We call this
process random average process (RAP). The motion is well defined
under suitable conditions on the distributions of $u$ and $\eta_0$.
The formal generator is given by
\beq
\label{eq:L}
Lf(\eta) = \sum_i\int dm(u(i)) [ f(u(i)\eta) - f(\eta)]
\eeq
where $u(i)$ is an operator that puts in the $i$-th
coordinate a convex combination of the other coordinates:
\beq
\label{eq:ui}
(u(i)\eta)(k) = \cases {\eta(k) &if $k\ne i$ \cr
\sum_j u(i,j)\eta(j) &if $k=i$\cr}
\eeq
and $m$ is the distribution of $u(i)$. If we interpret $u(i)$ as the matrix
$a$ with entries
$$
a(k,j)= \cases { 1 & if $j=k\neq i$\cr
u(i,j) & if $k=i$ \cr
0& otherwise}
$$
then $u(i)\eta$ can be interpreted as $a\eta^\ast$, the product of the matrix
$a$ and $\eta^\ast$, the transpose of the vector $\eta$\footnote{Elsewhere
in the paper, ${}^\ast$ will have the same meaning.}. The RAP is a special
case of the linear processes of chapter IX of Liggett (1985).
Particular cases of this process are one of the smoothing processes
(Andjel (1985), Liggett and Spitzer (1981) and Liggett (1985)) and the
voter model (Liggett (1985) and Durrett (1996) for a recent review).
To obtain the smoothing process one imposes the distribution of the
matrix $u$ to concentrate mass on a constant matrix that does not
change with time. In the above papers a ``more random'' smoothing
process was studied: at each realization of the Poisson process, the
random convex combination is multiplied by an independent positive
random variable $W$ of mean 1, but the initial configurations were
restricted to the positive real numbers. Questions about existence and
ergodicity of the system when the initial heights are positive were
considered. It would be interesting to study the ergodicity questions
for the case of general initial conditions. One crucial difference
between the processes studied in the above papers and the RAP is that
when the configurations are restricted to being positive and the
process is studied from a fixed framework, the non trivial realizations
of the process remain bounded. This is the reason why it makes sense
to multiply by a random variable with mean 1. In contrast, the typical
configurations of the RAP grow with time and are localizable only from
a moving frame of reference (the height at the origin, for
instance). In this case it does not make sense to multiply the
absolute weights by a random variable.
To obtain the voter model we consider $u(i,\cdot)$ as a
random probability vector for which only one of the coordinates $j$ is
different from zero --and hence equal to 1. To get the usual voter
model one has to restrict the initial configurations to those assuming
for instance only two values. But the question of the behavior of the
system when infinitely many initial values are allowed is of interest.
In this paper we will concentrate on the other cases, but some marginal
results will concern the smoothing process and the voter model.
The starting configuration is a hyperplane passing through the origin: for a
given vector $\lambda\in \R^d$ we set $\eta_0(i) = i\lambda^\ast$,
the (matrix)
product of $i$ and the transpose of $\lambda$ (this is the internal product).
Notice that if $\lambda\equiv
0$, then $\{\eta_t \equiv 0\}$ because this initial configuration is
invariant for the system. We assume that the distribution of $u(i,j)$ is
translation invariant: $u(i,j)$ and $u(i+k,j+k)$ have the same distribution.
Our main result is to show that when the initial configuration is a
hyperplane, the variance of the height at the origin at time $t$ is
proportional to
the expected numbers of returns to the origin of a symmetric random walk.
Denoting $\var$ as the variance, we show that
\beq
\label{eq:v=d}
\var\eta_t(0)= (\si^2+(\mu\lambda^\ast)^2)\int_0^t \P(D_s=0\vert D_0=0)ds
\eeq
where $D_t$ is a (symmetric) random walk with transition rates
$$
q(\ell,k)= \sum_j\E(u(0,j)u(\ell,j+k)),
$$
$\mu= \E\left(\sum_j j u(0,j)\right)$ and $\sigma^2
= \var\left(\sum_j j\la^\ast
u(0,j)\right)$. By definition, when the initial configuration is a
hyperplane, it does not have any spatial fluctuation. In this case for
$\sigma^2>0$, the asymptotic variance of the height at the origin is of the
same order for both biased ($\mu\neq 0$) and unbiased
($\mu=0$) distributions of the convex combinations. If the initial
configuration is distributed according to a measure with spatial fluctuations,
one expects the biased and unbiased cases to have different asymptotic
variances: normally the height at the origin in the biased case will pick the
spatial fluctuations of the initial configuration. We show this difference
with one example in the one dimensional case ($d=1$). We do not know how to
treat the general case when the initial configuration is random.
Let $\vert j \vert = \sqrt{jj^\ast}$, the $L_2$-norm of $j\in\Z^d$.
In the case of finite second moment, {\it i.e.} when $\sum_j \vert j\vert^2 \E
u(0,j)<\infty$ our result implies that starting with the hyperplane, the
fluctuations of the height at the origin behave asymptotically as follows: for
$\si^2+(\mu\lambda^\ast)^2>0$,
\beq
\label{eq:vet}
\var\eta_t(0) \hbox { is of the order of } \cases{ \sqrt {t} &{if
$d=1$}\cr \log t &{if $d=2$}\cr
\hbox{constant} & if $d\geq 3$\cr}
\eeq
where $\var$ is variance. Here the phrase ``$f(t)$ is of the order of
$g(t)$''
means that we can find positive constants $A$ and $B$ such that $Ag(t) \leq
f(t) \leq Bg(t)$ for all $t\geq 0$. We show a central limit theorem for
$\eta_t(0)$ for the deterministic hyperplane initial configuration. In one
example, in the one dimensional case we show it for a random
initial configuration.
The nearest neighbor one dimensional case is isomorphic to a conservative
nearest particle system studied before by the authors (unpublished). In this
case, the position of the height at the origin at time $t$ can be interpreted
as the position of a tagged particle at the same time in the conservative
nearest particle system. At the end of Section 6 we describe this
correspondence and correct a statement of Ferrari (1996) about the asymptotic
behavior of the tagged particle.
We show that starting with a hyperplane the distribution of the height
difference $(\eta_t(i)-\eta_t(j))$ converges in probability to a limiting
random variable with expectation $\eta_0(i) - \eta_0(j)$, for all $i$, $j$. In
particular this implies that the process starting with a hyperplane converges
weakly to an invariant measure. Furthermore we show that, under the limiting
invariant measure the slope is conserved, that is
$\E(\eta(i) - \eta(j)) = \eta_0(i) - \eta_0(j)$, and the variance of the
differences has the following asymptotic behavior: for
$\si^2+(\mu\lambda^\ast)^2>0$
\beq
\label{eq:vem}
\var[\eta(i) - \eta(j)]
\hbox { is of the order of }
\cases{
{\vert i-j\vert} &{if $d=1$}\cr \log \vert i-j\vert &{if $d=2$}\cr
\hbox{constant} & if $d\geq 3$\cr}
\eeq
To prove the above in $d=2$ we need a
stronger condition: that a second-plus-delta moment is finite: $\sum_j
\vert j\vert ^{2 {+} \delta} \E
u(0,j)$ $<\infty$ for some $\delta>0$. Except for a few one
dimensional cases, we are unable to further describe this
measure. Probably it
is a Gibbs measure, but there are no reasons to expect that the measure is in
general reversible for this process. Also it would be nice to show that the
limiting measure obtained when the process starts with a hyperplane is the
unique invariant measure with the slope of the initial hyperplane.
In dimension $d=1$, if $u(i,i+1)=1-u(i,i-1)$, then the heights remain ordered
if started so and form a point process. Moreover, if $u(i,i+1)$ is uniform in
$[0,1]$, for each $\la>0$, the Poisson process of parameter $\la$ is
reversible for the system. There are other cases of reversibility for suitable
choices of $u$, but in general this process is not reversible and the Poisson
process is not necessarily invariant. For instance in the nearest neighbor one
dimensional case if $u(i,i+1)\equiv 1/2$ (this is a smoothing process), the
measures concentrating mass in configurations for which the distances between
successive neighboring heights are constant are translation invariant and
invariant for the process. Are these all the measures in this simple case? For
other choices of $U$, the ordering is not necessarily maintained and the
invariant measures may be difficult to find.
The nearest neighbors one dimensional process has been studied by Kipnis,
Presutti and Marchioro (1982) for $u(i,i+1)=1-u(i,i-1)$ uniform in $[0,1]$.
They consider $L$ differences between successive heights moving according to
the random average process. They put different boundary conditions in the
extremes of the box so that to force a gradient in the difference height. In
the limit as $L\to\infty$ they show that the difference height profile in the
normalized box (so that the length is kept to be 1), adopts a linear form
interpolating between the boundary conditions. This implies that the
hydrodynamic limit of the difference heights corresponds to a heat equation.
If $u(i,i+k)$ and $u(i,i-k)$ have the same distribution, then the hydrodynamic
equation for the RAP is
$$
\left({\partial \Phi \over \partial t}\right)_h
= D_h \left(\Delta \Phi\right)_h
$$
where $D_h= \sum_j \vert j_h \vert u(0,j)$ and $(\cdot)_h$ is the $h$-th
coordinate of the vector $(\cdot)\in \Z^d$. Here $\Phi(r,t)$ represents the
height of the surface at the macroscopic time $t$ at the macroscopic position
$r$. The equation is obtained when the time in the RAP is scaled as the square
of the space.
The main tool to study this process is duality. The graphical construction of
the RAP is realized as in the usual Harris construction of particle systems:
a (independent of everything) Poisson process of parameter 1 is attached to
each site $i\in \z^d$.
To each mark of this Poisson process a (independent of everything)
realization of $u(i,\cdot)$ is
attached. If at time $t$ a Poisson mark appears at site $i$ and $u(i,\cdot)$
is the corresponding realization of the weights, then at time $t$ the new
configuration is $u(i)\eta_{(t^-)}$, where $u(i)$ was defined in
(\ref{eq:ui}). This Harris construction works if $u$ has finite
range. Durrett (1995) uses a percolation argument to show that there
exists a (small) time $t_0$ such that $\Z^d$ is partitioned in random
finite subsets with no interaction among them in the interval
$[0,t_0]$. Hence the graphical construction can be performed in each
of these finite sets. Repeating the argument
between $t_0$ and $2t_0$ and so on, the process is constructed. The same
argument should work under reasonable decay conditions on
the distribution of $u$, but one can avoid this by using the almost sure
duality property of this process as we see below.
Define a random walk $\tilde Y_t\in \Z^d$ using the same Poisson marks and
realizations of $u(i,\cdot)$ used in the construction of $\eta_t$. The motion
is the following: if at time $t^-$ it is $\tilde Y_{t^-}=i$, a Poisson
mark of site $i$
appears at time $t$ and $u(i,\cdot)$ is the realization of the weights
corresponding to this mark, then $\tilde Y_t$ will be $j$ with probability
$u(i,j)$. Hence $\tilde Y_t$ has (average) rates
\beqn
\nonumber
\P(\tilde Y_{t+h} = j\given \tilde Y_t = i) = h \E u(i,j) +o(h)
\eeqn
Call ${\cal F}_t$ the sigma algebra generated by the Poisson marks
and the realization of the $u$'s on these marks between $0$ and $t$.
Notice that $\tilde Y_t$ and $Y^{0,t}_t$ have the same distribution. When
conditioned to ${\cal F}_t$, $\tilde Y_t$ can be seen as a ``random walk in a
space-time random environment''. Provided
that the Harris construction is feasible,
the following duality relation holds almost surely:
\beqn
\label{eq:dua}
\eta_t(x) = \E(\eta_0(Y^{x,t}_t)\given {\cal F}_t)
\eeqn
where $Y^{x,t}_s$, $0\leq s \leq t$ is the walk following the marks from $t$
to $0$ backwards in time starting at the site $x$. Another possibility is to
take
(\ref{eq:dua}) as the definition of the process and then to show that the
resulting process has the
desirable infinitesimal properties. This can be done if (\ref{eq:dua}) is
finite for all $t$. Actually we construct the process as a limit of a
sequence of discrete time processes. It turns out that a sufficient condition
for the existence of the process is that there exists a constant $C$ such that
$\E \eta_0(j) \leq C |j|^2$. We discuss this in Sections 2 and
7.
When $u$ is constant, that is, there exists a stochastic matrix $a$ such that
$\P(u=a) = 1$, we get
$$
\eta_t(x) = \E(\eta_0(Y^{x,t}_t)).
$$
This corresponds to the potlatch process which is exactly the dual of the
smoothing process. When $U$ is not allowed to have more than one positive
coordinate ---equal to one and the others equal to zero--- $Y_t$ is just
ordinary random walk with transition probabilities given by the distribution
of $u$. Hence in this case
$$
\eta_t(x) = \eta_0(Y^{x,t}_t)
$$
This corresponds to coalescing random walks, the dual of the voter model.
To show the above results we prefer to introduce a probabilistic cellular
automaton (PCA). The time of the PCA is discrete and at all times each site
chooses an independent random convex combination of the heights at the
neighboring sites and updates its height to this new one. If we denote $X_n$
the vector indexed by $\Z^d$ of the heights at time $n$ and $u_n(\cdot,\cdot)$
a random sequence of iid stochastic matrices, then $X^\ast_n = (\prod_{k=1}^n
u_{n-k}) X^\ast_0$. The matrices $u_m$ have also the property that
$\{u_m(i,\cdot):i\in\Z^d\}$ is a family of iid random vectors. The results
described above are shown for this PCA and then a standard approximation of
the particle system by a family of PCA's is performed.
In Section 2 we introduce the PCA and prove in Proposition 2.3 the discrete-time
version of (\ref{eq:v=d}). In Section 3 we prove some estimates for the
discrete version of the random walk $D_t$ which lead to the asymptotics
(\ref{eq:vet}). In Section 4 we show the central limit theorem for the
discrete version of $\eta_t(0)$ starting with the hyperplane. In Section 5 we
show that the process starting with the hyperplane converges to an invariant
measure with the same slope as the hyperplane and that under this invariant
measure, the fluctuations between the heights at distant sites behave as in
(\ref{eq:vem}). In Section 6 we discuss the one dimensional case and show
that, when the initial distribution of heights differences is an ergodic
stationary process with fluctuations, these fluctuations appear in the
variance of the height at the origin when the process is biased.
In Section 7 we discuss the passage from the discrete to the
continuous case and in Section 8 we discuss the hydrodynamic limit.
%%%%%%%%%%%%%%%%%% section 2 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Discrete time: Probabilistic cellular automaton}
\setcounter{equation}{0}
\label{sec:disc}
The process we study in this Section is a discrete time system or
probabilistic cellular automaton whose configurations belong to $\r^{\z^d}$.
Under this evolution at time $n$ each height chooses a random convex
combination of the heights of its neighboring heights at time $n-1$ and jumps
to this new height. Let $(X_n)_{n\geq0}$ denote the height system. Let
$\{u_n(i,i+\cdot),\,n\geq1,\,i\in\z^d\}$
be an i.i.d.\/ and independent of $X_0$
family of random probability vectors distributed in $[0,1]^{\z^{d}}$. That is,
for all $i,j\in\z^d$ $u_n(i,j)\in [0,1]$, $\P(\sum_j u_n(i,j)=1)=1$.
Define
$$
\pi(j)=\E u_n(0,j)
$$
and assume
\beqn
\label{eq:2m}
\sum_j \vert j\vert^2\pi(j)<\infty,
\eeqn
where $\vert j\vert = \sqrt{jj^\ast}$. To avoid
dealing with aperiodicity, assume that $\pi(x)>0$ for $|x|\leq1$.
For $n\geq1$ and $i\in\z^d$ we define $X_{n}(i)$ as
\beqn
\label{eq:def}
X_{n}(i)= \sum_j u_n(i,j)X_{n-1}(j).
\eeqn
This may define a degenerate
random variable. In the case $u(i,j) = 0$ if $\vert i-j \vert >M$ for some $M$
(finite range), (\ref{eq:def}) defines an honest Markov process on
$\r^{\z^d}$. After introducing the concept of duality below we give
sufficient conditions for (\ref{eq:def}) to define an honest Markov process
under our hypothesis. Definition (\ref{eq:def}) means that the transpose of
the height process at time $n$ is the multiplication of $n$ infinite
dimensional independent identically distributed random matrices times the
transpose of the heights vector at time $0$:
$$
X_n^\ast = \left(\prod_{k=1}^n u_{n-k}\right) X_0^\ast
$$
Notice that not only the matrices are independent, but the rows inside each
matrix are independent too.
The height at the origin at time $n$, $X_n(0)$, can be expressed as the
expectation of a function of a simple random walk in (space--time) random
environment. To our probability space attach a family $\{w_n(i):n\geq 0,
i\in\z^d\}$ of iid random variables uniformly distributed in $[0,1]$ and
independent of everything else. For $1\leq k\leq n$, let $Y_k$ denote the
position at time $k$ of a random walk in $\z^d$ running up to time $n$,
starting at the origin defined by $Y^{x,n}_0=x$ and for $1\leq k \leq n$:
$$
Y^{x,n}_{k} = \sum_j j\one\{w_{k}(Y_{k-1}) \in I_{n-k}(Y_{k-1},j)\},
$$
where for each $i\in \z^d$,
$\{I_k(i,j):j\in \z^d\}$ is a (random) partition of the interval $[0,1]$ with
lengths $\vert I_k(i,j) \vert = u_{k}(i,j)$. The process $Y^{0,n}_n$ is a
random walk with transition probabilities
$$
\P(Y^{x,n}_{k}=j|Y^{x,n}_{k-1}=i) = \E u_{n-k}(i,j)=\pi(j-i)
$$
Let ${\cal F}_n$ be the $\sigma$-algebra generated by $\{u_m(i,j):
i,j\in \z^d, 0\leq m\leq n\}$. Conditioned to ${\cal F}_n$, $Y^{x,n}_k$ has
the following transition probabilities
$$
\P(Y^{x,n}_{k}=j|Y^{x,n}_{k-1}=i,{\cal F}_n)
=u_{n-k}(i,j):=v_{k-1}(i,j)
$$
In words, conditioned to ${\cal F}_n$, the walk at time $k$ jumps from
$i$ to $j$ with probability $v_k(i,j)$. Since this walk uses the
$u_n(i,j)$ backwards in time, we will call $Y^{x,n}_n$ the backwards walk.
\proclaim {Lemma 2.1}. (Duality formula) For any initial configuration $X_0$
such that $\vert X_0(i)\vert <\infty$ for all $i$ and for all $n$ it holds
$$
X_n(x)=\E(X_0(Y^{x,n}_n)\vert {\cal F}_n).
$$
In particular, if $X_0(i)= i\lambda^\ast$ for some vector $\lambda$, where
$\lambda^\ast$ is the transpose of $\lambda$, then
$$
X_n(x) = (\E(Y^{x,n}_n\vert {\cal F}_n))\lambda^\ast
$$
\proof
Notice that ${\cal F}_n$ is also the $\sigma$-algebra generated by
$\{v_{m,i}: i\in \z^d, 0\leq m\leq n\}$. Let us compute $X_2(0)$:
\beqnn
\nonumber
X_2(0)\= \sum_j u_2(0,j) X_1(j) \\
\nonumber
\= \sum_j u_2(0,j)\sum_k u_1(j,k) X_0(k) \\
\nonumber
\= \E (X_0(Y^{0,2}_2)\vert {\cal F}_2).
\eeqnn
An induction argument works for the general case. \square
The duality formula gives an easy criterium for the existence of the process:
\proclaim {Corollary}. A sufficient condition for (\ref{eq:def}) to define an
honest Markov process is that for all $n\geq 0$
$$
\E \vert X_0(Y_n)\vert <\infty
$$
where $Y_n = Y^{0,n}_n$.
\proof The hypothesis plus the duality relation implies that $\E \vert X_n(0)
\vert <\infty$. To see that this holds for the other $x$, write
$$
\E \vert X_n(x) \vert = \E\vert \E( X_0(Y^{x,n}_n)\vert {\cal F}_n)\vert \leq
\E\vert X_0(Y^{x,n}_n)\vert = \E \vert X_0(Y_n+x)\vert
$$
by translation invariance.
Conditioning on the first jump of $Y_n$ we get
$$
\E \vert X_0(Y_n)\vert \geq \E u(0,x)\E \vert X_0(Y_{n-1}+x)\vert
$$
for any $x$. But we have assumed that for $|x|= 1$ $\E u(0,x)=\pi(x)>0$, hence $\E \vert
X_0(Y_{n-1}+x)\vert<\infty$ for $|x|= 1$. Using induction on $k=||x||_1$
we prove that $\E \vert X_0(Y_n)\vert\geq C(x) \E \vert X_0(Y_{n-k}+x)|$ where
$C(x)$ is positive. This proves the Corollary. \square
The corollary allows us to give a sufficient condition for our case. If
$\sum_j \vert j\vert^2\pi(j)<\infty$,
then $\E|Y_n|^2<\infty$. Hence, a sufficient
condition for the existence of the process under this hypothesis is that
\beqn
\label{eq:sufcon}
\E\vert X_0(j) \vert\leq C \vert j\vert^2
\eeqn
for some positive constant $C$.
Let $Z_n = \E(Y^{0,n}_n\vert {\cal F}_n)$. For each fixed $n$, $Z_n$ has the
same distribution as $\tilde Z_n$, the position at time $n$ of the
(forward) process defined by
$$
\tilde Z_n = \E(\tilde Y_n\vert {\cal F}_n)
$$
where $\tilde Y_n$ is a (forward) random walk in a random environment
given by
$\tilde Y_0=0$ and for $k\geq 1$,
$$
\tilde Y_{k+1}
= \sum_j j\one\{w_k (\tilde Y_{k})\in I_{k}(\tilde Y_{k},j)\}
$$
where $w_n(i)$ is the same sequence of iid random variables uniformly
distributed in $[0,1]$ defined above and $\{I_{k}(i,j):j\}$ is the
partition of the interval $[0,1]$ with lengths
$\vert I_{k}(i,j) \vert =u_{k}(i,j)$ defined above. Given ${\cal
F}_n$, the conditional probabilities for this walk are
\beqnn
\nonumber
&&\P(\tilde Y_{k+1}= j|\tilde Y_{k}=i,{\cal F}_{n})= u_k(i,j)\\
\eeqnn
for $1\leq k \leq n$. Let
$$
\tilde Z_n = \E(\tilde
Y_n\vert {\cal F}_n)
$$
We have
$$
\E\tilde Z_n = \E\tilde Y_n = n \sum_j j\pi(j):= n\mu.
$$
\proclaim Lemma 2.2. The process $\tilde Z_n -\mu n$ is a martingale with respect
to the filtration $\{{\cal F}_n: n\geq 0\}$.
\proof
We have, for $n\geq1$,
\beqnn
\nonumber
\tezn\=\sum_k \E[\tilde Y_{n}|\tilde Y_{n-1}=k,{\cal F}_{n}]
\P(\tilde Y_{n-1}=k|{\cal F}_{n})\\
\nonumber
\=\sum_k \sum_j j u_n(k,j)
\P(\tilde Y_{n-1}=k|{\cal F}_{n-1})\\
\nonumber
\=\sum_kk\P(\tilde Y_{n-1}=k|{\cal F}_{n-1})+\sum_k\sum_j (j-k) u_n(k,j)
\P(\tilde Y_{n-1}=k|{\cal F}_{n-1})\\
\nonumber
\=\teznu + \tun,
\eeqnn
where $\tun=\E(\theta_{n}(\tilde Y_{n-1})|{\cal F}_{n})$, with
$$
\theta_n(k)=\sum_j (j-k)u_n(k,j)\in \Z^d
$$
for all $n$ and $k$.
Iterating, we get
\beq
\label{eq:mart1}
\tilde Z_{n}=\sum_{i=1}^n\tui.
\eeq
Now,
\beqn
\nonumber
\E(\tui\vert{\cal F}_{i-1} )
\nonumber
\=\sum_k \sum_j \E[(j-k)u_i(k,j)\vert \tilde Y_{i-1}=k,{\cal F}_{i-1}]
\P(\tilde Y_{i-1}=k|{\cal F}_{i-1})\\
\label{eq:mart2}
\=\sum_j \E[ju_i(0,j)] \sum_k \P(\tilde Y_{i-1}=k|{\cal F}_{i-1}) = \mu
\eeqn
by the independence of $u_i(k,\cdot)$ from ${\cal F}_{i-1}$ for all $i$ and
$k$. The result follows from~(\ref{eq:mart1}) and~(\ref{eq:mart2}). \square
Let $D_n$ be a random walk with the following transition
probabilities:
\beqn
\label{eq:drw}
\P(D_{n+1} = k \vert D_n =\ell) = \sum_j \E(u_1(0,j) u_1(\ell,j+k))
:=\gamma(\ell,k).
\eeqn
We prove in Lemma 2.4 below that $D_n$ is symmetric, i.e.\/
$\gamma(\ell,\ell+k) =\gamma(\ell,\ell-k)$.
\proclaim Proposition 2.3. Let $\la$ be a vector in $\R^d$,
$\sigma^2 = \var[\wnk\la^\ast]$ and
$\pn=\sum_{i=0}^{n-1} \P(\wi=0)$. Then
\beq
\label{eq:prop3}
\var(\tczn) = \sigma^2 \pn,
\eeq
where $\tczn=\tezn\la^\ast\in \R$.
The finiteness of $\sigma^2$ is implied by the second moment condition assumed
in
the beginning of the section.
\proof Since $\tczn$ is a martingale with increments
$\tcun=\tun\la^\ast$,
$$
\var(\tczn) = \sum_{i=0}^{n-1} \var(\tilde{\cal W}_i)
$$
Now, by the identity $\E\theta_n(k)\la^\ast \equiv \mu\la^\ast$ and the
independence
of $\theta_n(k)$ and
${\cal F}_{n-1}$,
$$
\tilde{\cal W}_i-\E\tilde{\cal W}_i
=\sum_k[\wnk-\E\wnk] \la^\ast\P(\tilde Y_{n-1}=k|{\cal F}_{n-1}).
$$
Hence we have
\beqnn
\nonumber
\var(\tcun)\=\E(\tcun-\E\tcun)^2\\
\nonumber
\=\E\sum_{k,l}([\wnk-\E\wnk]\la^\ast)([\wnl-\E\wnl]\la^\ast)\\
&&\quad\times\P(\tilde Y_{n-1}=k|{\cal F}_{n-1})
\P(\tilde Y_{n-1}=l|{\cal F}_{n-1})\\
\nonumber
\=\si^2 \E\sum_k\P^2(\tilde Y_{n-1}=k|{\cal F}_{n-1}),
\eeqnn
since $\E\{([\wnk-\E\wnk]\la^\ast)([\wnl-\E\wnl]\la^\ast)\}=0$
for all $k\ne l$, by the independence among the $u$'s.
To get the result by iteration it remains only to verify that
$$
\E\sum_k\P^2(\tilde Y_{n}=k|{\cal F}_{n})=\P(\wn=0)
$$
for all $n\geq1$.
For that, let $\twn=\tyn-\hyn$, with $\hyn$ an independent copy of $\tyn$
(given ${\cal F}_{n}$) starting at the origin. Then
\beqn
\P(\twn=0)\=\E\P(\twn=0|{\cal F}_{n})\nonumber\\
\=\E\sum_{k,l}\P(\twn=0|\tynu=k,\hynu=l,{\cal F}_{n})\P(\tynu=k,\hynu=l|{\cal
F}_{n})\nonumber\\
\=\E\sum_k\P(\twn=0|\tynu=k,\hynu=k,{\cal F}_{n})\P(\tynu=k,\hynu=k|{\cal
F}_{n})\nonumber\\
\+\E\sum_k\sum_{\ell\ne 0}\P(\twn=0|\tynu=k,\hynu=k+\ell,{\cal
F}_{n})\\
&&\quad\quad\times\P(\tynu=k,\hynu=k+\ell|{\cal F}_{n})\nonumber\\
\=\E\sum_k\sum_j (u_n(k,j))^2 \P(\tynu=k,\hynu=k|{\cal
F}_{n-1})\nonumber\\
\+\E\sum_k \sum_{\ell\ne 0} \sum_j u_n(k,j) u_n(k+\ell,j)
\P(\tynu=k,\hynu=k+\ell|{\cal F}_{n-1})\nonumber\\
\=\sum_k\sum_j \E(u_n(k,j))^2 \P(\tynu=k,\hynu=k )\nonumber\\
\+\sum_{\ell\neq 0} \sum_k \sum_j \E[u_n(k,j) u_n(k+\ell,j)]
\P(\tynu=k,\hynu=k+\ell)\nonumber\\
\=\sum_j \E(u_n(0,j))^2 \sum_k \P(\tynu=k,\hynu=k )\nonumber\\
\+\sum_{\ell\neq 0} \sum_j \E[u_n(0,j) u_n(\ell,j)] \sum_k
\P(\tynu=k,\hynu=k+\ell)\nonumber\\
\=\ga(0,0)\sum_k \P(\tynu=k,\hynu=k)\nonumber\\
\+\sum_{\ell\neq 0} \gamma(\ell,0)\sum_k \P(\tynu=k,\hynu=k+\ell)\nonumber\\
\label{eq:1}
\=\sum_\ell \gamma(\ell,0) \P(\tilde D_{n-1} = \ell)
\eeqn
where
\beqn
\nonumber
\gamma (0,0) = \sum_j \E(u_1(0,j))^2, \ \ \
\gamma (\ell,0) = \sum_j \E(u_1(0,j) u_1(\ell,j))
\eeqn
A similar reasoning yields
\beqn
\label{eq:2}
\P(\twn=k)\=\sum_{\ell}\ga(\ell,k)\P(\twnu=\ell)
\eeqn
where $\ga(\ell,k)$ is given by (\ref{eq:drw}).
Since $\wn$ also satisfies the same
relations~(\ref{eq:1}),~(\ref{eq:2})
and $\wn$ and $\twn$ start at the same place (the origin), we
conclude that
$$
\P(\twn=k)=\P(\wn=k),
$$
for all $k$, in particular $k=0$, and this
establishes~(\ref{eq:prop3}). \square
\proclaim Lemma 2.4. The transitions $\gamma(x,y)$ given by (\ref{eq:drw})
correspond to those of a symmetric random walk.
\noindent{\bf Proof.}
Let $k\ne 0$. By translation invariance
$$
\ga(0,k) = \sum_j \E(u_1(0,j-k)u_1(0,j)) = \ga(0,-k)
$$
and for $\ell\ne 0$, by translation invariance and independence of
$u_1(0,\cdot)$ and $u_1(\ell,\cdot)$,
\beqn
\nonumber
\ga(\ell,\ell+k) \= \sum_j\E(u_1(0,j)u_1(\ell,j+\ell+k)) \\
\nonumber
\= \sum_j\E u_1(0,j) \E u_1(\ell,j+\ell+k) \\
\nonumber
\= \sum_j\E u_1(0,j) \E u_1(0,j+k) \\
\nonumber
\= \sum_j\E u_1(0,j-k) \E u_1(0,j) \\
\nonumber
\= \ga(\ell,\ell-k).
\eeqn
This finishes the proof. \ \square
%%%%%%%%%%%%%%%%%% section 3 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Random walk estimates}
\setcounter{equation}{0}
\label{sec:rw}
We show in this section various asymptotic results involving
$\P(\twn=0|\twz=0)$ and related quantities. We use these results to prove the
discrete-time version of the
asymptotics (\ref{eq:vet}) in Lemma 3.3 below.
Recall that $\twn$ was defined in~(\ref{eq:drw}).
The random walk $\twn$ is not
spatially homogeneous (but almost), the (time homogeneous)
transition probabilities are distinct (only) at the origin.
This poses a small technical problem for deriving the results of this section,
since the supporting random walk results we need will be
quoted from Spitzer (1964) which treats only the homogeneous
case. The second moment condition assumed in Section 2
implies a second moment condition for the random walk $D_n$, which is then
seen to be recurrent (except for trivial cases). It is also aperiodic
due to the positivity assumptions made on $u(\cdot,\cdot)$.
At this point we also, for simplicity, assume isotropy, that is,
the distribution of $u(\cdot,\cdot)$ is assumed invariant under permutation
of coordinates. This of course leads to isotropy of $D_n$.
We start by proving monotonicity
of $\P(\twn=0|\twz=0)$ with respect to $n$.
\proclaim Lemma 3.1. $\P(\twn=0|\twz=0)$ is non-increasing in $n$.
\noindent{\bf Proof.}
Equation~(\ref{eq:im2}) below says that
$\P(\twnp=0|\twu=0)-\P(\twnp=0|\twu=k)$ is the increment on the variance
of a martingale. It thus has to be non negative. Then
\beqnn
\nonumber
\P(\twnp=0|\twz=0)\=\sum_k\P(\twnp=0|\twu=k)\P(\twu=k|\twz=0)\\
\nonumber
\le\sum_k\P(\twnp=0|\twu=0)\P(\twu=k|\twz=0)\\
\nonumber
\=\P(\twn=0|\twz=0).\,\square
\eeqnn
Our next step is to calculate the
power series of $\P(\twn=0)$ in terms of that of the related quantity
$\P(T=n)$, where $T$ is the return time of the walk to the origin after
leaving it. We will establish a comparison between the former
power series and the corresponding one for the homogeneous
walk, from which the asymptotics of interest of the two walks
are shown to be the same.
Let
$$
f(s)= \sum_{n\geq0}\P(\twn=0)s^n;\ \ \ \ \ \ g(s)= \sum_{n\geq0}\P(H_n=0)s^n
$$
be the power series of $\P(\twn=0)$ and $\P(H_n=0)$, respectively, where $H_n$ is
the homogeneous random walk with transition probability function
\beqn
\nonumber
\gamma_H (\ell,k) = \sum_j \E(u_1(0,j))\E( u_1(\ell,j+k)).
\eeqn
Notice that the transition functions of $\twn$ and $H_n$ differ only at the
origin.
\proclaim Lemma 3.2. $f(s)$ is of the order of $g(s)$.
\noindent {\bf Proof.}
Let $\{g_i,\,i=1,2,\ldots\}$ be the successive waiting times of the
walk at the origin and $\{T_i,\,i=1,2,\ldots\}$ the successive
return times to the origin after leaving it and let $G_i$ and
$\tau_i$ denote their partial sums, respectively.
We then have
\beq
\nonumber
\P(\twn=0)=\sum_{i\geq0}\P(n\in[G_i+\tau_i,G_{i+1}+\tau_i)),
\eeq
where $G_0=\tau_0=0$.
The last probability can be written as
\beqnn
\nonumber
\sum_{r\geq0}\P(G_i+\tau_i=r)\P(n\in[r,r+g_{i+1}))
\nonumber
\=\sum_{r=0}^n\P(G_i+\tau_i=r)\P(g_{i+1}>n-r)\\
\nonumber
\=\sum_{r=0}^n\ga^{n-r}\P(G_i+\tau_i=r),
\eeqnn
where $\ga=\ga(0,0)$.
Thus
\beq
\label{eq:pdn}
\P(\twn=0)=\sum_{r=0}^n\ga^{n-r}\sum_{i\geq0}\P(G_i+\tau_i=r)
\eeq
and forming the power series, we get
\beqn
\nonumber
f(s)&:=&\sum_{n\geq0}\P(\twn=0)s^n\\
\nonumber
\=\left(\sum_{n\geq0}\ga^{n}s^n\right)
\left(\sum_{n\geq0}\sum_{i\geq0}\P(G_i+\tau_i=n)s^n\right)\\
\nonumber
\=(1-\ga s)^{-1}\sum_{i\geq0}\E(s^{G_i+\tau_i})\\
\nonumber
\=(1-\ga s)^{-1}\sum_{i\geq0}[\E(s^{g_1})\E(s^{T})]^i\\
\nonumber
\=\frac{1}{1-\ga s}\frac{1}{1-\frac{(1-\ga)s}{1-\ga s}\psi_T(s)}\\
\label{eq:ps}
\=\frac{1}{1-s+(1-\ga)s[1-\psi_T(s)]},
\eeqn
where the second identity follows from the fact that the right hand side
in~(\ref{eq:pdn}) represents the general term in a power series gotten
out of the multiplication of the two power series in the right hand side
of that identity. $\psi_T(s)$ denotes $\E(s^{T})$.
Now $$\E(s^{T})=\sum_{x\ne 0}\E(s^{T^x})p_x,$$
where $T^x$ is the hitting time of the origin of the walk starting at $x$
and $p_x,\,x\ne0,$ is the distribution of the jump from the origin.
Notice that $T^x$ is stochastically smaller than the sum of
$||x||_1$ independent copies of $T^{\tt e}$,
where {\tt e} is any of the
origin's nearest neighbors\footnote{Here is the only place
in the argument where the assumed isotropy is used. See Remark 3.1.}
and that $p_{\tt e}>0$.
Thus,
\beqn
\label{eq:comp1}
[1-\E(s^{T^{\tt e}})]p_{\tt e}\le1-\psi_T(s)=\sum_{x\ne 0}[1-\E(s^{T^x})]p_x\\
\label{eq:comp2}
\le\sum_{x\ne 0}\{1-[\E(s^{T^{\tt e}})]^{||x||_1}\}p_x
\leq[1-\E(s^{T^{\tt e}})]\nu,
\eeqn
where $\nu=\sum||x||_1p_x<\infty$. We conclude that
\beq
\nonumber
\frac{1}{1-s+\nu(1-\ga)s[1-\psi_{T_{\tt e}}(s)]}
\leq f(s)\leq\frac{1}{1-s+p_{\tt e}(1-\ga)s[1-\psi_{T_{\tt e}}(s)]}.
\eeq
We have similar bounds for $g(s)$ (with different $\gamma<1$, $\nu<\infty$
and $p_{\tt e}>0$).
The result follows immediately. \square
\vspace{5mm}
\proclaim Remark 3.1. It is not difficult to check
that, denoting $(1-\ga)\sum_xa(x)p_x$, where $a(x)$ is referred to
in the proof of Proposition 5.1 below, by $C\equiv C(\ga,(p_x))$,
\beq
f(s)/g(s)\to C'/C
\eeq
as $s\to1$, where $C'=C(\ga',(p'_x))$ and the primes denote parameters
of the homogeneous random walk $H_n$. This does not require isotropy
and thus eliminates the need for its assumption.
Next, we will use the behavior of $f(s)$ as $s\to1$ to read the
behavior of $\sum_{i=0}^{n-1}\P(\twi=0)$ as $n\to\infty$.
\proclaim Lemma 3.3. Assume
$\sigma^2>0$. Then in $d=1$ and $2$,
$\sum_{i=0}^{n-1}\P(\twi=0)$ is of the order of
$\sqrt n$ and
$\log n$, respectively.
In $d\geq3$, it is bounded.
\noindent{\bf Proof.}
By the previous lemma, $f(s)$ behaves the same as $g(s)$.
Spitzer (1964), {\bf P7.9} (on p.75) says that $\P(H_n=0)$
is $O(n^{-d/2})$. This implies
(we leave this as an exercise to the reader)
that $g(s)$ is $O[(1-s)^{-1/2}]$ as $s\to1$ for $d=1$,
$O[\log(1-s)^{-1}]$ for $d=2$ and constant for $d\geq3$.
Then so is $f(s)$.
This in turn gives information on $\sum_{i=1}^n\P(\twi=0)$,
the expected number of visits to the origin, from the fact that
\beq
\label{eq:gf}
\frac{e}{e+1}f(1-1/n)\leq
\sum_{i=0}^{n-1}\P(\twi=0)\leq 2e f(1-1/n),\eeq
for all large enough $n$.
To get the lower bound above, we write
\beq
\nonumber
f(1-1/n)=\sum_{i=0}^\infty \P(\twi=0)(1-1/n)^i\leq\sum_{i=0}^{n-1}\P(\twi=0)+
\sum_{i=n}^\infty \P(\twi=0)(1-1/n)^i.
\eeq
Then the monotonicity of Lemma 3.1 allows us to bound the last term from above by
\beq
\nonumber
(1-1/n)^n n\P(\twn=0)
\leq e^{-1}\sum_{i=0}^{n-1}\P(\twi=0),
\eeq
where the inequality is justified again by the monotonicity
of $\P(\twi=0)$. The bound follows.
For the upper bound, we write
\beq
\nonumber
f(1-1/n)\geq(1-1/n)^n\sum_{i=0}^{n-1}\P(\twi=0)
\eeq
The factor in front of the above sum is bounded below by $(2e)^{-1}$
for all large enough $n$ and the bound follows.
In $d=1$ and $2$,
we conclude that $\sum_{i=0}^{n-1}\P(\twi=0)$ is of the order of
$\sqrt n$ and
$\log n$, respectively.
In $d\geq3$, it is bounded. \square
\proclaim Corollary 3.4. Let $\la$ be a non null vector in $\z^d$ and assume
$\sigma^2>0$.
Then
\beq
\nonumber
\var(\tczn) \hbox { is of the order of } \cases{ \sqrt {n} &{if
$d=1$}\cr \log n &{if $d=2$}\cr
\hbox{constant} & if $d\geq 3$,}
\eeq
where $\tczn=\tezn\la^\ast$ was defined in Proposition 2.3.
\noindent{\bf Proof.} Follows from Lemma 3.3 and Proposition 2.3. \square
\noindent{\bf Remark 3.2.} Corollary 3.4 is the discrete-time version of
(\ref{eq:vet}). Notice however that there is a difference. While in the
discrete time one multiplies by $\sigma^2$, in the continuous time the
constant is $\sigma^2+(\mu\lambda^*)^2$. In particular this means that for the
smoothing process, for which $\sigma^2=0$, if $\mu\neq 0$, we get non trivial
fluctuations for the continuous time case and no fluctuations for the discrete
time. In the latter case the motion reduces to a hyperplane that moves
deterministically at speed $\mu$. These fluctuations come from the
fluctuations of the Poisson processes.
\noindent{\bf Remark 3.3.} Exact expressions for the constants of proportionality
in 1 and 2 dimensions follow from those mentioned in Remark 3.1 and from
strengthenings of~(\ref{eq:gf}). For the latter, in 2 dimensions,
we can use the lower
and upper bounds $f(1-\log n/n)$ and $f(1-1/n\log n)+const$.,
respectively, instead of~(\ref{eq:gf}). In 1 dimension,
we can use {\bf P20.2} in Spitzer(1964), on p.225. We leave the details
for the interested reader.
%%%%%%%%%%%%%%%%%% section 4 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Central limit theorem}
\setcounter{equation}{0}
\label{sec:clt}
In this section we prove a central limit theorem for the forward walk $\tilde
Y_n$ when conditioned to the values of ${\cal F}_n$, the sigma algebra
generated by $u_k(\cdot,\cdot)$ for $k\leq n$. Of course it will then hold
also for the coordinate heights of the RAP starting with a planar
configuration.
For simplicity, we study only the one dimension nearest neighbor case, where
$u_1(i,j)=0$ for $|i-j|>1$. Other cases can be similarly treated. We also
assume that $\sigma^2$ as defined in Proposition 2.3 is strictly positive
(which amounts to asking that $u_n(\cdot,\cdot)$ be non deterministic).
\proclaim Theorem 4.1. As $n\to\infty$,
$\pn^{-1/2}(\tilde Z_n-n\mu)$
converges to a centered normal random variable with variance $\sigma^2$.
Notice that in the case we are considering $\pn$ is of the order of
$\sqrt n$ as argued at the end of the previous section. By definition, $\tilde
Z_n$ is the height at zero at time $n$ when the starting configuration of
heights is the hyperplane $X_0(i) = i$ for all $i$. Hence Theorem 4.1 shows
also a central limit theorem for the height at the origin when the initial
configuration is a hyperplane.
\noindent{\bf Proof.}
To establish this CLT, it is enough to check the two conditions of
the corollary to Theorem 3.2 in Hall and Heyde (1980)
(identifying $X_{ni}$ there with $\pn^{-1/2}\tui$ and
${\cal F}_{ni}$ with ${\cal F}_{i}$).
The first condition is trivially satisfied while the second one can be written as
\beq
\label{eq:clt1}
V_n^2=\pn^{-1}\sum_{i=1}^n\left\{\E[\tui^2|{\cal
F}_{i-1}]-\E[\tui^2]\right\}\to 0
\eeq
in probability as $n\to\infty$. Calculating the expectations above
by further conditioning on $Y_{i-1}$ (as in many of the calculations so far),
we can write~(\ref{eq:clt1}) as
\beq
\label{eq:clt2}
\si^2\pn^{-1}\sum_{i=0}^{n-1}\left[\P(\twi=0|{\cal
F}_{i})-\P(\twi=0)\right]\to0.
\eeq
It suffices to prove that the variance of the left hand side of~(\ref{eq:clt2})
goes to $0$ as $n\to\infty$.
For that, write the variance of the sum in~(\ref{eq:clt2}) as
\beq
\label{eq:clt3}
\sum_{j=1}^{n-1}\E\left\{\sum_{i=0}^{n-1}
\left[\P(\twi=0|{\cal F}_{i\wedge j})-\P(\twi=0|{\cal F}_{i\wedge(j-1)})\right]
\right\}^2.
\eeq
Some terms in the sum inside the expectation sign cancel to yield
\beq
\label{eq:clt4}
\sum_{i=j}^{n-1}
\left[\P(\twi=0|{\cal F}_{j})-\P(\twi=0|{\cal F}_{j-1})\right].
\eeq
We look now into the summands above. We first condition in $\twj$ to
get
\beq
\label{eq:clt5}
\sum_k\P(\twi=0|\twj=k)\left[\P(\twj=k|{\cal F}_{j})-\P(\twj=k|{\cal F}_{j-1})\right].
\eeq
We further condition in $\tyju$ and $\hyju$ to get
\beqn
\nonumber
\sum_{k,l,l'}\!\!\!\!\!\!&&\!\!\!\!\!\!\P(\twi=0|\twj=k)\\
\nonumber
&\times&\!\!\!\left[
\P(\twj=k|\tyju=l'+l,\hyju=l',{\cal F}_{j})-\P(\twj=k|\tyju=l'+l,\hyju=l')\right]\\
\label{eq:clt6}
&\times&\!\!\!\P(\tyju=l'+l,\hyju=l'|{\cal F}_{j-1}).
\eeqn
Notice that the possible values for $k$ are $l-1$, $l$ and $l+1$ and that
\beqnn
\nonumber
\P(\twj=l-1|\tyju=l'+l,\hyju=l',{\cal F}_{j})\= (1-u_{j,l'+l})u_{j,l'},\\
\nonumber
\P(\twj=l|\tyju=l'+l,\hyju=l',{\cal F}_{j})\=
(1-u_{j,l'+l})(1-u_{j,l'})+u_{j,l'+l}u_{j,l'}\quad\mbox{and}\\
\nonumber
\P(\twj=l+1|\tyju=l'+l,\hyju=l',{\cal F}_{j})\= (1-u_{j,l'})u_{j,l'+l}.
\eeqnn
Substituting in~(\ref{eq:clt6}), we get, after some more manipulation,
\beqn
\nonumber
\sum_{l,l'}\!\!\!\!\!\!&&\!\!\!\!\!\!
\left[\P(\twi=0|\twj=l+1)-\P(\twi=0|\twj=l)\right]\\
\nonumber
\!\!\!\!\!\!&\times&\!\!\!\!\{u_{j,l'+l}(1-u_{j,l'})-\E[u_{j,l'+l}(1-u_{j,l'})]\}
\P(\tyju=l'+l|{\cal F}_{j-1})\P(\tyju=l'|{\cal F}_{j-1})\\ \nonumber
\\
\nonumber
+\sum_{l,l'}\!\!\!\!\!\!&&\!\!\!\!\!\!
\left[\P(\twi=0|\twj=l-1)-\P(\twi=0|\twj=l)\right]\\
\label{eq:clt7}
\!\!\!\!\!\!&\times&\!\!\!\!\{u_{j,l'}(1-u_{j,l'+l})-\E[u_{j,l'+l}(1-u_{j,l'})]\}
\P(\tyju=l'+l|{\cal F}_{j-1})\P(\tyju=l'|{\cal F}_{j-1}).
\eeqn
We will analyze explicitly only the first sum above by taking the sum over $i$,
squaring
and taking expectations. The second one has the same behavior.
To alleviate notation, let us define
\beqn
\label{eq:cltdef1}
\ajl&:=&\sum_{i=j}^{n-1}\left[\P(\twi=0|\twj=l+1)-\P(\twi=0|\twj=l)\right]\\
\label{eq:cltdef2}
\overline{u_{j,l'+l}(1-u_{j,l'})}&:=&
u_{j,l'+l}(1-u_{j,l'})-\E[u_{j,l'+l}(1-u_{j,l'})]\\
\label{eq:cltdef3}
\pj(l)&:=&\P(\tyju=l|{\cal F}_{j-1}).
\eeqn
We want then to estimate
\beq
\label{eq:clt8}
\E\left\{\sum_{l,l'}\ajl\overline{u_{j,l'+l}(1-u_{j,l'})}\pj(l'+l)\pj(l')\right\}^2.
\eeq
We will show below that~(\ref{eq:clt8}) is (at most) of the order of $\P(\twj=0)$.
Substituting in~(\ref{eq:clt3}) and performing the sum in $j$, we get
a term of the order of $\pn$. To find (an upper bound to) the variance
of~(\ref{eq:clt2}),
we multiply by constant times $\pn^{-2}$ (already taking into account the
second sum in~(\ref{eq:clt7})). Since $\pn\to\infty$ as
$n\to\infty$, the last variance then goes to 0 as $n\to\infty$ as
desired.
We rewrite~(\ref{eq:clt8}) as
\beq
\label{eq:clt9}
\E\left\{\sum_{l,l',k,k'}\ajl\ajk
\E[\overline{u_{j,l'+l}(1-u_{j,l'})}\,\overline{u_{j,k'+k}(1-u_{j,k'})}]
\pj(l'+l)\pj(l')\pj(k'+k)\pj(k')\right\},
\eeq
where we use the independence between the $u$'s and $p$'s (the $A$'s are
deterministic).
Now we bound~(\ref{eq:clt9}) by constant times
\beq
\label{eq:clt10}
\E\left\{\sum_{l,l',k,k'}
\left|\E[\overline{u_{j,l}(1-u_{j,l'})}\,\overline{u_{j,k}(1-u_{j,k'})}]\right|
\pj(l)\pj(l')\pj(k)\pj(k')\right\},
\eeq
since $|\ajl|$ is uniformly bounded in $l$, $j$ and $n$ (we will
prove this result below).
The expectation inside the sum above vanishes if
$\{l,l'\}\cap\{k,k'\}=\emptyset$. The sum over pairs
with full intersection is bounded above by
\beq
\label{eq:clt11}
2\E\left\{\sum_{l,l'}
|\E[\overline{u_{j,l}(1-u_{j,l'})}]^2|
\pj^2(l)\pj^2(l')\right\}.
\eeq
The expectation inside the sum is constant for $l\ne l'$ and separately
for $l=l'$. Thus it is
bounded above uniformly by a constant, so this part of the sum
is bounded above by constant times
\beq
\label{eq:clt12}
\E\left\{\sum_{l,l'}\pj^2(l)\pj^2(l')\right\}=
\E\left\{\sum_{l}\pj^2(l)\right\}^2
\leq \E\left\{\sum_{l}\pj^2(l)\right\}
=\P(\twj=0).
\eeq
For pairs with intersection at only one point, we argue similarly
to bound the sum by constant times
\beq
\label{eq:clt13}
\E\left\{\sum_{l,l',k}\pj^2(l)\pj(l')\pj(k)\right\}=
\E\left\{\sum_{l}\pj^2(l)\right\}
=\P(\twj=0).
\eeq
All cases have been considered and we thus have the result. \square
\vskip 5mm
\proclaim Lemma 4.2.
$|\ajl|$ is uniformly bounded in $l$, $j$ and $n$.
\noindent{\bf Proof.}
By the time translation invariance of the model, it suffices to
prove the uniform boundedness of
\beq
\label{eq:ubd1}
\sum_{i=0}^{n}\left[\P(\twi=0|\tw=l)-\P(\twi=0|\tw=l+1)\right]
\eeq
for $l\geq0$.
Consider the sum $\sum_{i=0}^{n}\P(\twi=0|\tw=l+1)$ and condition on
the time $T$ that the walk starting in $l+1$ first hits $l$ to get
\beq
\label{eq:ubd2}
\sum_{i=1}^{n}\sum_{j=1}^{i}\P(\twi=0|\twj=l)\P(T=j)
=\sum_{i=1}^{n}\sum_{j=1}^{i}\P(\twij=0|\tw=l)\P(T=j),
\eeq
by translation invariance again.
Reversing the order of the sum in~(\ref{eq:ubd2})
\beq
\label{eq:ubd3}
\sum_{j=1}^{n}\sum_{i=j}^{n}\P(\twij=0|\tw=l)\P(T=j)
\eeq
and the variable $i$ to $k=i-j$
\beq
\label{eq:ubd4}
\sum_{j=1}^{n}\sum_{k=0}^{n-j}\P(\twk=0|\tw=l)\P(T=j)
=\E\sum_{i=0}^{(n-T)^+}\P(\twi=0|\tw=l).
\eeq
Thus~(\ref{eq:ubd1}) becomes
\beq
\label{eq:ubd5}
\E\sum_{i=(n-T)^+}^{n}\P(\twi=0|\tw=l).
\eeq
The probabilities inside the sum are maxima when $l=0$ (by the martingale
argument already used in the proof of Lemma 3.1).
Rewrite~(\ref{eq:ubd5}) (for $l=0$) as
\beq
\label{eq:ubd6}
\sum_{j=0}^{n}\sum_{i=(n-j)}^{n}\P(\twi=0|\tw=0)\P(T=j)+
\P(T>n)\sum_{i=0}^{n}\P(\twi=0|\tw=0).
\eeq
\vskip 5mm
Changing variables $i$ to $k=n-i$ in the first sum, it becomes
\beq
\label{eq:ubd7}
\sum_{j=0}^{n}\sum_{k=0}^{j}\P(\twnk=0|\tw=0)\P(T=j)
\eeq
and changing the order of summation,
\beq
\label{eq:ubd8}
\sum_{k=0}^{n}\sum_{j=k}^{n}\P(\twnk=0|\tw=0)\P(T=j)
=\sum_{k=0}^{n}\P(\twnk=0|\tw=0)\P(n\geq T\geq k).
\eeq
Summing the second term in~(\ref{eq:ubd6}), we finally get
\beq
\label{eq:ubd9}
\sum_{k=0}^{n}\P(\twnk=0|\tw=0)\P(T\geq k).
\eeq
Now since $\twi$ is a one dimensional simple symmetric
random walk (it is homogeneous off the origin and $T$ does not depend
on the transitions from the origin), we have that
$\P(T\geq n)$ is of the order
of $n^{-1/2}$ (see Spitzer (1964) {\bf P32.1} on p.378 and {\bf P32.3} on p.381).
By the arguments at the end of
the previous section, so is $\P(\twn=0|\tw=0)$.
This implies that~(\ref{eq:ubd9}) is bounded
in $n$.
The argument is finished.
\square
%%%%%%%%%%%%%%%%%% section 5 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Convergence to an invariant distribution}
\setcounter{equation}{0}
\label{sec:inv}
In this section we prove that the process as seen from the height at the
origin starting with a deterministic
hyperplane converges weakly.
Let
\beq
\nonumber
\hzn:=Z_n-Z_n(0),
\eeq
with $\hzn(x):=Z_n(x)-Z_n(0)$ for all $x$, where $Z_n(x) = \E(Y^{x,n}_n\vert
{\cal F}_n)$. Proposition 5.1 below contains
also the discrete-time version of the asymptotics announced in
(\ref{eq:vem}). In Proposition 5.2 we show that the weak limit is invariant
for the process.
\proclaim Proposition 5.1. $\hzn$ converges weakly. Let $\hzf$ denote
the weak limit.
If $\sigma^2>0$, then
\beq
\label{eq:vemd}
\var[\hzf(i) - \hzf(j)]
\hbox { is of the order of }
\cases{
{\vert i-j\vert} &{if $d=1$},\cr \log \vert i-j\vert &{if $d=2$},\cr
\hbox{constant} & if $d\geq 3$.\cr}
\eeq
If $\sigma^2=0$, then $\hzn(i) - \hzn(j) = \hat Z_0(i) - \hat Z_0(j)$ for all
$n\geq 0$.
\noindent{\bf Proof.}
$\hzn$ has the same distribution as $\bzn:=\tezn-\tezn(0)$. We start
proving the
proposition by showing that for each $x\in \Z^d$, $\E(\bzn(x)-x)^2$ is
uniformly bounded in $n$. Since
it is a martingale, it then converges almost surely and in $L^2$ (see Theorem
2.5 and Corollary 2.2 in Hall and Heyde (1980) for instance). Given $x$,
\beq
\nonumber
\var(\bzn(x)-x)=2\var(\tezn(0))-2\C(\tezn(0),\tezn(x)-x),
\eeq
where $\C$ denotes the covariance sign, since $\tezn(x)-x$ and $\tezn(0)$ are
equally distributed by the translation invariance of the model. We already
have an expression for the first term on the right hand side from
Proposition~2.3. We need to derive one for the last one, which we do now. We
already have from the proof of Proposition~2.3 that
\beq
\nonumber
\tezn=\sum_{i=1}^{n}\tui,
\eeq
with $\tui=\E(\theta_{i,\tilde Y_{i-1}}|{\cal F}_{i}).$
Similarly,
\beq
\nonumber
\tezn(x)-x=\sum_{i=1}^{n}\txui,
\eeq
with $\txui=\E(\theta_{i,\tilde Y_{i-1}^x}|{\cal F}_{i})$, where $\tynx$
is a random walk in $\fn$ like $\tyn$, but starting from $x$. Thus,
\beqnn
\nonumber
\C(\tezn(0),\tezn(x)-x)\=\E\left(\sum_{i,j=1}^n
[\E(\theta_{i,\tilde Y_{i-1}}|{\cal F}_{i})-\E(\theta_{i,\tilde Y_{i-1}})]
[\E(\theta_{j,\tilde Y_{j-1}^x}|{\cal F}_{j})-
\E(\theta_{j,\tilde Y_{j-1}^x})]\right)\\
%
\=\E\sum_{i,j}\sum_{k,l}(\wik-\E\wik)(\wjl-\E\wjl)
\P(\tcyiu=k|\fiu)\P(\tyjux=l|\fju)\\
%
\=\si^2\sum_i\E\sum_k \P(\tcyiu=k|\fiu)\P(\tyiux=k|\fiu),
\eeqnn
the last equality due to the independence of at least one of the $\theta$'s
(the one with higher subscript) to the conditional probabilities generally and
to the other $\theta$ when $i\ne j$ and the null mean of the $\theta$'s.
$\si^2$ is the second moment of the $\theta$'s.
Now, reasoning as in the proof of Proposition~2.3,
\beq
\nonumber
\E\sum_k \P(\tcyiu=k|\fiu)\P(\tyiux=k|\fiu)=\P(\wius=0),
\eeq
where $\wis$ is a random walk like $\wi$ but starting in $x$.
From the above discussion, we conclude that
\beq
\label{eq:im2}
\E(\bznu(x)-x)^2=2\si^2\sum_{i=0}^{n-1}\left(\P(\wi=0)-\P(\wis=0)\right).
\eeq
After a similar reasoning as used in the proof of Lemma 4.2 above,
one sees that the last expression equals
\beq
\label{eq:im1}
\sum_{k=0}^{n}\P(\twnk=0|\tw=0)\P(T_x\geq k),
\eeq
where $T_x$ is the hitting time of the origin by the walk $\twi$
starting at $x$.
Since the above expression is the variance of a martingale, it
is monotone in $n$. To show it is bounded, it suffices to consider
its power series (in the variable $0\leq s<1$) and show it is bounded as $s\to1$
when multiplied by $1-s$. The limiting value of this procedure gives the
limit in $n$ of~(\ref{eq:im1}).
Now
\beqnn
\nonumber
F(s)&:=&\sum_{n=0}^{\infty}\sum_{k=0}^{n}\P(\twnk=0|\tw=0)\P(T_x\geq k)s^n\\
\nonumber
\=f(s)g(s),
\eeqnn
where $f(s)$ is the power series of $\P(\twn=0|\tw=0)$ and $g(s)$ that
of $\P(T_x\geq n)$.
We have $g(s)=(1-s)^{-1}(1-s\psi_{T_x}(s))$ (using the notation of
Section 3) and from~(\ref{eq:ps}), we get
$$(1-s)F(s)=\frac{1-s\psi_{T_x}(s)}{1-s+(1-\ga)s[1-\psi_T(s)]}.$$
Now similar arguments as in the proof of Lemma 3.2
apply and we conclude that since this expression is bounded as
$s\to1$ for $\ga$ and $(p_y)$ of the homogeneous case (as follows
from Spitzer(1964) {\bf T32.1} on p.378),
then so it is for the non-homogeneous case of our model.
In 3 or more dimensions, the expression in~(\ref{eq:im2}) is
the difference of two positive quantities, the first of which
is bounded in $n$ (of course then also the second, which is
smaller), as follows from Spitzer(1964), {\bf P7.9} on p.75.
We conclude that it is bounded in $n$ and $x$.
Now we prove the asymptotics (\ref{eq:vemd}). Assume $\sigma^2>0$.
In this case, the space scalings of the invariant measure
follow from the dependence in $x$ of the limit of~(\ref{eq:im2}).
We first remark that in 3 or more dimensions there is no space scaling,
since~(\ref{eq:im2}) is bounded in $x$ as well as in $n$ as seen above.
In 1 and 2 dimensions, the spatial scaling is given by a function
$a(x)$, as readily seen from Spitzer(1964), {\bf P32.1} on p.378
and~(\ref{eq:im1}).
In 1 dimension $a(x)$ scales as $|x|$ (Spitzer(1964)
{\bf P28.4} on p.345) under our hypotheses $\E(\sum_jju(0,j))^2<\infty$.
In 2 dimensions, in order to use {\bf P12.3} on p.124 of Spitzer(1964)
to state that $a(x)$ scales as $\log|x|$, we need to make the stronger
assumption of finiteness of any moment of $\sum_j j u(0,j)$
greater than 2.
The case $\sigma^2=0$ follows from (\ref{eq:im2}). \ \square
\noindent{\bf Remark 5.1.} The case $\sigma^2=0$ corresponds to the discrete
time
smoothing process with no randomness ($W\equiv 1$ in Definition (0.1)
of Liggett (1985)). In this case the hyperplanes are
invariant configurations.
\proclaim Proposition 5.2. Let $\hat X_n$ be defined by $\hat X_n(x)=
X_n(x)-X_n(0)$ be the RAP as seen from the height at the origin. Then $\hzf$
is invariant for $\hat X_n$.
\proof Let $U_0$ denote the matrix that has all the lines identical to
the vector $\{U(0,x),\,x\}$ and let $\hat U=U-U_0$.
To prove the proposition, it suffices then to show that
$\hat U\hzf^\ast=\hzf^\ast$ in distribution,
with $U$ independent from $\hzf$. But that is clear from the facts that
$\hat U\hzn^\ast=\hznu^\ast$ in distribution and that $\hat U\hzn^\ast$
converges weakly to
$\hat U\hzf^\ast$. The latter point is argued from the $L^2$ convergence
of $U\bar Z_n^\ast$ and $U_0\bar Z_n^\ast$ as follows. Given $x$,
\beqn
\E[U\bar Z^\ast_n(x)-U\bar Z^\ast_\infty(x)]^2
\=\E[U(\bar Z^\ast_n-\bar Z^\ast_\infty)(x)]^2\\
\=\E[\sum_yu(x,y)(\bar Z_n(y)-\bar Z_\infty(y))]^2\\
\le \E\sum_yu(x,y)[\bar Z_n(y)-\bar Z_\infty(y)]^2\\
\=\sum_y\pi(y-x)\E[\bar Z_n(y)-\bar Z_\infty(y)]^2\\
\=\E[\bar Z_n(0)-\bar Z_\infty(0)]^2,
\eeqn
where the inequality follows by Jensen and the last identity is due
to space translation invariance of the distribution of
$\bar Z_n(y)-\bar Z_\infty(y)$.
The same inequality is true
with $U$ replaced by $U_0$ since it amounts to replace $x$ by $0$.
One concludes
\beq
\E[\hat U\bar Z^\ast_n(x)-\hat U\bar Z^\ast_\infty(x)]^2
\leq4\E[\bar Z_n(0)-\bar Z_\infty(0)]^2.
\eeq
and $L^2$ convergence follows.
\square
\proclaim Remark 5.2. Along the same lines of Remark 3.3,
here one can find, in d=1,2, the exact limiting constant of proportionality of the
spatial fluctuations with respect to their order of magnitude,
with the extra information
from the exact asymptotics of $a(x)$ in Spitzer (1964).
%%%%%%%%%%%%%%%%%% section 6 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{The one dimensional case}
\setcounter{equation}{0}
\label{sec:1d}
In the one dimensional case we are able to treat more
general initial conditions. Let $(X_n)_{n\geq 0}$ denote the height system and
$(x_i)_{i\in\z}$ be its initial configuration.
Thus we have
$$
X_0=(x_i);\ \ \ \ x_0=0.
$$
Assume $x_{i+1}-x_i = y_i$ with $y_i$ a stationary ergodic process with
$\E y_i=\li>0$ and $\var y_i=\be^2$. Notice that this distribution satisfies
the sufficient condition for the existence of the process given by
(\ref{eq:sufcon}). Denote $\yi-\li$ by $\tyi$ and let
$\txo(i)=\xo(i)-(\li)i$ and $S_n=\E(\txo(Y_n)|\fn)$. We then have
\beq
\nonumber
X_n(0)=S_n+(\li) Z_n,
\eeq
where, as before, $Z_n = \E(Y_n|\fn)$.
\proclaim Proposition 6.1. $S_n$ and $Z_n$ are uncorrelated for all $n$. If
$\mu=\sum_j j \E u_n(j)=0$, then $\var S_n$ is of order $\sqrt n$. If $\mu\neq
0$, then $\var S_n$ is of the order of $n$.
The proposition above allows us to obtain different behavior of the
fluctuations of the height at the origin in the biased and unbiased cases. The
corollary below shows that in the unbiased case the fluctuations are
sub diffusive (variance proportional to the square root of time) but in the
biased case they are diffusive (variance proportional to the time).
\proclaim Corollary. If $\mu= 0$, then $\var X_n(0)$ is of order
$\sqrt n$. If
$\mu\neq 0$, then $\var X_n(0)$ is of the order of $n$.
\noindent{\bf Proof of Corollary.} Since $S_n$ and $Z_n$ are uncorrelated,
\var X_n(0) = \var Z_n +\var S_n$. We have shown in Corollary 3.4 that $\var
Z_n$ is of the order of $\sqrt n$ in dimension one. Then, Proposition 6.1
shows that $\var X_n(0)$ is of the order of $\var S_n$. \square
\noindent{\bf Proof.} In this proof we abuse notation by writing $Y_n$ instead
of $Y^{0,n}_n$.
\beqnn
\nonumber
\E(S_nZ_n)\=\E\left[\E(\txo(Y_n)|\fn)\E(Y_n|\fn)\right]\\
\nonumber
\=\E\left[\sum_k\txo(k)\P(Y_n=k|\fn)\sum_kk\P(Y_n=k|\fn)\right]\\
\nonumber
\=\E\left[\sum_{k,l}\txo(k)l\P(Y_n=k|\fn)\P(Y_n=l|\fn)\right]\\
\nonumber
\=\sum_{k,l}\E(\txo(k))l\E[\P(Y_n=k|\fn)\P(Y_n=l|\fn)]=0,
\eeqnn
since $E(\txo(k))=0$ for all $k$. This proves uncorrelatedness.
\beqnn
\nonumber
S_n\=\sum_k\txo(k)\P(Y_n=k|\fn)\\
\nonumber
\=\sum_{k<0}\sum_{i=k}^{-1}\tyi \P(Y_n=k|\fn)+\sum_{k>0}\sum_{i=1}^{k}\tyiu
\P(Y_n=k|\fn)\\
\nonumber
\=\sum_{i>0}\tyiu \P(Y_n\geq i|\fn)-\sum_{i<0}\tyi \P(Y_n\leq i|\fn)\\
\nonumber
\=\sum_{i\geq0}\tyi \P(Y_n>i|\fn)-\sum_{i<0}\tyi \P(Y_n\leq i|\fn)
\eeqnn
Thus
\beqnn
\nonumber
\E(S_n^2)\=\E\left[\sum_{i\geq0}\tyi \P(Y_n>i|\fn)\right]^2\\
\nonumber
\+\E\left[\sum_{i<0}\tyi\P(Y_n\leq i|\fn)\right]^2\\
\nonumber
&&-2\E\left[\sum_{i\geq0}\tyi \P(Y_n>i|\fn)
\sum_{i<0}\tyi \P(Y_n\leq i|\fn)\right]
\eeqnn
The first expectation on the right equals
\beq
\nonumber
\sum_{i,j\geq0}\E(\tyi\tyj)\E[\P(Y_n>i|\fn)\P(Y_n>j|\fn)]
\eeq
which is then seen to be
\beq
\label{eq:unc1}
\be^2\sum_{i\geq0}\E[\P^2(Y_n>i|\fn)],
\eeq
since
$\E(\tyi\tyj)=\E(\tyi)\E(\tyj)=0$ for all $i\neq j$ and $\E\tyi^2=\be^2$ for all
$i$.
Similarly, the second expectation equals
\beq
\label{eq:unc2}
\be^2\sum_{i<0}\E[\P^2(Y_n\leq i|\fn)]
\eeq
and the last one vanishes.
We get the following upper and lower bounds for $\E(S_n^2)$, (the bounds are
based on $\P^2(\cdots)\leq \E(\P^2(\cdots \vert {\cal F}_n))\leq \P(\cdots)$)
\beq
\nonumber
\be^2\left(\sum_{i\geq0}\P(Y_n>i)+\sum_{i<0}\P(Y_n\leq i)\right)=\be^2\E(|Y_n|)
\eeq
and
\beq
\nonumber
\be^2\left(\sum_{i\geq0}\P^2(Y_n>i)+\sum_{i<0}\P^2(Y_n\leq i)\right),
\eeq
respectively. When $\mu=\E \sum_j ju(0,j) = 0$, the random walk $Y_n$ has
zero mean and since the variance is positive and finite these bounds are both
of order $\sqrt n$. When $\mu\neq 0$, the random walk $Y_n$ is not centered
and the bounds are of order $n$. \square
\vskip 5mm
%%%%%%%%%%%%%%%%%% section 7 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Continuous time}
\setcounter{equation}{0}
\label{sec:cont}
%\newcommand{\uni}{u_{n,i}}
\newcommand{\lnk}{\beta_{n-k,i}}
\newcommand{\lni}{\beta_{n,i}}
\newcommand{\lki}{\beta_{k,i}}
%\newcommand{\vki}{v_{k,i}}
%\newcommand{\unk}{u_{n-k,i}}
\renewcommand{\+}{&&+}
\renewcommand{\=}{&=&}
In this Section we discuss the continuous time process described in
the introduction. Each site in $\Z^d$ is provided with an exponential
clock of rate 1. Every time it rings, the corresponding height jumps
to a random convex combination of its neighboring heights. Let
$\eta_t,\,t\geq0$ denote this process. The way we choose to show the
results for this process is to approximate it by a family of discrete
time processes, looking much the same as the discrete process we
considered before. A direct approach should also work.
Let $T(i,n)$ be the successive times of the Poisson process of site
$i$, $n\geq 1$ and let $u_k(i,\cdot)$ the $k$-th (independent)
realization of the random vector $u(i,\cdot)$. In addition to the
setting of that process, consider further a family
$\{\lni,\,n\geq1,\,i\in\z\}$ of (i.i.d.~Bernoulli) random variables
(with parameter $\P (T(i,1)\leq 1/N)= 1-\exp(-1/N)$), where $N$ is a
positive integer (scaling parameter), defined by $\lni=\one\{T(i,k)\in
[(n-1)/N, n/N)$ for some $k\geq 1\}$ and let $K_n=\sum_{k=1}^n\lki$.
For a fixed initial configuration $X_0\in \R^{\Z^d}$ define the
process $(X^N_n)_{n\geq0}$ by
\beqnn
\nonumber
X^N_0 (i)\= X_0(i),\\
\nonumber X^N_n(i)\=\lni\sum_j
u_{K_n}(i,j)X^N_{n-1}(j)+(1-\lni)X^N_{n-1}(i),\quad n\geq1.
\eeqnn
Let ${\cal F}^N_n$ be the sigma algebra generated by
$\{\beta_{k,i},u_k(i,j),1\leq k \leq n,i\in\z^d\}$. Again $X^N_n(0)$ is described
as the conditional expectation of a function of a random walk:
\beqn
\label{eq:ducon}
X^N_n(0)=E(X^N_0(Y^{N,0,n}_n)\ \vert\ {\cal F}^N_n),
\eeqn
where $Y^{N,0,n}_k,\,k=0,\ldots,n,$ is the backwards
random walk starting at the origin and for $0\leq k\leq n$,
\beqnn
\nonumber
\P(Y^{N,0,n}_k=j|Y^{N,0,n}_{k-1}=i)
\=\lnk u_{K_{n-k}}(i,j)+(1-\lnk) \one\{i=j\} \\
&:=& u^N_{n-k}(i,j).
\eeqnn
Notice that $\{u^N_{k}(i,\cdot):k\geq 0,i\in \Z^d\}$ is a family of iid
vectors.
The same arguments of Section 2 yield
$$
\var X^N_n(0) = \si^2_N\E M^N(n)
$$
where $M^N(n)$ is the number of returns to the origin up to time $n$
of a (symmetric) random walk $D^N_n$ with transition probabilities
$$
\gamma^N(\ell,k)= \sum_j\E(u^N_1(0,j)u^N_1(\ell,j+k))
$$
and, reminding that $u_1(i,j)$ has the same distribution as $u(i,j)$,
\beqnn
\nonumber
\si^2_N\=\var\left[\hbox{$\sum_j \lambda j^\ast u^N_1(0,j)$}\right] \\
\nonumber \= {1\over N} \left(\var \left[\hbox{$\sum_j \lambda j^\ast
u(0,j)$}\right] + \left[\E\hbox{$\sum_j \lambda j^\ast
u(0,j)$}\right]^2\right) - {\E\left[\sum_j \lambda j^\ast
u(0,j)\right]^2\over N^2} \\
\nonumber &=&{1\over N}\left(\si^2 +(\lambda \mu^\ast)^2\right)+o(1/N)
\eeqnn
where
$\si^2$ is defined in Proposition 2.3 and $\mu = \E\left(\sum_j j
u(0,j)\right)$. It is a standard matter to prove that the process
$Y^{x,t}_s$, $s\in[0,t]$
defined by the almost sure coordinatewise limit
$$
Y^{x,t}_s= \lim_{N\to\infty}Y^{N,x,\lfloor Nt\rfloor}_{\lfloor Ns\rfloor}
$$
is a random walk with transition rates
$\{u_\cdot(\cdot,\cdot)\}$. This walk uses the
Poisson marks and the realizations of the $u$ backwards in time from $t$ to
$0$.
Using the duality formula (\ref{eq:ducon}),
we prove below (see Lemma 7.1) that
$\E(Y^{N,x,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor}
\vert{\cal F}^N_{\lfloor Nt\rfloor})$
%\beqn
%\label{eq:66}
%\E\left(\left.Y^{N,x,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor}\
%\right\vert\ {\cal F}^N_{\lfloor
%Nt\rfloor}\right)
%\eeqn
converges in $L^1$ to
$\E(Y^{x,t}_t\vert{\cal{F}}_t)$, where ${\cal{F}}_t$ is the sigma algebra
generated by the Poisson processes and the realizations of the
$\{u_\cdot(\cdot,\cdot)\}$
corresponding to the Poisson marks in the interval $[0,t]$.
It is then easy to
show that the limiting process
\beq
\eta_t= \lim_{N\to\infty}X^N_{\lfloor Nt\rfloor}
\ \ \hbox{ and }\ \ X^N_0=\eta_0
\eeq
exists, has generator (\ref{eq:L}) and
\beq
\eta_t(x) = \E(\eta_0(Y^{x,t}_t)\vert{\cal{F}}_t).
\eeq
\proclaim Lemma 7.1. If $\E\eta_0 (j) \leq const.~\vert j \vert^2$ for all $j$
then $X^N_{\lfloor Nt\rfloor}$ converges to $\eta_t$ in $L^1$
as $N\to\infty$ for all fixed $t$. If $\E\eta_0(j)^2\leq const.~\vert j\vert^2$
for all $j$ then there is also convergence in $L^2$.
\noindent{\bf Proof.} Consider the event
\beqnn
A^N_t\=\{Y^{x,t}_s\,\mbox{jumps twice in a time
interval $[(n-1)/N,n/N)$ for some $1\leq n\leq \lfloor Nt\rfloor$}\\
&&\,\,\,\,\mbox{{\em or}\,\,\, jumps once
in $[\lfloor Nt\rfloor,\lfloor Nt\rfloor+1/N)$\}}.
\eeqnn
Then
\beqn
\label{eq:asl}
\E|X^N_{\lfloor Nt\rfloor}-\eta_t|\le
\E|\eta_0(Y^{0,t}_t)-
\eta_0(Y^{N,0,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor})|\\
%
\=\E[|\eta_0(Y^{0,t}_t)-
\eta_0(Y^{N,0,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor})|;A^N_t].
\eeqn
Notice that ${\cal F}^N_{\lfloor Nt\rfloor}\subset{\cal F}_t$ for all $t$.
The latter expectation can be written, disregarding constant factors,
\beqn
&&\sum_{i,j}\E|\eta_0(i)-\eta_0(j)|
\P(Y^{0,t}_t=i,Y^{N,0,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor}=j,A^N_t)\\
%
\label{eq:dec1}
\le \sum_{i,j}(\E|\eta_0(i)|+\E|\eta_0(j)|)
\P(Y^{0,t}_t=i,Y^{N,0,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor}=j,A^N_t)\\
%
\le \sum_{i,j}(i^2+j^2)
\P(Y^{0,t}_t=i,Y^{N,0,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor}=j,A^N_t)\\
\label{eq:dec}
\=\E[(Y^{0,t}_t)^2;A^N_t]+
\E[(Y^{N,0,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor})^2;A^N_t]
\eeqn
and the argument for convergence in $L^1$ closes with the observation that
$(Y^{0,t}_t)^2$ and
$(Y^{N,0,\lfloor Nt\rfloor}_{\lfloor Nt\rfloor})^2$ are uniformly integrable
and $\P(A^N_t)\to0$ as $N\to\infty$ for all fixed $t$,
both of which assertions are not difficult to check.
Convergence in $L^2$ follows the same steps,
with the $|\cdots|$ expressions
in~(\ref{eq:asl}-\ref{eq:dec1}) replaced by $[\cdots]^2$.
(The corresponding of inequality~(\ref{eq:asl})
follows by Jensen.) \square
Now, by Lemmas 2.1 (duality formula) and 7.1, when $\eta_0(x)\equiv x$
\beqnn
\nonumber
\var \eta_t(0)
\= \lim_{N\to\infty} \var X^N_{\lfloor Nt\rfloor}(0)\\
\nonumber
\= \lim_{N\to\infty} \si^2_N \E M^N(\lfloor Nt\rfloor)\\
\nonumber
\= (\si^2+(\lambda \mu^\ast)^2)\E M(t),
\eeqnn
where $M(t)$ is the time spent in the origin up to $t$
by a continuous
time random walk $D_t$ with transition rates given by
\beqn
\label{eq:rw1}
q(0,k)=\sum_j\E(u(0,j)u(0,j+k))
\eeqn
and, for $\ell\ne0$,
\beqn
\label{eq:rw2}
q(\ell,\ell+k)=(\pi_k+\pi_{-k}).
\eeqn
Now the asymptotic variance for the continuous case follows from the
asymptotics of the number of visits of a continuous time symmetric random walk
with an inhomogeneity at the origin. The asymptotics of Section 2 can be
obtained also for the continuous time walk.
The convergence to an invariant measure follows
for the continuous case similarly as above,
by using the discrete time approximation and the fact that
$\E(Y_t\given {\cal F}_t)$ is a martingale.
Here $Y_t$
is the limiting process obtained from $Y^{N}_{\lfloor Nt\rfloor}$,
the walk with transitions
\beqnn
\nonumber
\P(Y^{N}_k=j|Y^{N}_{k-1}=i)\=\lki u_{K_k}(i,j)+(1-\lki) \one\{i=j\} \\
\nonumber
&=:& u^N_{k}(i,j).
\eeqnn
We have
\beqnn
\nonumber
\var[\eta_t(x)-\eta_t(0)]\=\lim_{N\to\infty}2\si_N^2[\E M^N(\lfloor
Nt\rfloor) -\E M_x^N(\lfloor Nt\rfloor)]\\
\nonumber
\=(\si^2+(\lambda \mu^\ast)^2)[\E M(t)-\E M_x(t)],
\eeqnn
where the subscripts $x$ mean that the corresponding random walk
starts at $x$.
Now the fact that the last quantity is bounded and the martingale
property imply the convergence. The fact that the limit is invariant
follows as in Proposition 5.2.
We leave the extension of the results of Sections 4 and 5 to
the reader, who may employ the approximation above.
%%%%%%%%%% Liggett %%%%%%%%%%%%%
\noindent{\bf Comparison with Liggett's results.} In display (0.2) of Chapter
IX, Liggett (1985) defines linear systems using families of random operators
$A_z(x,y)$ and linear coefficients $a(x,y)$. Our process corresponds to the
choice
$a(x,y) = 0$ for all $x$, $y$ and
$$
A_z(x,y) = \cases{u(x,y) &if $x=z$ \cr
\one\{x=y\} & if $x\neq z$ \cr}.
$$
Then conditions (1.3) and (1.4) of Liggett are satisfied. On
the other hand the hypothesis of his Lemma 1.6 reads
$$
\sup_x \E \sum_y u(x,y) = \sup_x \E \sum_y u(0,y-x) = \E\sum_y u(0,y) = 1
$$
by translation invariance. This implies that also Liggett's (1.5) is satisfied
and his construction works for our case. We have proposed two somehow simpler
constructions. One of them, sketched in the introduction exploits the
existence of an almost sure duality relation. The other is a standard passage
from discrete to continuous time but without hard estimates.
It is also interesting to notice that when computing the two point correlation
functions in the translation invariance case of his Theorem 3.1, a function
called there $q_t(x,y)$ plays a fundamental role (see page 445). While in the
general case $\sum_y q_t(x,y)$ is not expected to be one, in the RAP case
$q_t(x,y)$ are the probability transition functions at time $t$ of the random
walk with rates $q(x,y)$ given by (\ref{eq:rw1}) and (\ref{eq:rw2}).
His Theorem 3.17 can be applied to our process to yield weak convergence
to constant positive configurations, when starting with translation invariant
positive initial conditions. He does not treat the initial conditions we
considered in Theorem 5.1 (because his work has a different perspective
presumably).
%%%%%%%%%% nearest neighbors %%%%%%%%%%
\noindent{\bf The nearest neighbors case. Conservative nearest
particle systems.} Consider $d=1$ and $u(i,j) = 0$ if $\vert
i-j\vert>1$. In this case if the initial heights are ordered, that is
$\eta_0(i) <\eta_0(i+1)$ for all $i$, then the same will occur for
latter times. If we project the heights on a vertical line, we obtain
a point process. We can interpret that at each event of this point
process there is a particle. If $\eta_0(0)=0$, then the initial point
process has a particle at the origin. The dynamics obtained by this
projection can be described as follows. Each particle, after an
exponential time, chooses a random position between the neighboring
particles and jumps to it. Since the interaction occurs only with the
two nearest neighboring particles and the number of particles is
conserved, we call this motion a conservative nearest particle
system. See Liggett (1985) for examples of (non conservative) nearest
particle systems.
To study the height at the origin $\eta_t(0)$ in the height system is
equivalent to tag the particle at the origin and follow it in the
particle system. The particle interpretation also allows us to study
the projection of configurations that otherwise would not be
localized. In particular, when $u(i,i+1)= 1-u(i,i-1)$ is a uniform
random variable in $[0,1]$, any homogeneous Poisson process is
reversible for the particle system, when we disregard the labels of
the particles. To see this, notice that if $X$ and $Y$ are independent
exponential random variables of the same parameter and $U$ is an
uniform in $[0,1]$ independent of the above random variables, then
$U(X+Y)$ and $(1-U)(X+Y)$ are again independent exponential random
variables of same parameter. The reversibility of the Poisson process
for the particle system implies that the particle system as seen from
the tagged particle has as reversible measure the Poisson process
conditioned to have a particle at the origin. This in particular
implies that if initially the difference between successive heights
are iid exponentially distributed with some parameter $\rho$, then
this distribution is reversible for the process as seen from the
height at the origin.
Another consequence of this isomorphism between the particle process
and the height system is that we get the asymptotic behavior for the
motion of the tagged particle in the conservative nearest particle
system. The continuous time version of Corollary to Proposition 6.1
implies that the motion of the tagged particle in the non biased case
($\mu=0$) is sub diffusive. That is, starting with the Poisson process,
the variance of $\eta_t(0)$ will behave asymptotically as $\sqrt
t$. This corrects Theorem 9.1 in Ferrari (1996) which states wrongly
that the behavior was diffusive.
%%%%%%%%%%%%%%%%%% section 8 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Hydrodynamics}
\setcounter{equation}{0}
\label{sec:hdyn}
Let $\phi:\R^d \to \R$ be a continuous function. Let $\phi_n:\Z^d\to\R$ be
defined by
$$
\phi_n(i) = \phi(i/n)
$$
If
the distribution of $u(i,j)$ is symmetric in the sense that $u(i,j)$ and
$u(j,i)$ have the same distribution, then the following Hydrodynamic limit
holds:
$$
\lim_{n\to\infty} \E (X_{n^2t}(nr)\vert X_0 = \phi_n) = \Phi(r,t)
$$
where $\Phi$ is the solution of the heat equation with initial condition
$\phi$. In other words, $\Phi$ is the solution of
$$
\left({\partial \Phi \over \partial t}\right)_h
= D_h \left(\Delta \Phi\right)_h,
$$
where $D_h= \sum_j \vert j_h \vert u(0,j)$, where $j_h$ is the $h$-th
coordinate of the vector $j\in \Z^d$. To show the above one just
computes the derivative of the $h$-th coordinate of $\E X_{n^2t}(rn)$ as
follows
\beqnn
\nonumber
&&\lim_{n\to\infty} n^2\, \E(X_{n^2t}(rn) -X_{n^2t-1}(rn))_h\\
\nonumber
&&\quad\quad= \lim_{n\to\infty} n^2 \sum_j \E (u_{n^2t}(rn,rn+j))_h
\E(X_{n^2t-1}(rn+j)-X_{n^2t-1}(rn))_h\\
\nonumber
&&\quad\quad= \lim_{n\to\infty} n^2\!\!\! \sum_{j:j_h>0}
\E (u_{n^2t}(rn,rn+j))_h \E(X_{n^2t-1}(rn+j)+X_{n^2t-1}(rn-j)-X_{n^2t-1}(rn))_h
\eeqnn
where $(\cdot)_h$ is the $h$-th coordinate of the vector $(\cdot)$. When
$n\to\infty$ this gives the desired result.
Presumably both the law of large numbers and local equilibrium hold.
We close this Section giving a comparison between the hydrodynamics of the
$\eta_t$ process and the $\xi_t$ process studied by Kipnis, Presutti and
Marchioro (1982). Consider the continuous time $\eta_t$ process in one dimension
with nearest neighbor interaction: $u(i,i+1) = 1-u(i,i-1)$ uniformly
distributed in $[0,1]$ and $u(i,j)=0$ otherwise.
For this process the Poisson processes of rate $\rho$ on the line are
invariant (and reversible) measures.
The $\xi_t$ process is just defined by the differences:
$$
\xi_t(i) = \eta_t(i+1)-\eta_t(i)
$$
Since the Poisson process is
invariant for the $\eta_t$ process, the measure defined as product of independent
exponentials of parameter $\rho$ is invariant for the $\xi_t$ process.
Kipnis, Presutti and Marchioro (1982) consider this process but in a finite
box $\Lambda_L=\{1,\dots,L\}$ with the following boundary conditions: at the
times of a Poisson process of rate $1$, the value at site $1$ is updated
independently of everything by substituting whatever is in the site by an
exponential random variable of mean $\rho_-$; at the times of a (independent
of everything) Poisson process of rate $1$, the value at site $L$ is updated
independently of everything by substituting whatever is in the site by an
exponential random variable of mean $\rho_+$. They studied the unique
invariant measure around site $rL$, $r\in [0,1]$ and obtained that, as
$L\to\infty$, this measure converges to a product of exponentials with
parameter $\varphi(r) = r\rho_+ +(1-r)\rho_-$. This is called local
equilibrium. The function $\varphi(r)$ is the solution of the
heat equation
$$
{\partial^2\varphi \over \partial r^2}= 0
$$
with boundary conditions
$$
\varphi(0) = \rho_-;\ \ \ \ \varphi(1) = \rho_+.
$$
The corresponding solution for the hydrodynamic limit of the corresponding
$\eta_t$ process is a non equilibrium stationary profile growing with time: the
solution of the equation
$$
{\partial \Phi \over \partial t}= {\partial^2\Phi \over \partial r^2}
$$
with Neumann conditions
\beqn
\nonumber
\left.{\partial\Phi \over \partial r}\right\vert_{r=0} = \rho_-;\ \ \ \
\left.{\partial\Phi \over \partial r}\right\vert_{r=1} = \rho_+.
\eeqn
This solution is
$$
\Phi(r,t) = (\rho_+-\rho_-){r^2 \over 2}+ r \rho_- + t(\rho_+-\rho_-).
$$
And the relation is
$$
{\partial \Phi \over \partial r} = \varphi.
$$
\vskip 5mm
\noindent{\bf Acknowledgments.} We thank Errico Presutti for discussing with
us his results with Kipnis and Marchioro. We thank K. Ravishankar for some
nice discussions.
This paper was partially supported by FAPESP and N\'ucleo de Excel\^encia
"Fen\^omenos Cr\'\i ticos em Probabilidade e Processos Estoc\'asticos".
It was partially written when the second author visited the Courant Institute
(with a CNPq-NSF grant and CNPq grant no.~200125/96-6), where he found
warm hospitality.
\vskip 5mm
%\penalty -10000
\noindent{\Large{\bf References}}
\penalty 10000
\vskip 5mm
\itemize
\item E. Andjel (1985) Invariant measures and long time
behaviour of the smoothing process. {\sl Ann. Probab. \bf 13} 1:62--71.
\item R. Durrett (1996) Stochastic Spatial Models. {\it PCMI Lecture Notes},
IAS, Princeton.
\item P.A. Ferrari (1996) Limit theorems for tagged particles. {\sl
Markov Processes Relat.\ Fields \bf 2} 17--40.
\item T.\ M.\ Liggett and F. Spitzer (1981) Ergodic theorems for
coupled random walks and other systems with locally interacting components.
{\sl Z. Wahrsch. Verw. Gebiete \bf 56} 4:443--468.
\item T.\ M.\ Liggett (1985). {\sl Interacting Particle Systems.}
Springer, Ber\-lin.
\item C. Kipnis, C. Marchioro and E. Presutti (1982) Heat flow in
an exactly solvable model. {\sl J. Statist. Phys. \bf 27} 1:65--74.
\item P. Hall and C. C. Heyde (1980) {\it Martingale limit theory and its
application. Probability and Mathematical Statistics}. Academic Press, Inc.
[Harcourt Brace Jovanovich, Publishers], New York-London, 1980.
\item F. Spitzer (1964) {\it Principles of Random Walk}, Academic Press.
\enditemize
\vskip 3truemm
\parindent -20pt
\leftline{Instituto de Matem\'atica e Estat\'\i stica --- %
Universidade de S\~ao Paulo}
\leftline{Cx.\ Postal 66281 --- 05315-970 S\~ao Paulo SP --- Brasil}
\leftline{{\tt, }}
\end{document}