\documentstyle[11pt]{article}
\author{Hans-Otto Georgii \\
{\small Mathematisches Institut der Universit\"at M\"unchen}\\
{\small Theresienstr. 39, D--80333 M\"unchen}\\
{\small E--mail: georgii@rz.mathematik.uni-muenchen.de}}
\title{{\bf Mixing properties of induced random transformations}}
\textwidth 14.5cm \textheight 23.4cm
\oddsidemargin 7mm \evensidemargin -1mm \topmargin -4mm
\parindent 0.5cm
\renewcommand{\baselinestretch} {1.1} %{1.4}
%\renewcommand{\theequation}{\arabic{section}.\arabic{equation}}
\sloppy
\renewcommand{\labelenumi}{(\arabic{enumi})}
%%%%%Kurzformen allgemeiner Befehle
%\def\sect#1{\section{#1}\setcounter{equation}{0}}
\def\skip{\medskip\smallskip}
\def\ba{\begin{array}}
\def\ea{\end{array}}
\def\be{\begin{equation} \label}
\def\ee{\end{equation}}
\def\bea{\begin{eqnarray*}}
\def\eea{\end{eqnarray*}}
\def\beal{\begin{eqnarray} \label}
\def\eeal{\end{eqnarray}}
\def\bit{
\begin{itemize}\setlength{\parsep}{0ex}\setlength{\topsep}{-2ex}
\setlength{\parsep}{1ex}\setlength{\parskip}{0ex}\setlength{\itemsep}{0ex}
}
\def\eit{\end{itemize}}
\def\rf#1{(\ref{#1})}
\def\mmbox#1{\enspace\mbox{#1}\enspace}
\def\proof{\skip{\em Proof.}\quad}
\def\definition {\skip\noindent{\bf Definition.}\quad}
\def\proposition#1{\skip\noindent{\bf Proposition #1}\quad}
\def\lemma#1{\skip\noindent{\bf Lemma #1}\quad}
\def\theorem#1{\skip\noindent{\bf Theorem #1}\quad}
\def\corollary#1{\skip\noindent{\bf Corollary #1}\quad}
\def\remark#1{\skip\noindent{\bf Remark #1}\quad}
\def\remarks{\skip\noindent{\bf Remarks}\quad}
\def\example#1{\skip\noindent{\bf Example #1}\quad}
%%%%%Allgemeine Symbole
\def\R{{\sf R}}
\def\Rd{{\R}^d}
\def\Z{{\sf Z}}
\def\N{{\sf N}}
\def\Zd{{\Z}^d}
\def\sZd{\mbox{\scriptsize\sf Z}^d}
\def\ti{\to\infty}
%%%%%Griechische Buchstaben
\def\a{\alpha}
\def\b{\beta}
\def\g{\gamma}
\def\G{\Gamma}
\def\d{\delta}
\def\D{\Delta}
\def\l{\lambda}
\def\L{\Lambda}
\def\m{\mu}
\def\n{\nu}
\def\r{\rho}
\def\s{\sigma}
\def\S{\Sigma}
\def\t{\tau}
\def\th{\theta}
\def\ph{\varphi}
\def\o{\omega}
\def\O{\Omega}
%%%%%%%Script-Buchstaben
\def\cA{{\cal A}}
\def\cB{{\cal B}}
\def\cE{{\cal E}}
\def\cF{{\cal F}}
\def\cG{{\cal G}}
\def\cI{{\cal I}}
\def\cO{{\cal O}}
\def\cP{{\cal P}}
\def\cR{{\cal R}}
\def\cS{{\cal S}}
\def\cT{{\cal T}}
\def\cW{{\cal W}}
%%%%%%%%%%%Fett-Buchstaben
\def\bS{{\bf S}}
%%%%%%%Sonstige Abkuerzungen
\def\c{\circ}
\def\inv{^{-1}}
\def\eh{\mbox{\small $\frac{1}{2}$}}
\def\sp{\mbox{supp}\,p}
\begin{document}
\date{}
\maketitle
\begin{abstract} Let $S(N)$ be a random walk on a countable abelian group
$G$ which acts on a probability space $E$ by measure--preserving transformations
$(T_v)_{v\in G}$. For any $\L \subset E$ we consider the random return time $\t$ at which $T_{S(\t)}\in\L$. We show that the corresponding induced
skew product transformation is K--mixing whenever a natural subgroup of
$G$ acts ergodically on $E$.\\
{\bf Key words}: Random group action; skew product; induced transformation; K--system; random walk in a random landscape.
\end{abstract}
\section{Introduction and result}
This note is concerned with an abstract version of the following
{\em Basic example: Kasteleyn's random walk in a random scenery.} Let $\Zd$
be the integer lattice of dimension $d\ge1$, and suppose we are given
\bit\item[(i)] a stochastic coloring of $\Zd$, that is, a translation--invariant $\{0,1\}$--valued random
field $C = (C(v))_{v\in\sZd}$ --- the vertices $v\in\Zd$ with $C(v)=1$ will be called black; and
\item[(ii)] a random walk $(S_N)_{N\ge0}$ on $\Zd$ which starts at the origin
and is independent of the coloring.
\eit
Let $C_0=(C_0(v))_{v\in\sZd}$ be the conditioned coloring for which the origin
is black, and let $\t_k$ denote the $k$--th time at which the random walk
visits a black point of $C_0$. (Note that the consideration of these times
connects the two independent data, the coloring and the walk.) At time
$\t_k$, the random walker observes the coloring $C_k =
(C_0(S_{\t_k}+v))_{v\in\sZd}$ around his position. The conditioning of the
coloring ensures that the sequence $(C_k)_{k\ge0}$ of observed colorings
is stationary. By Kakutani's random ergodic theorem \cite{K2}, the process
$(C_k)_{k\ge0}$ is ergodic when the coloring $C$ is ergodic with respect to translations. Are there any stronger mixing properties of $(C_k)_{k\ge0}$
which can be
deduced from suitable mixing properties of $C$?
This question was posed by Kasteleyn in \cite{Kast} --- this paper gives a
survey of the early work on the model above. A first answer was found by
Keane and den Hollander \cite{KH}: If $C$ is a Bernoulli coloring and
$(S_N)$ is transient, $(C_k)_{k\ge0}$ is (strongly) mixing. This result
was much improved by den Hollander \cite{Holla}: $(C_k)_{k\ge0}$ is mixing
whenever $C$ has a trivial tail $\s$--field. He conjectured
that the mixing property of $(C_k)_{k\ge0}$ already holds when the coloring is ergodic with respect to the subgroup of translations generated by the differences of possible steps of the random walk.
In this note we shall establish this conjecture. In fact, we will even show
that (under the above assumption of ergodicity) the stationary process $(C_k)_{k\ge0}$ has a trivial tail. Also, there will be no difficulty in
considering a more general setting which we will describe now.
\skip
{\em The setting.} We consider the following objects:
\vspace{-1ex}
\bit
\item {\em a standard Borel probability space $(E,\cB,\m)$;
\item a countable abelian (additive) group $G$;
\item a $\m$--preserving measurable action $(T_v)_{v\in G}$ of $G$ on $E$} (which means by definition that the $T_v$ are $\m$--preserving invertible transformations of $(E,\cB)$ such that $T_v T_{v'}=T_{v+v'}$ for $v,v'\in G$ and $T_0= \mbox{id}$ for the null element $0$ of $G$);
\item {\em a probability measure $p$ on $G$ which serves as the jump distribution for a random walk on $G$.} The latter is defined on the Bernoulli space $(W,\cW,\n) = (G,\cP(G),p)^\N$ by $S_N(w) \equiv S(N,w) = \sum_{n=1}^N w_n$, where
$w_n$ is the $n$--th coordinate of $w\in W$.
\eit
The joint system is described by the product space
$$
(\O,\cF,P)=(E\times W,\,\cB\otimes\cW,\,\m\otimes\n)
$$
together with the skew product transformation
$$
T(x,w) = (T_{w_1}x,\th w), \qquad (x,w)\in\O,%
$$
where $\th: W\to W$ is the left--shift. It is well--known and easy to see
that $T$ preserves $P$. Its iterates are given by the formula
$$
T^N(x,w) = (T_{S(N,w)}\,x,\th^N w), \qquad (x,w)\in\O, N\ge1.
$$
This shows that the first coordinates of $T^N(x,w)$ perform a random walk
through the $G$--orbit of $x$.
We make the following assumption of ergodicity.
\bit
\item[(A)] $\m$ is ergodic with respect to $(T_v)_{v\in\G}$, where $\G=\G(p)$
is the smallest group containing
$$
\sp - \sp = \{u-v: u,v \in G, \, p(u)p(v)>0\}\,\,.
$$
\eit
$p$ is called strongly aperiodic if $\G=G$. In this case assumption (A)
simply means that $\m$ is ergodic with respect to the whole transformation
group $(T_v)_{v\in G}\:$.
The following proposition is essentially due to Meilijson \cite{Meil} and den Hollander \cite{Holla}. It can be phrased by saying that --- due to the Bernoulliness of the random walk --- the skew product transformation $T$ has
much better mixing properties than the original transformation group $(T_v)_{v\in G}\:$.
\proposition{}Under assumption (A), $T$ is a Kolmogorov--endomorphism.
That is, for all $A\in\cF$ we have
\be{K}
\sup_{B\in\cF} |P(A\cap T^{-N}B) - P(A)P(B)| \to 0
\ee
as $N\to\infty$.
\skip
Let us return for a moment to the basic example. In this case $G=\Zd$, $E=
\{0,1\}^G$, $\m$ is the distribution of the coloring, and $T_v$ the shift of
$E$ by $v\in G$. The objects of study are the distinguished event $\L =
\{C(0)=1\}$ that the origin is black, and the sequence of random times $N$ at
which $T_{S(N)}\in\L$.
Abstracting again from the example, we thus need to include a final ingredient into our general setting, namely
\bit
\item {\em a fixed measurable set $\L\subset E$ with $\m(\L)>0$.}
\eit
For $(x,w)\in \O$ we consider the time
$$
\t(x,w) = \inf\{N\ge1: T_{S(N,w)}\,x\in\L\}
$$
of the first visit in (or, if $x\in\L$, first return to) $\L$.
The sequence of all later return times to $\L$ is defined by the
recursion $\t_1=\t$, $\t_{k+1} = \t_k + \t\c T^{\t_k}$ for $k\ge1$.
As $T$ is ergodic, the Poincar\'e recurrence theorem implies that all $\t_k$
are finite with $P$--probability one.
For a point $(x,w)\in\L\times W$, the consideration of the return times
$\t_k$ amounts to considering the induced dynamical
system $(\L\times W,\cF_\L,P_\L,T_\L)$, where $\cF_\L$ is the restriction of
$\cF$ to $\L\times W$, $P_\L = P(\,\cdot\:|\L\times W) = \m(\,\cdot\,|\L)\otimes\n$
the conditional probability measure, and $T_\L = T^\t$ the induced
transformation of $T$ on $\L\times W$. Explicitly, for $(x,w)\in\L\times W$ and
$k\ge0$ we have
$$
T_\L^k(x,w) = ( T_{S(\t_k(x,w),w)}x, \th^{\t_k(x,w)}w).
$$
Let us mention at this point that the central concepts of this paper
--- random group action and induced transformation ---
were both developed by Kakutani \cite{K1,K2}.
What are the mixing properties of the induced dynamical system? In general,
an induced transformation inherits only ergodicity, but no stronger mixing property from its primitive transformation. In fact, the stationary Markov
chain with state space $\{0,1,2\}$ and transition probabilities $p_{00} =
p_{01}=1/2$, $p_{12}=p_{20}=1$ provides an example of a K-- (even Bernoulli)
system for which the induced subchain with state space $\{1,2\}$ is periodic and
therefore not weakly mixing. However, the dynamical system $(\O,\cF,P,T)$
under consideration is so well--behaved that even the induced subsystem is
K--mixing. This is the essence of our main result.
\theorem{}Under assumption (A), the induced transformation $T_\L$ is a
Kolmogorov--endomorphism. Explicitly, for all $A\in\cF_\L$,
\be{KL}
\sup_{B\in\cF_\L} |P_\L(A\cap T_\L^{-k}B) - P_\L(A)P_\L(B)| \to 0
\ee
as $k\to\infty$.
\skip
\remarks (a) Setting $\L=E$ in the theorem we reobtain the
proposition. Nevertheless, for the sake of exposition we will first prove
the proposition and then refine the argument to
obtain the theorem.
(b) As is well-known, \rf{KL} is equivalent to the statement that $P_\L$ is
trivial on the future tail $\s$--algebra $\bigcap_{k\ge1} T_\L^{-k}\cF_\L$.
Another way of writing \rf{KL} is the following statement of convergence
to equilibrium: For each $A\in\cF_\L$,
\be{KLM}
\| P(T^{\t_k}\in\cdot\,|A)-P_\L \| \to 0
\ee
as $k\to\infty$, where $\|\,\cdot\,\|$ stands for the total variation norm.
Assertion \rf{KLM} even holds when $A$ is not contained in $\L$.
Indeed, for each
$A\in\cF$ we have that $P(T^\t\in\cdot\,|A) \ll P_\L$, and a standard extension
argument allows to insert the associated Radon--Nikodym density in the place
of $A$ in \rf{KL}. In fact, our proof of the theorem gives \rf{KLM} directly
for all $A\in\cF$.
(c) Reducing \rf{KLM} to events in $E$ we obtain: For all
$\D\in\cB$,
$$
\sup_{B\in\cB}\Big|\int \n(dw)\: \m(x: T_{S(\t_k(x,w),w)}\,x\in B|\D)
- \m(B|\L)\Big| \to 0
$$
as $k\to\infty$. Another corollary of \rf{KLM} is that for all $A\in\cF$,
$$
\|P(\t_{k+1}-\t_k\in\cdot\:|A) - P_\L(\t\in\cdot\:)\| \to 0
$$
as $k\to\infty$. In particular, if $\int \t\,dP<\infty$ then
$$
\int (\t_{k+1}-\t_k)\,dP(\,\cdot\,|A)\to\int\t\,dP_\L=1/\m(\L)
$$
as $k\to\infty$ because in this case the sequence $(\t_{k+1}-\t_k)_{k\ge1}$
is uniformly $P$--integrable. To see this one may use the equations
$P(\t=n)=\m(\L)\,P_\L(\t\ge n)$ and $P(\t_{k+1}-\t_k=n) = \m(\L)\, P_\L(\t\,;\,
\t_{k+1}-\t_k=n)$ for $n,k\ge1$ which follow readily from the fact that $T$
is $P$--preserving and ergodic.
(d) The theorem breaks down for induced transformations $T_M$ on subsets
$M$ of $\O$ which are not of the particular form $M=\L\times W$. Here is a
counterexample. Let $G=\Z$ and $p$ be such that $p(0)=p(1)=1/2$. Define
$M = E\times\{w\in W: w_1=1\}$, and let $\t_k$ denote the $k$--th return time
to $M$. (Note that $\t_k$ is a function of $w$ only.) Then $S(\t_k)=k$ and
$T_M^k=T_k\times\th^{\t_k}$. As a consequence, assertion \rf{KL} (with $T_\L$
replaced by $T_M$) can only hold when $\m$ is a Dirac measure. This shows that
assumption (A) is by far not sufficient for $T_M$ to be K--mixing.
(e) $T$ and $T_\L$ become invertible if we replace $W$ by the two--sided
sequence space $G^\Z$. It is routine to restate and prove the theorem in
this setting.
\skip
We shall prove the proposition in Section 2 and the theorem in Section 3.
Like den Hollander in \cite{Holla}, we make extensive use of coupling arguments.
Actually we adopt some of his ideas. The main difference is the
following. Den Hollander used the tail triviality of the coloring to obtain a
tail coupling of conditioned colorings. By this we mean a realization of the
conditioned colorings on a common probability space which is such that the
colorings coincide outside of a finite random box. He then had to deal with
the difficulty that --- in the case that the random walk is recurrent --- the
random box of disagreement will be visited infinitely often. We can avoid this
problem by replacing the tail coupling with an orbit coupling. This only
requires the ergodicity of the coloring and provides a realization of
conditioned colorings on a common probability space which is such that each
coloring is a random translate of the other. This leads to great simplifications
of the argument.
\section{Proof of the proposition}
We start with stating the orbit coupling lemma which is our fundamental tool.
\lemma{2.1 (Orbit coupling)} Suppose assumption (A) holds, and let $\D,\D'
\subset E$ be two measurable sets with $\m(\D)\m(\D')>0$. Then there exist a
probability measure $\bar\m = \bar\m_{\D,\D'}$ on $(E\times E,\cB\otimes\cB)$
and a measurable function $\g:E\times E\to \G$ such that
\bit
\item[(i)] $\bar\m$ has marginals $\m(\,\cdot\,|\D)$ and $\m(\,\cdot\,|\D')$, and
\item[(ii)] $\bar\m(X'=T_\g X) = 1$, where $X,X'$ are the two projections from
$E\times E$ onto $E$.
\eit
\proof In view of (A), $\mu$ is trivial on the $\s$--algebra $\cI(\G)$ of
all $(T_v)_{v\in\G}$--invariant events in $E$. Hence $\m(\,\cdot\,|\D) =
\m(\,\cdot\,|\D')$ on $\cI(\G)$. The lemma is therefore a particular case of the
orbit coupling theorem (in fact of Proposition 3.1) in \cite{G}. $\Box$
\skip
Our second tool is a coupling of random walks. It is merely a slight variant of a coupling proposed by Liggett on pp. 69--70 of \cite{Ligg}. For later use,
however, we need to give the details of the proof.
\lemma{2.2 (Random walk coupling)} Let $v\in\G$, and let $C, C'$ be two cylinder events
in $W$ with $\n(C)\n(C') > 0$. Then there exists a probability measure $\bar\n =
\bar\n_{v,C,C'}$ on $(W\times W,\cW\otimes\cW)$ such that
\bit
\item[(i)] $\bar\n$ has marginals $\n(\,\cdot\,|C)$ and $\n(\,\cdot\,|C')$, and
\item[(ii)] $\bar\n(S_N=S_N'+v \,\,\mbox{eventually})=1$.
\eit
Here $S_N=S_N(\xi)$ and $S_N'=S_N(\xi')$ for the two projections $\xi,\xi'$ from
$W\times W$ onto $W$.
\proof We may assume that, for some $\ell\in\N$, $C$ and $C'$ prescribe fixed
values for the first $\ell$ coordinates. The general case then follows by an
averaging argument.
For such $C, C'$, the conditional probabilities $\n(\,\cdot\,|C)$ and
$\n(\,\cdot\,|C')$ govern two random walks which start at time $\ell$ at certain
points $u, u'\in G$ with $u-u'\in\G$. This shows that we can, in fact, assume
without loss that $C=C'=W$.
By the definition of $\G$, there exist some $k\in\N$ and $u_1,\ldots,u_k,
u_1',\ldots,u_k'\in\sp$ such that $u_j\ne u_j'$ for $1\le j\le k$ and
$\sum_{j=1}^k(u_j-u_j')=v$. We introduce the stopping times $\r_0=0$ and
$$
\r_j = \inf\Big\{N>\r_{j-1}: S_N-S_N' = \sum_{i=1}^j (u_i-u_i')\Big\},
$$
$1\le j\le k$. We define a probability measure $\bar\n$ on $W\times W =
(G\times G)^\N$ by prescribing its recursive conditional probabilities.
Namely, for $n\in\N$ and $1\le j\le k$ we stipulate that on the set
% NOTE(review): the remainder of this proof and the opening of the proof of
% the proposition were garbled in this copy (all text between `<' and the
% next `>' was stripped). The passage below is reconstructed from the
% surviving fragments and from the coupling of Liggett cited above;
% please check it against the published version.
$\{\r_{j-1}<n\le\r_j\}$ the pair $(\xi_n,\xi_n')$ takes each of the two
values $(u_j,u_j')$ and $(u_j',u_j)$ with probability $p(u_j)p(u_j')>0$,
while on $\{\r_k<n\}$ we require that $\xi_n=\xi_n'$. In either case the
remaining probability mass is placed on the diagonal in such a way that both
conditional marginals of $(\xi_n,\xi_n')$ are equal to $p$; this gives
property (i). During the $j$--th stage, the difference $(S_N-S_N')$ thus
performs a lazy symmetric random walk with steps $\pm(u_j-u_j')$, which is
recurrent. Hence all $\r_j$ are finite almost surely, and $S_N=S_N'+v$ for
all $N>\r_k$, which is property (ii). $\Box$
\skip
We now turn to the proof of the proposition. By a standard approximation
argument it is sufficient to prove \rf{K} for sets of the form
$A=\D\times C$ with $\D\in\cB$, a cylinder event $C\subset W$, and
$P(A)=\m(\D)\n(C)>0$.
We choose $\bar\m=\bar\m_{\D,E}$ according to the orbit coupling lemma, and for
$v\in\G$ we let $\bar\n_v=\bar\n_{v,C,W}$ be as in Lemma 2.2. We define a probability
measure $\bar P$ on $\O\times\O=E\times E\times W\times W$ by
$$
\bar P(dx,dx',dw,dw')=\bar\m(dx,dx')\:\bar\n_{\g(x,x')}(dw,dw')\:.
$$
Writing $X, X':\O\times\O\to E$ and $\xi,\xi':\O\times\O\to W$ for the four
projections we then have $\bar P((X,\xi)\in\cdot\,)=P(\,\cdot\,|A)$ and
$\bar P((X',\xi')\in\cdot\,)=P$.
Consider the coupling time
$$
\r=\inf\Big\{M\ge1: S_N=S_N'+\g(X,X') \mmbox{for all}N\ge M\Big\}.
$$
By construction, $\bar P(\r<\infty)=1$. On the set $\{\r\le N\}$ we have
$\xi_n=\xi_n'$ for all $n>N$ and
$$
T_{S(N)}X=T_{S'(N)+\g(X,X')}X=T_{S'(N)}X'\:.
$$
This means that $T^N(X,\xi)=T^N(X',\xi')$ on $\{\r\le N\}$. Thus for each
$B\in\cF$ we can write, using that $P(T\inv B)=P(B)$,
\beal{rho}
|P(T^{-N}B|A)-P(B)|&\le&\int |1_B\c T^N(X,\xi) - 1_B\c T^N(X',\xi')|\,d\bar P
\nonumber\\
&\le&\bar P(\r>N)\,.
\eeal
Assertion \rf{K} now follows immediately.
\section{Proof of the theorem}
To prove \rf{KL} we can assume without loss that $\m(\L\,\triangle\, T_v\L)>0$
for some $v\in\G$. Indeed, in the alternative case $\L$ is almost surely invariant
under the transformation group $(T_v)_{v\in\G}$. Assumption (A)
then implies that $\m(\L)=1$, and this means that $P_\L=P$ and
$P(T_\L=T)=1$. So in this case the theorem is reduced to the proposition
which has already been proved.
Since $\{v\in\G:\m(\L\,\triangle\, T_v\L)=0\}$ is a group and $\G$ is generated
by $\sp-\sp$, the assumption above can be written in the form
\bit
\item[(B)] $\m(T_u\L\,\triangle\, T_v\L) > 0$ for some $u,v\in\sp$.
\eit
We will use this property to introduce a refined coupling of random walks which
accounts for the number of visits in $\L$. Let
$$
V_N = \sum_{n=1}^N 1_{\L\times W}\c T^n = \sum_{n=1}^N 1_\L\c T_{S(n,\cdot)}
$$
be the number of visits in $\L$ up to time $N$.
\lemma{3.1} Suppose (A) and (B) hold, and let $v\in\G, k\in\Z$ and $C, C'$ be
two cylinder events with $\n(C)\n(C')>0$. Then there exists a probability kernel
$x\to \bar\n_x = \bar\n_{x,v,k,C,C'}$ from $(E,\cB)$ to
$(W\times W,\cW\otimes\cW)$ such that
\bit
\item[(i)] for all $x$, $\bar\n_x$ has marginals $\n(\,\cdot\,|C)$ and
$\n(\,\cdot\,|C')$, and
\item[(ii)] for $\m$--almost all $x$,
$$
\bar\n_x(S_N=S_N'+v\mmbox{and}V_N(x,\,\cdot\,)=V_N'(T_vx,\,\cdot\,)+k
\mmbox{eventually}) = 1\:.
$$
\eit
Here $V_N(x,\,\cdot\,)=V_N(x,\xi)$ and $V_N'(x,\cdot\,)=V_N(x,\xi')$ in the
notation of Lemma 2.2.
\proof 1) {\em Construction of $\bar\n_x$.} We proceed as in the proof of Lemma 2.2.
As explained there, we only need to consider the case $C=C'=W$. The measure $\bar\n_x$
will again be constructed by prescribing its conditional probabilities
$$
q_{x,n}(a,a')=\bar\n_x\Big(\xi_n=a,\xi_n'=a'\Big|(\xi_i,\xi_i'),i<n\Big)\:.
$$
% NOTE(review): the passage between these two displays was garbled in this
% copy (text between `<' and `>' was stripped); it is reconstructed from
% the surviving fragments -- please check against the published version.
Up to the coupling time of Lemma 2.2 we let $q_{x,n}$ be as constructed
there (with the present $v$ and $C=C'=W$), and we write $\r$ for the first
even time from which onwards $S_N=S_N'+v$. We then introduce the stopping
time
$$
\s_x = \inf\Big\{N>\r:
N\mmbox{even,}V_N(x,\,\cdot\,)=V_N'(T_vx,\,\cdot\,)+k\Big\}\:,
$$
where as usual $\inf\emptyset=\infty$. For each $n$ we require that on the
set $\{\r\le n<\s_x\}$,
$$
q_{x,n}(a,a')=\left\{\begin{array}{cl}p(a)p(a')&\mbox{if $n$ is odd}\\[1ex]
\delta(\xi_{n-1}',a)\delta(\xi_{n-1},a')&\mbox{if $n$ is even},
\end{array}\right.
$$
where $\delta(\cdot,\cdot)$ is the Kronecker--Delta.
% NOTE(review): this passage was garbled in this copy (text between `<'
% and `>' was stripped); the prescription on $\{\s_x<n\}$ and the
% definition of $(Z_N)$ are reconstructed -- please check against the
% published version.
The final recoupling is achieved by requiring that on $\{\s_x<n\}$,
$q_{x,n}(a,a')=p(a)\,\delta(a,a')$, i.e., the two walks perform identical
jumps after time $\s_x$. For even $N\ge\r$ we set
$Z_N=V_N(x,\,\cdot\,)-V_N'(T_vx,\,\cdot\,)-k$.
[Note that, by construction, $S_n=S_n'+v$ for all even $n>\r$ and
$\xi_n=\xi_n'$ for all $n>\s_x$, with $\bar\n_x$--probability one.]
It can easily be checked that the sequence $(Z_N)$ is a martingale relative
to $\bar\n_x$. The increments of $(Z_N)$ are $\pm1$ or $0$. It follows that $(Z_N)$
either converges or oscillates unboundedly, in that
$\sup\, Z_N=\infty$ and $ \inf\, Z_N=-\infty$.
On the latter event, $\s_x$ is both infinite and, by a `continuity' argument,
finite. So this event cannot occur with positive probability, and we can conclude that $Z_N$ converges with $\bar\n_x$--probability one.
To complete the proof of property (ii) it is therefore sufficient to show
that $\bar\n_x( Z_N \:\mbox{converges}, \s_x=\infty)=0$ for $\m$--almost all $x$. To check this we write for any even $M$ and $u\in G$
\beal{0}
\bar\n_x\Big(\hspace{-1ex}&Z_N& \hspace{-1ex} \mbox{converges},
\: \s_x=\infty\:\Big|\:\r=M, S_M=u\Big)
\nonumber\\ %[1.2ex]
&\le& \bar\n_x\Big(1_\L(T_{u+\xi(M+1)+\ldots+\xi(n+1)}\,x)= 1_\L(T_{u+\xi(M+1)+\ldots+\xi(n)+\xi(n+2)}\,x)\nonumber \\
&&\qquad\qquad\qquad\qquad\mmbox{for all sufficiently large even}
n\:\Big|\:\r=M, S_M=u\Big)\nonumber\\[1.2ex]
&=& \n\Big(1_\L(T_{S(n)+\xi(n+1)+u}\,x)=1_\L(T_{S(n)+\xi(n+2)+u}\,x)\nonumber\\
&&\qquad\qquad\qquad\qquad\mmbox{for all sufficiently large even}n\Big)
\eeal
The inequality follows from the particular definition of $q_{x,n}$ on
$\{\r\le n<\s_x\}$, and the equality from the fact that the sequence
$(\xi_n)_{n>M}$ has distribution $\n$ and is independent of
$\{\r=M,S_M=u\}$. The subsequent Lemma 3.2 will show that the last probability
in \rf{0} vanishes for $\m$--almost all $x$ and all $u$. So for these $x$ we have
$$
\bar\n_x(\s_x<\infty)\ge\bar\n_x(Z_N\:\mbox{converges}) = 1\:.
$$
This completes the proof of the lemma. $\Box$
\lemma{3.2} Under assumptions (A) and (B), the last probability in \rf{0}
vanishes for $\m$--almost all $x\in E$ and all $u\in G$.
\proof Since $\m$ is invariant under $T_u$, we only need to consider the case
$u=0$. The $\m$--integral of the probability in question is equal to
$$
1-P(T^{2n}\in\Sigma\mmbox{for infinitely many}n)\:,
$$
where $\Sigma = \{(x,w)\in\O: x\in T_{w_1}\inv\L\,\triangle\,T_{w_2}\inv\L\}$.
Assumption (B) implies that $P(\Sigma)>0$, and the proposition shows that
$T^2$ is ergodic. The lemma thus follows from the Poincar\'e recurrence theorem.
$\Box$
\skip
We now turn to the proof of the theorem. In fact we shall prove \rf{KLM} for
all $A\in\cF$.
As in the proof of the proposition, we can assume that $A=\D\times C$ for
some $\D\in\cB$ and a cylinder event $C\subset W$ such that $P(A)=
\m(\D)\n(C)>0$. Let $\bar\m=\bar\m_{\D,\L}$ be chosen according to Lemma 2.1,
and for $v\in\G$ and $x\in E$ we let $\bar\n_{x,v}=\bar\n_{x,v,0,C,W}$ be as
in Lemma 3.1. We define a probability measure $\bar P$ on $\O\times\O$ by
$$
\bar P(dx,dx',dw,dw')=\bar\m(dx,dx')\bar\n_{x,\g(x,x')}(dw,dw')\:.
$$
Letting again $X, X', \xi, \xi'$ denote the four projections on $\O\times\O$,
we then have that $\bar P((X,\xi)\in\cdot\,)=P(\,\cdot\,|A)$ and
$\bar P((X',\xi')\in\cdot\,)=P_\L$.
We consider the coupling time
$$
\s=\inf\Big\{k\ge1: S_N=S_N'+\g(X,X')\mmbox{and} V_N(X,\xi)=V_N(X',\xi')
\mmbox{for all} N\ge k\Big\}\:.
$$
By construction, $\bar P(\s<\infty)=1$. As in the proof of the proposition we
see that, on the set $\{\s\le k\}$, $\th^N\xi=\th^N\xi'$ and $T_{S(N)}X =
T_{S'(N)}X'$ for all $N\ge k$ and, in addition, $V_N(X,\xi)=V_N(X',\xi')$
for all $N\ge k$. The last property implies that $\t_k(X,\xi)=\t_k(X',\xi')$.
Hence $T^{\t_k}(X,\xi)=T^{\t_k}(X',\xi')$ on $\{\s\le k\}$.
An estimate analogous to \rf{rho} thus shows that
$$
\|P(T^{\t_k}\in\cdot\,|A)-P_\L\| \le 2\,\bar P(\s>k) \to 0 \mmbox{as} k\to\infty.
$$
This proves \rf{KLM} for all $A\in\cF$ and in particular the theorem.
\newpage
\begin{thebibliography}{99}
\bibitem{G} H.\ O.\ Georgii: Orbit coupling. Preprint (1995)
\bibitem{Holla} W.\ Th.\ F.\ den Hollander: Mixing properties for random walk
in random scenery. {\em The Annals of Probab.}\ {\bf 16} (1988), 1788--1802.
\bibitem{K1} S.\ Kakutani: Induced measure preserving transformations. {\em Proc.\ Japan Acad.\ Ser.\ A Math.\ Sci.} {\bf 19} (1943), 635--641.
\bibitem{K2} S.\ Kakutani: Random ergodic theorems and Markoff processes with a stable distribution, in: J.\ Neyman (ed.), {\em Proc.\ Second Berkeley Symp.\ Math.\ Statist.\ and Probab.}. Univ.\ of California Press: Berkeley, 1951, pp.\ 247--261.
\bibitem{Kast} P.\ W.\ Kasteleyn: Random walks through a stochastic landscape.
{\em Bull.\ Internat.\ Inst.\ Statist.}\ {\bf 45} (1985), 27--I.1--13.
\bibitem{KH} M.\ Keane \& W.\ Th.\ F.\ den Hollander: Ergodic properties of color records. {\em Physica} {\bf 138A} (1986), 183--193.
\bibitem{Ligg} Th.\ M.\ Liggett: {\em Interacting Particle Systems}. Springer: New York etc., 1985.
\bibitem{Meil} I.\ Meilijson: Mixing properties of a class of skew--products. {\em Israel J.\ Math.}\ {\bf 19} (1974), 266--270.
\end{thebibliography}
\end{document}