% April 25/94, GS
\documentstyle[12pt]{article}
%\documentstyle[12pt,drafthead]{article}
%% List of macros follows.
% THEOREM, EQN etc. commands
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{prop}[theorem] {Proposition}
\newtheorem{cor}[theorem] {Corollary}
\newtheorem{defn}[theorem] {Definition}
\newtheorem{conj}[theorem] {Conjecture}
\newcommand{\en} {\end{equation}} \newcommand{\eq} {\begin{equation}}
\newcommand{\lbeq}[1] {\label{eq: #1}} \newcommand{\refeq}[1]
{(\ref{eq: #1})} \newcommand{\lbfg}[1] {\label{fg: #1}}
\newcommand{\reffg}[1] {\ref{fg: #1}} \newcommand{\lbtb}[1]
{\label{tb: #1}} \newcommand{\reftb}[1] {\ref{tb: #1}}
\newcommand{\eqarray} {\begin{eqnarray}} \newcommand{\enarray}
{\end{eqnarray}} \newcommand{\eqarrstar} {\begin{eqnarray*}}
\newcommand{\enarrstar} {\end{eqnarray*}}
% ``Remark, Proof, QED'' etc.
\newcommand{\REV} {{\bf APR21}} \newcommand{\proof} {\noindent {\bf
Proof}. \hspace{2mm}} \newcommand{\qed} {\hspace*{\fill} $\Box$
\medskip} \newcommand{\remark}{\noindent {\bf Remark}. \hspace{2mm}}
\newcommand{\ssss} { \scriptstyle } \newcommand{\sss} {
\scriptscriptstyle }
%Caligraph fonts
\newcommand{\Acal} {{\cal A }}
\newcommand{\Bcal} {{\cal B }}
\newcommand{\Ccal} {{\cal C }}
\newcommand{\Dcal} {{\cal D }}
\newcommand{\Ecal} {{\cal E }}
\newcommand{\Fcal} {{\cal F }}
\newcommand{\Gcal} {{\cal G }}
\newcommand{\Hcal} {{\cal H }}
\newcommand{\Kcal} {{\cal K }}
\newcommand{\Lcal} {{\cal L }}
\newcommand{\Mcal} {{\cal M }}
\newcommand{\Ncal} {{\cal N }}
\newcommand{\Pcal} {{\cal P }}
\newcommand{\Rcal} {{\cal R }}
\newcommand{\Scal} {{\cal S }}
\newcommand{\Tcal} {{\cal T }}
\newcommand{\Ucal} {{\cal U }}
\newcommand{\Vcal} {{\cal V }}
\newcommand{\Wcal} {{\cal W }}
\newcommand{\Xcal} {{\cal X }}
\newcommand{\Ycal} {{\cal Y }}
%Bold fonts
\newcommand{\Cbold} {{\bf C}}
\newcommand{\Rbold} {{\bf R}}
\newcommand{\Rd} {{ {\bf R}^d}}
\newcommand{\rd} {\mbox{${\bf R}^d$}}
\newcommand{\Zbold} {{ {\bf Z} }}
\newcommand{\Zd} {{ {\bf Z}^d }}
\newcommand{\zd} {\mbox{${\bf Z}^d$}}
% Mathematical symbols:
\newcommand{\smfrac}[2]{\textstyle{#1\over #2}}
\newcommand{\combination}[2]{ { \left (
\begin{array}{c} {#1} \\ {#2}
\end{array} \right ) }}
\newcommand{\intsub}{\int_{[-\pi,\pi]^d}} \newcommand{\nexists} {{
\not\exists }} \newcommand{\nin} {{ \not\in }}
\newcommand{\nni} {{ \not\ni }} \newcommand{\Prob} {{\rm Prob}}
\newcommand{\prodtwo}[2]{ \prod_{ \mbox{ \scriptsize
$\begin{array}{c} {#1} \\ {#2} \end{array} $ } } }
\newcommand{\sumtwo}[2]{\sum_{ \mbox{ \scriptsize
$\begin{array}{c} {#1} \\ {#2} \end{array} $ } } }
% End of list of macros
\oddsidemargin 3mm
\evensidemargin 3mm
\topmargin -12mm
%\headheight 4mm
%\headsep 3mm
\textheight 620pt
\textwidth 440pt
\title {
The diffusive phase of a model \\ of self-interacting walks
}
\author {David C.\ Brydges \\
Department of Mathematics \\
University of Virginia \\
Charlottesville, VA 22903-3199, USA \\
E-mail: {\tt db5d@virginia.edu} \\
\and
Gordon Slade \\
Department of Mathematics and Statistics \\
McMaster University \\
Hamilton, Ont., Canada L8S 4K1 \\
E-mail: {\tt slade@mcmaster.ca}
}
\begin{document}
\maketitle
%\begin{center}
% {\large \bf NOT FOR DISTRIBUTION}
%\end{center}
\begin{abstract}
We consider simple random walk on ${\bf Z}^d$ perturbed by a factor
$\exp[\beta T^{-p} J_T]$, where $T$ is the length of the walk and
$J_T = \sum_{0 \leq i < j \leq T} \delta_{\omega(i),\omega(j)}$.
For $p=1$ and dimensions $d \geq 2$, we prove that this walk behaves
diffusively for all $-\infty < \beta < \beta_0$, with $\beta_0 >0$.
For $d>2$ the diffusion constant is equal to $1$, but for $d=2$ it
is renormalized. For $d=1$ and $p=3/2$, we prove diffusion for all
real $\beta$ (positive or negative). For $d>2$ the scaling limit is
Brownian motion, but for $d \leq 2$ it is the Edwards model (with
the ``wrong'' sign of the coupling when $\beta >0$) which governs
the limiting behaviour; the latter arises since for
$p=\frac{4-d}{2}$, $T^{-p}J_T$ is the discrete self-intersection
local time. This establishes existence of a diffusive phase for
this model. Existence of a collapsed phase for a very closely
related model has been proven in work of Bolthausen and Schmock.
\end{abstract}
\section{Introduction}
\subsection{The model}
We consider homogeneous simple random walks on $\zd$ taking nearest
neighbour steps with equal probabilities $\frac{1}{2d}$. Given a
$T$-step simple random walk $\omega$ beginning at the origin, let \eq
J_T \equiv J_T(\omega) = \sum_{0 \leq i < j \leq T}
\delta_{\omega(i),\omega(j)}. \en Define \eq \lbeq{pfx} c_T = E \exp
[ \beta T^{-p} J_T ], \en where the expectation is with respect to
simple random walk beginning at $0$, and $p \geq 0$ and $\beta \in
{\bf R}$ are parameters. We define a new measure on $T$-step simple
random walks, by assigning to a walk $\omega$ the probability \eq
\frac{1}{c_T} \frac{1}{(2d)^T} \exp [ \beta T^{-p} J_T (\omega) ].
\en
For $\beta=0$ this new measure is
just the simple random walk. For $\beta >0$ it defines
a model of self-attracting walks, since self-intersections are
encouraged by the exponential factor. Similarly, for
$\beta <0$, this is a model of self-repelling walks. The factor
$T^{-p}$ diminishes the strength of the self-interaction for long walks,
and for $p$ fixed, $\beta$ provides a measure of the strength of the
interaction. For $p=0$ and $\beta < 0$ this is the Domb--Joyce model
of weakly self-avoiding walks, which in the limit $\beta \to -\infty$
gives the usual strictly self-avoiding walk.
We are interested in the phenomenon of a collapse transition, in which
for fixed $p$ and $d$ there is a transition from diffusive behaviour
to collapsed behaviour when $\beta > 0$ is increased. The order parameter
for the transition is the diffusion constant $D(\beta)$,
which is defined in terms of the mean-square displacement
\eq
\langle |\omega(T)|^2 \rangle_\beta =
\frac{E \left( |\omega(T)|^2 \exp [ \beta T^{-p} J_T ] \right) }
{E \left( \exp [ \beta T^{-p} J_T ] \right) }
\en
by
\eq
D(\beta) = \lim_{T \to \infty} \frac{1}{T}
\langle |\omega(T)|^2 \rangle_\beta .
\en
The diffusive phase corresponds to $0 < D(\beta) < \infty$, while
the collapsed phase is signalled by $D(\beta)=0$. In fact, typically
in the collapsed phase the mean-square displacement remains uniformly
bounded as $T \to \infty$. For simple random walk we have $D(0)=1$,
and it is to be expected that for fixed $p$ the diffusion constant
should be a nonincreasing function of $\beta$, since increasing the
encouragement for self-intersections should not increase the typical
distance travelled by the walk.
In order to
observe a transition at positive $\beta$,
it is necessary to include a factor $T^{-p}$
with $p \geq 1$ in the interaction, since for $0 \leq p <1$ the walk is
collapsed for all $d$ when $\beta >0$ \cite{Oono75,Oono76}. For $d \geq 2$,
we shall see that the correct power for observing a transition is $p=1$.
Note that given a transition for $p=1$ there can be no transition for
other values of $p$, since $p<1$ always
yields collapse, while larger values of $p$ essentially correspond to
the diffusive behaviour observed for $p=1$ and $\beta =0$.
The study of the large-$T$ limit of the partition function $c_T$
is also related to the problem of taking the continuum limit of the
discrete Edwards model (with the ``wrong'' sign of the coupling when
$\beta >0$). To see this, we recall that the partition function of
the Edwards model is formally given by the following expression, in
which traditionally one is interested in the repulsive case $\beta < 0$:
\eq
\lbeq{Edpf}
E \exp \left[ \beta \int_{0 \leq s < t \leq 1} \delta (X_t - X_s)
\, ds \, dt \right] .
\en
It is known that this expectation is finite for all $\beta > 0$ when
$d=1$ and for small $\beta > 0$ for $d=2$. We learned this fact
from Le~Gall \cite{LeGa94}, and will address some related issues in
Section~\ref{sec-d2}. Consequently the partition
function \refeq{Edpf} is an entire analytic function of
$\beta$ for $d=1$, and is analytic in a neighbourhood of
$\beta=0$ for $d=2$ (when conventionally renormalized).
A discrete space-time version of \refeq{Edpf} can be obtained by replacing
the continuous time interval $[0,1]$ by the discrete time interval
$\{ 0, 1, \ldots , T\}$, replacing the Brownian expectation
by the simple random walk expectation, replacing $X_s$ by
$T^{-1/2} \omega(\lfloor sT \rfloor )$, replacing
the two time integrals by Riemann sums, and replacing the Dirac delta
function by a suitably rescaled Kronecker delta.
This leads to the discrete partition function
\eq
\lbeq{disEd}
E \exp [ \beta T^{(d-4)/2} J_T ],
\en
where now the expectation is with respect to simple random walk starting
from the origin.
This is just the partition function $c_T$, with $p=(4-d)/2$. Thus
the $T \to \infty$ limit for this value of $p$ corresponds to the
continuum limit of the Edwards model. Note that this is the problem
of studying the {\em ultraviolet} limit for the Edwards model, as
the continuum time interval is the finite interval $[0,1]$. The
{\em infrared} problem for the Edwards model is connected with
studying the behaviour when the interval $[0,1]$ is replaced by
a long interval $[0,L]$ with $L \to \infty$, and is not addressed here.
Before stating our results precisely, we remark that the model we analyze
is not the standard model for the collapse of long chain
polymers. The
standard model of polymer collapse involves a self-repellence due
to the excluded volume effect that no two monomers can occupy the same
region of space, together with a nearest-neighbour attraction due to
temperature or solvent effects. In other words to each $T$-step
simple random walk $\omega$ there is associated a factor
\eq
\exp \left[ -\lambda_1 \sum_{0 \leq i < j \leq T}
\delta_{\omega(i),\omega(j)}
+ \lambda_2 \sum_{0 \leq i < j \leq T}
\delta_{|\omega(i)-\omega(j)|,1} \right],
\en
with $\lambda_1$ and $\lambda_2$ both positive.
It has been argued, and observed experimentally, that when
$\lambda_2$ is increased with fixed $\lambda_1$, there is a collapse
transition. Recent references include
\cite{BGW92,BOP93}. The combination of
attraction and repulsion makes this
model difficult to treat rigorously; our model avoids this difficulty.
%\bigskip \noindent
%{\bf Questions for us to think about (but I am happy to omit this if
%there is nothing obvious to say):}
%\begin{enumerate}
%\item What is the relevance of Fr\"ohlich's remark about work of
% Lebowitz and Speer on nonlinear Schr\"odinger models?
% Their results are related to ours by the isomorphism between
% $\phi^2$ and $\tau$. This means that they are studying many
% polymers with mutual attraction $T^{-3/2} \sum \tau^2_x$, where
% $\tau$ is the total local time and $T$ is the total time, in $d =
% 1$. I am inclined to leave this all out. What do you think?
%\item
%Cardy suggested that there was a connection between our collapse transition
%problem and solitons in the wrong-sign $\phi^4$ theory. Can we comment
%meaningfully on this?
%I think that this is nothing more than what we have already said: that
%the collapse partition function is given by dv theory which involves
%the extremeum of an action with wrong sign $\lambda \phi^4$? Leave it
%out?
%\end{enumerate}
\subsection{The results}
In this section we formulate the main results proved in this paper.
We will be interested in the scaling limit of $X_T(t) =
T^{-1/2}\omega(\lfloor tT \rfloor)$ as $T \to \infty$, where the
probability of $\omega$ is given by $c_T^{-1}(2d)^{-T} \exp [ \beta
T^{-p} J_T(\omega)]$.
We introduce the renormalized partition function
\eq
c_T^{ren} = E \exp [ \beta T^{-p} (J_T - E(J_T))].
\en
Let
\eq
\lbeq{beta0def}
\beta_0 = \sup \{ \beta : \sup_T c_T^{ren} < \infty \};
\en
this depends on $d$ and $p$. We shall see that $\beta_0 >0$ for
$d \geq 2$, $p=1$, while $\beta_0 =+\infty$ for $d=1$, $p=\frac{3}{2}$.
We define the simple random walk Green function
\eq
G(x) = \sum_{T=0}^\infty p_T(x),
\en
where $p_T(x)$ denotes the transition probability for simple random walk
to go from $0$ to $x$ in $T$ steps. The Green function is finite for
$d>2$ but diverges for $d \leq 2$.
\begin{theorem}
\label{thm-main}
Let $d>2$ and $p=1$. Then $\beta_0 > 0$, and the following statements
hold for $-\infty < \beta < \beta_0$. The diffusion constant
$D(\beta)$ is equal to 1, and the process $X_T$ converges in
distribution to Brownian motion. The partition function satisfies \eq
\lim_{T \to \infty} c_{T} = \exp[\beta [G(0)-1]] , \quad \quad \lim_{T
\to \infty} c_{T}^{ren} = 1.
\en
%\lbeq{limpfhighd}
% \lim_{T \to \infty} \hat{c}_{aT,T}(k/\sqrt{T}) = \exp[a\beta [G(0)-1]]
% \exp [-k^2/2d],
%\en
%so in particular setting $k=0$ we have
%\eq
% \lim_{T \to \infty} c_{aT,T} = \exp[a\beta [G(0)-1]] .
%\en
\end{theorem}
The Green function occurring in the limiting partition function in
the above theorem is divergent when $d=2$, and this is a symptom of
the need for renormalization in two dimensions.
For $d=2$ we have the following result. Let $\underline{\gamma}$ denote the
renormalized self-intersection local time for planar Brownian motion
on the time interval $[0,1]$. This gives rigorous meaning \cite{Vara69,LeGa85}
to the expression
\eq
\underline{\gamma} \; \mbox{``}=\mbox{''} \;
\int_{0 \leq s < t \leq 1} \delta (B_t - B_s) \, ds \, dt
- E \int_{0 \leq s < t \leq 1} \delta (B_t - B_s) \, ds \, dt .
\en
We define the measure
\eq
\lbeq{nu2b}
d\nu_{2,\beta} = \frac{e^{\beta \underline{\gamma}} dW}
{\int e^{\beta \underline{\gamma}} dW},
\en
where $dW$ denotes the two-dimensional Wiener measure.
\begin{theorem}
\label{thm-main2}
Let $d=2$ and $p=1$. Then $\beta_0 >0$, and for $-\infty < \beta <
\beta_0$, the following statements hold. The limit defining the
diffusion constant exists and equals \eq D(\beta)= \int B(1)^2
d\nu_{2,\beta} . \en Moreover, $D$ is a strictly decreasing function
of $\beta \in [0,\beta_0)$. The law of the process $X_T(t)$ converges
in distribution to $\nu_{2,\beta}$. In addition, as $T \to \infty$,
\eq c_T \sim [E e^{\beta \underline{\gamma}}] T^{\beta/\pi}. \en
\end{theorem}
In fact, we prove the above theorem by
taking limits separately for the renormalized
partition function and the corresponding quantity weighted with a
factor $|\omega(T)|^2$, with the ratio giving the mean-square displacement.
For more details, see Section~\ref{sec-conv}.
Bolthausen and Schmock \cite{BoltS94}
have studied a closely related model, with $p=1$ but
with a continuous-time random walk with exponential holding times. It is
expected that the continuous-time model will behave in a qualitatively
similar fashion to the discrete-time model considered here.
Define
\eq
\beta_c = \inf \{ \beta : \lim_{T \to \infty} c_T^{1/T} > 1 \}.
\en
Clearly $\beta_0 \leq \beta_c$, so for
$d \geq 2$ it is the case that $\beta_c >0$ (see also Section~\ref{sec-huer}).
For the continuous-time model with $d \geq 2$ and
$\beta$ exceeding the continuous-time analogue of $\beta_c$,
Bolthausen and Schmock
prove that the mean-square displacement
is uniformly bounded in $T$ and hence $D(\beta)=0$. Together with our
results, this suggests that for $d>2$ there may be a jump discontinuity
in the graph of $D$ at some critical value of $\beta$, whereas for $d=2$
the transition may be continuous. The natural questions therefore arise
as to whether or not $\beta_0 = \beta_c$, and whether $D(\beta)$ goes
continuously to $0$. These questions remain open.
For $d=1$ and $p=1$, Bolthausen and Schmock \cite{BoltS94}
have proved that the mean-square
displacement of the continuous-time model is uniformly bounded in $T$
for all positive $\beta$, and hence $D(\beta)=0$ for all $\beta >0$.
This indicates that the power $p=1$ is too small to observe a collapse
transition, at positive $\beta$,
when $d=1$. On the other hand the existence of the ``wrong-sign''
Edwards model for $d=1$ suggests that for $p=3/2$ there should be
diffusive behaviour for all $\beta >0$. The next theorem shows that
this is indeed the case. For its statement we define
\eq
\lbeq{nu1b}
d\nu_{1,\beta} = \frac{e^{\beta \gamma} dW}
{\int e^{\beta \gamma} dW},
\en
where now $dW$ denotes the one-dimensional Wiener measure, and
\eq
\gamma = \int_{0 \leq s < t \leq 1} \delta(B_t - B_s) ds \, dt
\en
denotes the one-dimensional self-intersection local time. The denominator
of \refeq{nu1b} is finite for all $\beta \in {\bf R}$ \cite{LeGa94}.
\begin{theorem}
\label{thm-main1}
Let $d=1$ and $p=\frac{3}{2}$. For any $-\infty <\beta < \infty$, the
limit defining the diffusion constant exists, and \eq D(\beta)= \int
B(1)^2 d\nu_{1,\beta} . \en The diffusion constant is strictly
decreasing for $\beta \in [0,\infty)$. The law of the process
$X_T(t)$ converges in distribution to $\nu_{1,\beta}$.
\end{theorem}
The behaviour of the diffusion constant is summarized in Figure~\ref{fig-dc}.
It has not been established whether $\beta_0 = \beta_c$.
Continuity at $\beta_0$ for $d=2$ has not been established, nor has
monotonicity for $d=1$ or $2$ when $\beta < 0$. It has not been established
whether $D(\beta) \to 0$ as $\beta \to \infty$ for $d=1$, or whether
$D(\beta) \to \infty$ as $\beta \to -\infty$ for $d=1$ and $2$.
Concavity or convexity properties have also not been established for
$d=1$ or $2$.
The curves illustrated for $d=1$ and $d=2$ are thus only schematic.
\begin{figure}
\begin{center}
\setlength{\unitlength}{0.0075in}%
\begin{picture}(660,174)(20,640)
\thinlines \put( 80,800){\line( 0,-1){120}} \put( 80,680){\line(-1,
0){ 60}} \put( 80,680){\line( 1, 0){ 60}} \thicklines
\put(140,680){\line( 1, 0){ 60}} \thinlines \put(120,760){\line(-1,
0){100}} \put(120,682){\line( 0,-1){ 4}} \put(140,682){\line(
0,-1){ 4}} \put(320,800){\line( 0,-1){120}}
\put(320,680){\line(-1, 0){ 60}} \put(320,680){\line( 1, 0){ 60}}
\thicklines \put(380,680){\line( 1, 0){ 60}} \thinlines
\put(380,682){\line( 0,-1){ 4}} \put(360,682){\line( 0,-1){ 4}}
\put(560,800){\line( 0,-1){120}} \put(560,680){\line(-1, 0){ 60}}
\put(560,680){\line( 1, 0){ 60}} \put(620,680){\line( 1, 0){ 60}}
\put(
80,805){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$D(\beta)$}}}
\put(200,665){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\beta$}}}
\put(140,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\beta_c$}}}
\put(105,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\beta_0$}}}
\put( 65,765){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$1$}}}
\put(320,805){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$D(\beta)$}}}
\put(440,665){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\beta$}}}
\put(380,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\beta_c$}}}
\put(345,660){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\beta_0$}}}
\put(365,700){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{?}}}
\put(300,735){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$1$}}}
\put(318,750){\line(1,0){4}}
\put(560,805){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$D(\beta)$}}}
\put(540,735){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$1$}}}
\put(558,750){\line(1,0){4}}
\put(680,665){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$\beta$}}}
\put( 50,630){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$d>2, \,
p=1$}}}
\put(290,630){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$d=2, \,
p=1$}}}
\put(530,630){\makebox(0,0)[lb]{\raisebox{0pt}[0pt][0pt]{$d=1, \,
p=\frac{3}{2}$}}}
\end{picture}
\end{center}
\caption{The diffusion constant.}
\label{fig-dc}
\end{figure}
Theorem~\ref{thm-main1}
suggests that the transition for $d=1$ is quite different
than for $d \geq 2$, with the transition from diffusive to collapsed
behaviour taking place more gradually, as $p$ is varied rather than as
$\beta$ is varied. We expect that, for $\beta >0$, the behaviour
for $p \in (1,\frac{3}{2})$
interpolates between collapse for $p=1$ and diffusion
for $p=\frac{3}{2}$.
Such behaviour may be characterized by a continuous and monotone
decreasing critical exponent $\nu(p)$ such that
\eq
\lbeq{nup}
\langle |\omega(T)|^2 \rangle \sim \mbox{Const.}(\beta)T^{2\nu(p)},
\en
with $\nu(1)=0$ and $\nu(\frac{3}{2})=1/2$,
but \refeq{nup} has not been established.
A complementary problem, also unsolved, is
to understand the behaviour of the
$d=1$ model with {\em repulsive} ($\beta <0$)
interaction with $p=1$. The behaviour
is ballistic for all $\beta < 0$ when $p=0$ \cite{GH93},
and diffusive for all $\beta <0$ when $p=\frac{3}{2}$
by Theorem~\ref{thm-main1},
and presumably for $p \in (0,\frac{3}{2})$
the behaviour of the walk interpolates
between these two extremes as in \refeq{nup}. Related issues for
the model in which the energy function $T^{-p}J_T$ is replaced by
$\sum_{0 \leq i < j \leq T}|j-i|^{-p}\delta_{\omega(i),\omega(j)}$
are discussed in \cite{CPP94,Kenn94}.
A different type of attractive
walk model was studied in \cite{Zola87}, in which the weight of a $T$-step
walk $\omega$ is given by
\eq
\exp \left[ \beta \sum_{j=0}^T I[\omega(j)=\omega(i) \mbox{ for some }
i<j ] \right] ,
\en
for $\beta >0$. Related issues for
Brownian motion are treated in \cite{Schm90,Szni91}.
The remainder of the paper is organized as follows. In Section~\ref{sec-silt}
we discuss the relation of our results to invariance principles for
self-intersection local times. In Section~\ref{sec-huer}
we describe a heuristic argument that for $p=1$ there is a collapse
transition at positive $\beta$ for $d \geq 2$ but not for $d<2$.
In Section~\ref{sec-exdiff} we prove uniform bounds on the
renormalized partition function.
Sections~\ref{sec-highd} and \ref{sec-d12} contain
some preliminaries to the proofs of our
main results. The proofs are then completed in Section~\ref{sec-conv},
apart from the monotonicity of the diffusion constant for $d=1$ and $2$, which
is treated in Section~\ref{sec-diffconst}.
\subsection{Self-intersection local times}
\label{sec-silt}
This section briefly discusses the relation between our results and
invariance principles for self-intersection local times. For a random
variable $X$, we use the notation \eq \underline{X} = X - EX. \en
Consider first $d=2$, and let
\eq
\gamma_T = \frac{1}{T} \sum_{0 \leq s < t \leq T} \delta_{\omega(s),
\omega(t)}.
\en
It was proven in \cite{Rose90} that $\underline{\gamma_T}$ converges
in distribution to $\underline{\gamma}$, for random walks obeying a
periodicity condition not satisfied by the simple random walk.
We use ideas from \cite{Rose90} in this paper, and as a byproduct
obtain a proof of this convergence in distribution for the simple
random walk (see Proposition~\ref{weakconvergence}).
We will prove
Theorem~\ref{thm-main2} by combining the convergence in distribution
of $\underline{\gamma_T}$ to $\underline{\gamma}$ with
existence of a uniform exponential moment for $\underline{\gamma_T}$.
For $d=1$, the discrete self-intersection local time is given by
\eq
\gamma_T = \frac{1}{T^{3/2}} \sum_{0 \leq s < t \leq T}
\delta_{\omega(s),\omega(t)}.
\en
No renormalization is required for $d=1$, and it is known that $\gamma_T$
converges in distribution to its continuum counterpart $\gamma$
\cite{Boro81,Boro89,Perk82}.
Combined with existence of a uniform exponential moment
for $\gamma_T$, this will lead to a proof of Theorem~\ref{thm-main1}.
The proof we shall give of convergence in distribution of the renormalized
self-intersection local time for $d=2$ applies also to $d=1$, and
thereby provides an alternate approach to some results
of \cite{Boro81,Boro89,Perk82}.
Note that use of $p=1$ rather than $p=\frac{3}{2}$
in the definition of $\gamma_T$
would lead to a discrete random variable with an infinite limit, and
this much stronger interaction leads to collapse for all
positive $\beta$.
For dimensions $d>2$ we define
\eq
\gamma_T = \frac{1}{T} \sum_{0 \leq s < t \leq T}
\delta_{\omega(s),\omega(t)}.
\en
This use of $p=1$ defines a random variable which is smaller than
for the choice $p=\frac{4-d}{2}$ corresponding to the discrete
self-intersection local time.
With $p=1$ no renormalization is required, and we
will prove in Section~\ref{sec-highd} that $\gamma_T$ converges in $L^2$
to the constant $G(0)-1$. Combined with the existence of an exponential moment
for $\gamma_T$ (see Section~\ref{sec-exdiff}),
this will allow for a proof of Theorem~\ref{thm-main}.
To our knowledge, it remains an
open problem to construct the (repulsive) Edwards model for $d=3$
as a suitably renormalized continuum limit of the discrete model with
$p=\frac{1}{2}$.
The following lemma shows that the effect of the renormalization
of $\gamma_T$ by the subtraction of $E\gamma_T$ is a significant
effect when $d=2$, but not when $d \neq 2$. In particular, for $d \neq 2$
the value of $\beta_0$ would be unchanged if the renormalized partition
function $c_T^{ren}$ were replaced by $c_T$ in \refeq{beta0def}, but this
is not true for $d=2$. The proof of the lemma makes use
of the notation
\eq
\lbeq{convdef}
(f*g)_T(x) = \sum_y \sum_{s=0}^T f_s(y)g_{T-s}(x-y)
\en
for convolution in time and space.
\begin{lemma}
\label{lem-renconst}
As $T \to \infty$, \eq E\gamma_T = \left\{ \begin{array}{ll} G(0) - 1
+O(T^{-\epsilon}) & (d>2) \\ \frac{1}{\pi}\log T +O(1) & (d=2) \\
\frac{2}{3}\sqrt{\frac{2}{\pi}} +O(T^{-1/2}) & (d=1)
\end{array} \right. \en for any $\epsilon < \min \{ \frac{d-2}{2}, 1
\}$.
\end{lemma}
\proof Let $q_t(x) = p_t(0)\delta_{x,0}[1-\delta_{t,0}]$ and $a=\max
\{ 1, \frac{4-d}{2} \}$. By definition of $\gamma_T$ and the Markov
property, \eq \lbeq{EgammaT} E\gamma_T = \frac{1}{T^a} \sum_x
(p*q*p)_T(x) = \frac{1}{T^a} \sum_{s_2=2}^T \sum_{s_1=0}^{T-s_2}
p_{s_2}(0) = \frac{1}{T^a} \sum_{s_2=2}^T (T-s_2+1) p_{s_2}(0). \en
For $d>2$ the right side is given by \eq G(0)-1 -
\sum_{s_2=T+1}^\infty p_{s_2}(0) - \frac{1}{T} \sum_{s_2=0}^T
(s_2-1)p_{s_2}(0) - {1 \over T}, \en and the desired result then
follows from the fact that $p_s(0) = O(s^{-d/2})$. For $d=1$ or $2$,
the local central limit theorem states that $p_{2n}(0) = (\pi
n)^{-d/2} +O(n^{-1-d/2})$ (see e.g. \cite{Lawl91}). With
\refeq{EgammaT}, this gives the desired result. For example, when $d
= 1$ the leading term is the Riemann sum for ${1 \over 2}\sqrt{{2\over
\pi}} \int_0^1 ds \, (1-s) s^{-1/2}$; the factor ${1 \over 2}$
arises because only walks with an even number of steps can contribute
to $p_{s_2}(0)$. \qed
\subsection{A variational problem}
\label{sec-huer}
For $p=1$, we can write the partition function \refeq{pfx} in terms of
the local time \eq L_y(T) = \frac{1}{T}\sum_{i=0}^T
\delta_{y,\omega(i)}, \en which is the proportion of time spent by the
walk $\omega$ at the site $y$. We also define \eq \lbeq{tauyT}
\tau_y(T) = TL_y(T) = \sum_{i=0}^T \delta_{y,\omega(i)} \en to be the
number of visits of the walk $\omega$ to the site $y$, up to time $T$.
In terms of $L_y(T)$, \eq \lbeq{tauL} T^{-1} J_T = \frac{1}{2} T^{-1}
\left[ \sum_{i,j=0}^T \delta_{\omega(i),\omega(j)} - T \right] =
\frac{1}{2} T \sum_y L_y^2(T) - \frac{1}{2}. \en Hence we can rewrite
the partition function as \eq \lbeq{cTtau} c_T = e^{-\beta/2} E \exp
\left[ \frac{1}{2} \beta T \sum_y L_y^2(T) \right] . \en The
following subadditivity argument guarantees existence of the limit
$\lim_{T \to \infty} c_T^{1/T}$.
\begin{lemma}
\label{lem-subadd}
For $p=1$ and $\beta \geq 0$, and for any $d$, the limit $b= \lim_{T
\to \infty} T^{-1} \log c_T$ exists and is finite and nonnegative.
\end{lemma}
\proof
Define $\tau_y[a,b] = \sum_{i=a}^b \delta_{y,\omega(i)}$.
By definition,
\eq
\lbeq{sumlem1.5}
(\tau_y [0,T] )^2 \leq (\tau_y [0,S] + \tau_y [S,T] )^2
= (\tau_y[0,S])^2 + (\tau_y[S,T])^2 + 2 \tau_y[0,S]\tau_y[S,T].
\en
Estimating the last term on the right side using $2ab \leq \alpha a^2 +
\alpha^{-1}b^2$, with $\alpha = (T-S)/S$, gives
\eq
T^{-1}(\tau_y[0,T])^2 \leq S^{-1}(\tau_y[0,S])^2 +
(T-S)^{-1} (\tau_y [S,T])^2.
\en
Using \refeq{tauyT}, \refeq{cTtau} and the Markov property,
it follows that $\log
(e^{\beta/2} c_T)$ is subadditive. A standard lemma
about subadditive sequences then implies that
$\lim_{T \to \infty} T^{-1} \log c_T = \inf_{T \geq 1} T^{-1}
\log (e^{\beta/2} c_T)$, which gives the result of the lemma.
The limit is nonnegative for $\beta \geq 0$, since $c_T \geq 1$.
\qed
Keeping $p=1$ fixed and restricting $\beta >0$,
we now present a heuristic argument which predicts that
there is a collapse transition for $d \geq 2$ but not for $d=1$, for this
value of $p$. In fact, this argument provides the basis for the proof
by Bolthausen and Schmock that there is a collapsed phase. For simplicity we
consider the continuous-time model. In view of \refeq{cTtau} we may
as well consider the partition function
\eq
\lbeq{cTha}
\tilde{c}_T = \tilde{E}_T
\exp [ \beta T \sum_y L_y^2(\omega) ],
\en
where $\tilde{E}_T$ denotes expectation with respect to the continuous-time
simple random walk up to time $T$ and $L_y$ is interpreted appropriately.
The Donsker--Varadhan theory of large deviations suggests that
\eq
\lbeq{varprin}
\lim_{T \to \infty} \frac{1}{T} \log \tilde{c}_T = \sup \{ \beta
||\phi||_4^4 - \frac{1}{2} ||\nabla \phi ||_2^2 : ||\phi||_2=1 \}
\equiv b.
\en
The gradient is the
finite difference gradient associated to the lattice.
In general the supremum may or may not be attained.
It is clear
that for $\beta$ sufficiently large, and in any dimension, the solution $b$
of the variational problem is strictly positive. In this case it can
be shown that the
supremum is attained by an exponentially decaying function.
For $d =1$, it is the case for all $\beta >0$ that $b>0$ (this can be seen
from a scaling argument), and here too
the supremum is attained by an exponentially
decaying function. On the other hand for $d \geq 2$ there is a
Sobolev inequality
\eq
||\phi||_4^4 \leq C ||\phi ||_2^2 \, ||\nabla\phi||_2^2,
\en
and hence $b=0$ for sufficiently small $\beta$.
If $\phi^2$ realizes the supremum, then so does any translate. In the
collapsed phase it is
expected that the law for the process is a mixture of ergodic
components, i.e., the process breaks translation invariance by choosing
where to collapse,
and if we restrict to a component, then $\phi^2(y) = \lim_{T
\rightarrow \infty} L_y(T) \mbox{ a.s.}$. Exponential decay
of the optimal $\phi^2$, and hence $b>0$,
thus corresponds to collapse.
This type of result is proved by Bolthausen and Schmock. A difficulty in
applying the Donsker--Varadhan theory is that the state space here is
all of $\zd$ and is therefore not compact. This is overcome by making
use of the fact that in the collapsed phase the state space is nearly
compact, since the walk spends the bulk of its time in a compact subset
of $\zd$.
Thus, $b>0$ corresponds to a localized local time, or a confined phase
for the walk. On the other hand, $b=0$ is interpreted as
corresponding to the supremum being approximated by a sequence of
increasingly more constant (zero) $\phi$'s, and hence to the local
time approaching a constant (zero) function. This is interpreted as
extended behaviour for the walk.
The above discussion leads one to expect that there will be a collapse
transition, at positive $\beta$, for $d \geq 2$ but not for $d=1$, when $p=1$.
\section{Exponential moments}
\label{sec-exdiff}
\setcounter{equation}{0}
In this section we begin by showing
that $\gamma_T$ (and hence $\underline{\gamma_T}$)
has uniform
exponential moments of all orders for $d=1$ and of small orders
for $d>2$.
We then show that, for $d=2$, $\underline{\gamma_T}$
has uniform exponential moments of small orders.
\subsection{Dimensions $d \neq 2$}
\label{sec-dnot2}
Recalling the definition of convolution in \refeq{convdef}, we write
$f^{*n}$ for the convolution of $n$ factors of $f$.
\begin{theorem}
\label{thm-simeq}
For $d>2$, there is a $\beta_0 =\beta_0(d) > 0$ such that for all
$-\infty < \beta < \beta_0$,
\[ \sup_T E e^{\beta \gamma_T} < \infty.
\] For $d=1$, the above inequality holds for all real $\beta$.
\end{theorem}
\proof
%We are grateful to S.R.S.\ Varadhan for suggesting the main idea
%in this proof.
%
For $\beta \leq 0$, $Ee^{\beta \gamma_T} \leq 1$, so assume $\beta >0$.
Let $d \neq 2$, and let $a = \max \{ 1, \frac{4-d}{2} \}$. In view of
\refeq{tauyT} and \refeq{tauL}, it suffices to obtain the uniform bound
of the theorem with
$\gamma_T$ replaced by $T^{-a} \sum_y \tau_y^2(T)$.
By Jensen's inequality, since $T^{-1}\sum_y \tau_y =1$, we have
\eqarray
\exp [ \beta T^{-a} \sum_y \tau_y^2 ]
& \leq & \frac{1}{T} \sum_y \tau_y \exp [ T^{1-a} \beta \tau_y]
\nonumber \\
\lbeq{V1}
& = & \frac{1}{T} \sum_y \sum_{n=0}^\infty
\frac{\beta^n}{n!} \frac{1}{T^{(a-1)n}} \tau_y^{n+1}.
\enarray
Therefore
\eq
\lbeq{V2}
E \exp \left[ \beta T^{-a} \sum_y \tau_y^2 \right] \leq
\frac{1}{T}
\sum_{n=0}^\infty
\frac{\beta^n}{n!} \frac{1}{T^{(a-1)n}} \sum_y E \tau_y^{n+1}.
\en
The factor $\tau_y^{n+1}$ amounts to a requirement that $\omega$ visit
the site $y$ at each of
$n+1$ times, and ordering these times gives rise to a factor
$(n+1)!$. Let $p_{t}(x)$
denote the probability that simple random walk goes from $0$ to $x$
in $t$ steps, and let $q_t(x) = p_t(0)\delta_{x,0}[1-\delta_{t,0}]$.
Then we have, using $(p*p)_t(x) = (t+1)p_t(x)$, that
\eqarray
\lbeq{V4}
E \exp \left[ \beta T^{-a} \sum_y \tau_y^2 \right]
& \leq & \frac{1}{T}
\sum_{n=0}^\infty
\frac{\beta^n}{n!} \frac{1}{T^{(a-1)n}} \sum_{x} (n+1)!
(p*q^{*n}*p)_T(x) \nonumber \\
& = & \frac{1}{T}
\sum_{n=0}^\infty
\beta^n \frac{1}{T^{(a-1)n}} (n+1)
\sum_{t=0}^T (q^{*n})_{t}(0) \sum_{x}
(p*p)_{T-t}(x) \nonumber \\
& = & \frac{1}{T}
\sum_{n=0}^\infty
\beta^n \frac{1}{T^{(a-1)n}}
(n+1) \sum_{t=0}^T (q^{*n})_{t}(0) (T-t+1) \nonumber \\
& \leq & 2 \sum_{n=0}^\infty
\beta^n (n+1) \frac{1}{T^{(a-1)n}} \sum_{t=0}^T (q^{*n})_{t}(0) .
\enarray
When $d>2$, so that $a=1$, a uniform bound on the right
side for small $\beta$ then follows from the fact that
\eq
\lbeq{V7}
\sum_{t=0}^T (q^{*n})_{t}(0) \leq G(0)^n .
\en
For $d=1$, so that $a=\frac{3}{2}$, we have
$$
E \exp \left[ \beta T^{-3/2} \sum_y \tau_y^2 \right]
\leq 2 \sum_{n=0}^\infty
\beta^n (n+1) T ^{-n/2} \sum_{t=0}^T (q^{*n})_{t}(0).
$$
For $d=1$, there is a constant $c$ such that $|q_t| \le c / \sqrt{1 + t}$.
Using this fact, together with the $r=\frac{1}{2}$ version of
the inequality \refeq{Konoineq} of Lemma~\ref{kono} below, gives
$$
\sum_{t=0}^T (q^{*n})_{t}(0) \le {(cT)^{n/2} \over \sqrt{n!}}.
$$
Combining these two inequalities completes the proof.
\qed
\begin{lemma}
\label{kono}
Let $T > 0$, $t_0=0$ and $-\infty < r <1$. There is a constant $c$
(depending on $r$) such that for all integers $p \ge 0$, \eqarray
\int_{0 \le t_1 \le \ldots \le t_p \le T} d^p t \, \prod_{j=1}^p {1
\over (t_j - t_{j-1})^r} & = & { \left [ \Gamma(1-r) \right ]^p
\over \Gamma(p(1-r) + 1) } T^{p(1-r)} \\ \lbeq{Konoineq} \sum_{0 \le
t_1 \le \ldots \le t_p \le T} \prod_{j=1}^p {1 \over (1 + t_j -
t_{j-1})^r} & \le & {(cT)^{p(1-r)} \over (p!)^{1-r}}. \enarray
\end{lemma}
\proof The inequality follows from the identity by Stirling's
formula and the fact that
$$
\sum_{t = t_{j-1}}^{t_j} {1 \over (1 + t)^r} \le c
\int_{t_{j-1}}^{t_j} {dt \over t^r},
$$
so it suffices to prove the identity. Let
$$
I_p(T) = \int_{0 \le t_1 \le \ldots \le t_p \le T} d^p t \,
\prod_{j=1}^p {1 \over (t_j - t_{j-1})^r}.
$$
Then $I_p(T) = T^{p(1-r)} I_p(1)$ and therefore
$$
\int_0^\infty dT \, I_p(T) e^{-T} =
\Gamma(p(1-r)+1) I_p(1).
$$
On the other hand, by definition
$$
\int_0^\infty dT \, I_p(T) e^{-T} =
\left (\int_0^\infty {dt \,e^{-t} \over t^r} \right )^p \\
=
\left [\Gamma ( 1-r ) \right ]^p.
$$
Therefore
$$
I_p(1) = { \left [ \Gamma (1-r) \right ]^p \over \Gamma (p(1-r) + 1) }.
$$
The identity then follows from $I_p(T) = T^{p(1-r)} I_p(1)$.
\qed
The above method can also be used to show that the partition
function of the continuum Edwards model in $d=1$ is an entire
function of the coupling constant, and provides a slightly different
approach to that of \cite{LeGa94}.
\subsection{Dimension $d=2$}
\label{sec-d2}
For $d=2$ the situation is complicated by the need for
renormalization: since the Edwards model requires renormalization for
a finite partition function in two dimensions, it is to be expected
that the discrete partition function must also be renormalized if it
is to be uniformly bounded in $T$. The primary aim of this section is
to prove a uniform bound on the renormalized partition function.
However, part of the discussion applies also to dimensions $1 \leq d
<4$, and is not restricted to $d=2$.
For $d < 3$,
the renormalized self-intersection local time of
planar Brownian motion, $B(s)$, $s
\in [0,1]$, can be defined using a dyadic decomposition
introduced by Westwater
\cite{West80}. The following discussion reviews some results of Le Gall
in this regard
\cite{LeGa94,LeGa85,LeGa86}. We will subsequently prove that his methods
give analogous results for random walk, with uniformity in $T$. We begin
by defining
\eq
\Tcal = \{(s,t) \in {\bf R}^2 :0 \le s < t \le 1 \}
\en
and
\eq
A_k^n = \left[ \frac{2k-2}{2^n}, \frac{2k-1}{2^n} \right)
\times \left( \frac{2k-1}{2^n}, \frac{2k}{2^n} \right] ,
\lbeq{dyadic1}
\en
so that the sets
\begin{equation}
\Tcal_N = \cup_{n=1}^N \cup_{k=1}^{2^{n-1}} A_k^n \lbeq{dyadic2}
\end{equation} increase with $N$ and their union is $\Tcal$. For $A$
any finite union of these sets $A_k^n$ and $f$ a function on ${\bf
R}^d$, the random variable
\begin{equation}
\lbeq{gammaep} \gamma(A) = \lim_{f \rightarrow \delta} \gamma_f(A),
\quad \gamma_f(A) = \int \int_{A} ds\, dt\, f(B(s)-B(t)) \lbeq{gamma}
\end{equation} is defined as the limit in $L^2$ as $f$ tends weakly to
the Dirac $\delta$ function. The existence of this limit is an easy
result because the $A_k^n$ are designed so that
\begin{equation}
\int \int_{A_k^n} ds\, dt\, f(B(s)-B(t)) \stackrel{(d)}{=}
\int_0^{2^{-n}} \int_0^{2^{-n}} ds\, dt\, f(B_1(s)-B_2(t)),
\end{equation} where $B_1, \ B_2$ are independent Brownian motions
starting at the origin. Existence of the corresponding random walk
limit will be proven in Lemma~\ref{lem-rosen} below.
Given a random variable $X$, we write
\eq
\underline{X} = X - EX.
\en
Then the renormalized self-intersection local time is
defined\footnote{The subtraction of the
expectation cannot be commuted with the limit because $E
\gamma(\Tcal)$ is divergent; the use of the underline notation on the
left side of \refeq{gamTcal} is thus
a little misleading.}
by
\begin{equation}
\lbeq{gamTcal} \underline{\gamma}(\Tcal) = \lim_{N \rightarrow
\infty} \underline{\gamma}(\Tcal_N) =
\sum_{n=1}^{\infty}\sum_{k=1}^{2^{n-1}} \underline{\gamma}(A_k^n).
\end{equation} Existence of the limit in $L^2$ follows easily, using
independence of the $\gamma(A_k^n)$ for $n$ fixed and Brownian motion
scaling. The analogous argument for random walk is given below in
Lemma~\ref{lem-Napprox}. Furthermore, Le Gall \cite{LeGa94} has shown
that there exists $\beta_1 > 0$ such that for all $\beta < \beta_1$
\begin{equation}
\lbeq{d2expmom} E e^{\beta \underline{\gamma}(\Tcal)} < \infty.
\end{equation} This existence of an exponential moment for
$\underline{\gamma}(\Tcal)$ is more delicate than the corresponding
results of Section~\ref{sec-dnot2}. For negative $\beta$,
\refeq{d2expmom} was already proved by Varadhan \cite{Vara69}.
Our goal now is to prove versions of \refeq{gamTcal}
and \refeq{d2expmom} for random walk, uniformly in $T$.
In Section~\ref{sec-d12}, we will also prove a uniform random walk version of
\refeq{gammaep}.
First we introduce several definitions. Define
\eq
\lbeq{TAT}
\Tcal_T = \{ (i,j) \in {\bf Z}^2 : 0 \leq i < j \leq T \},
\quad
\Acal_T = \{ (i,j) \in {\bf Z}^2 : 0 \leq i , j \leq T \},
\en
and for $A \subset \Acal_T$, let
\eq
\alpha_T (A) = {1 \over T^{(4-d)/2}} \sum_{(i,j) \in A}
\delta_{\omega(i),\omega'(j)},
\en
where $\omega$, $\omega'$ are two independent $T$-step simple random walks
which both begin at the origin. Given a function $h$ on $\zd$,
we also define
\eq
\lbeq{alphaTh}
\alpha_{T,h} (A) = {1 \over T^{(4-d)/2}} \sum_{(i,j) \in A}
h(\omega(i)-\omega'(j)).
\en
To adapt the proofs of \refeq{gamTcal} and \refeq{d2expmom}
for Brownian motion to random walk, we will make use of the following
moment estimate on $\alpha_T$, which for two-dimensional Gaussian
processes is proven in (2.15) of \cite{Rose84}. For Brownian motion
in $d=2$ and $d=3$ it is proven in Lemma 2 of \cite{LeGa94} and in
Lemma~2.2 of \cite{LeGa86}, respectively. Our estimates will be limited
to $d<4$; this is a natural limitation since the Brownian motion
intersection local time has an infinite first moment for $d \geq 4$.
We will make use of the Fourier transform, defined for a function $f$
on $\zd$ by \eq \lbeq{Ftdef} \hat{f}(k) = \sum_x f(x) e^{ik \cdot x}
\quad (k \in [-\pi,\pi]^d). \en
\begin{lemma}
\label{lem-alphap}
Let $1 \leq d < 4$. There is a constant $C$ (depending on $d$) such
that for all $T$ and for all $p=1,2,3,\ldots$, \eq
E[(\alpha_T(\Acal_T))^p] \leq C^p (p!)^{d/2}. \en
\end{lemma}
\proof Our proof is a variation on a technique of \cite{Rose84}. We first
note that since
\eq
\alpha_T (\Acal_T) ={1 \over T^{(4-d)/2}} \sum_{(i,j) \in \Acal_T}
\delta_{\omega(i),\omega'(j)}
= {1 \over T^{(4-d)/2}} \sum_x \tau_x(T) \tau'_x(T),
\en
it suffices to show that
\eq
\lbeq{Rosensuff}
E\left [ \left( \sum_x \tau_x(T) \tau'_x(T) \right)^p \right ] \le
(C T^{(4-d)/2})^p (p!)^{d/2} .
\en
By the Plancherel theorem,
\begin{eqnarray}
E\left [ \left( \sum_x \tau_x(T) \tau'_x(T) \right)^p \right ] &=&
\sum_{x_1,\ldots,x_p} \left(E[\tau_{x_1} \cdots \tau_{x_p}]\right
)^2 \nonumber \\ &=& (2\pi)^{-dp} \int d^{dp} k\, \left |
E[\hat{\tau}_{k_1} \cdots \hat{\tau}_{k_p}] \right |^2
\end{eqnarray} where the integration is over $[-\pi,\pi]^d$ for each
of the $p$ variables $k$. The Fourier transform is given by \eq
\hat{\tau}_{k} = \sum_x e^{ik \cdot x} \tau_x = \sum_{t=0}^T e^{ik
\cdot \omega(t)}. \en Therefore \eq E[\hat{\tau}_{k_1} \cdots
\hat{\tau}_{k_p}] = \sum_{t_1,\ldots,t_p} E\left [ \exp[ i
\sum_{j=1}^p k_j \cdot \omega(t_j)] \right ] . \en
For a term in the sum for which $t_1 \le t_2 \le \ldots \le
t_p$, we rewrite the exponent using
\eq
\lbeq{kK}
\sum_{j=1}^p k_j \cdot \omega(t_j) = \sum_{j=1}^p
K_j \cdot (\omega(t_j) - \omega(t_{j-1})),
\en
where $K_j = k_{p} + k_{p-1} + \ldots + k_{j}$ and $t_0= 0$.
Let
\eq
\lbeq{Dhatdef}
\hat{D}(k) = \frac{1}{d} \sum_{j=1}^d \cos k_j .
\en
Since the exponential of the sum \refeq{kK} factors into independent random
variables and
\eq
E\left [ \exp[ i k \cdot \omega(t)] \right ] = \hat{D}^t(k),
\en
we obtain
\eq
E\left [ \exp[ i \sum_{j=1}^p k_j \cdot \omega(t_j)] \right ]
= \prod_{j=1}^p \hat{D}^{t_j-t_{j-1}}(K_j) .
\en
Now consider a general term in the sum for which $t_1,\ldots,t_p$ are
not necessarily in increasing order. Let $\pi$ be the unique
permutation of $\{1,2, \ldots ,p\}$ for which $t_{\pi(1)} \le
t_{\pi(2)} \le \ldots \leq t_{\pi(p)}$ and whenever $t_{\pi(i)} =
t_{\pi(j)}$ for some $i < j$ then $\pi(i) < \pi(j)$. For $j =
1,2,\ldots,p$ we let
\begin{eqnarray*}
&& s_j = t_{\pi(j)} - t_{\pi(j-1)} \\ && K_j = k_{\pi(p)} +
k_{\pi(p-1)} + \ldots + k_{\pi(j)}.
\end{eqnarray*} Then \eq E[\hat{\tau}_{k_1} \cdots \hat{\tau}_{k_p}] =
\sum_{t_1,\ldots,t_p} \prod_{j=1}^p \hat{D}^{s_j}(K_j) . \en
Therefore
\begin{eqnarray}
E\left [ \left( \sum_x \tau_x(T) \tau'_x(T) \right)^p \right ] &=&
\sum_{t_1,\ldots,t_p} \sum_{t'_1,\ldots,t'_p} (2\pi)^{-dp} \int
d^{dp} k\, \prod_{j=1}^p \left [ \hat{D}^{s_j}(K_j)
\hat{D}^{s'_j}(-K'_j) \right ] \nonumber \\ &\le& \left [
\sum_{t_1,\ldots,t_p} \left [ (2\pi)^{-dp} \int d^{dp} k\,
\prod_{j=1}^p \hat{D}^{2s_j}(K_j) \right ]^{1/2} \right ]^2,
\end{eqnarray} by the Cauchy--Schwarz inequality. We can perform the
$k$ integrals in the order $k_{\pi(1)}, \ldots, k_{\pi(p)}$ by using
translation invariance over the torus $[-\pi,\pi]^d$. For this, we
observe that \eq p_s(0) = (2\pi)^{-d} \int d^d k \, \hat{D}^{s}(k) \en
is the probability of return to the origin after $s$ steps, and obtain
\begin{eqnarray*}
E\left [ \left( \sum_x \tau_x(T) \tau'_x(T) \right)^p \right ] &\le&
\left [ \sum_{t_1,\ldots,t_p} \prod_{j=1}^p \sqrt{p_{2s_j}(0)}
\right ]^2 \\ &\le& (p!)^2 \left [ \sum_{0 \le t_1 \le \ldots \le t_p
\le T} \prod_{j=1}^p \sqrt{p_{2t_j - 2t_{j-1}}(0)} \right ]^2 .
\end{eqnarray*} Using the fact that $0 \leq p_t(0) \le c (1 +
t)^{-d/2}$ for some constant $c$ and applying Lemma~\ref{kono} then
gives \refeq{Rosensuff} and completes the proof. It is at this last
step that we use the hypothesis $d<4$. \qed
Let $d \leq 2$.
The $p=2$ estimate of Lemma~\ref{lem-alphap} allows us readily to obtain
a uniform version of \refeq{gamTcal} for random walk, for future reference.
We first define
\begin{equation}
\Tcal_{T,N} = \cup_{n=1}^N \cup_{k=1}^{2^{n-1}} T A_k^n \cap
\Zbold^2, \lbeq{dyadic3}
\end{equation}
\begin{equation}
{\gamma}_T(A) = \frac{1}{T^{(4-d)/2}} \sum_{(i,j)\in A}
\delta_{\omega(i),\omega(j)}, \quad A \subset \Acal_T, \lbeq{gammaT}
\end{equation} and \eq \lbeq{gnk} \underline{\gamma}(n,k) =
\underline{{\gamma}_T} (T A_k^n\cap \Zbold^2). \en
\begin{lemma}
\label{lem-Napprox}
For $d \leq 2$,
\begin{equation}
\lbeq{Napprox} \lim_{N \rightarrow \infty} \sup_T E \left (
\underline{{\gamma}_T}(\Tcal_T) -
\underline{{\gamma}_T}(\Tcal_{T,N}) \right )^2 = 0.
\end{equation}
\end{lemma}
\proof
For simplicity, we assume first that $T=2^M$ for some integer $M$.
We write $\|A\|_2 = \sqrt{EA^2}$. The
random variables $\underline{\gamma}(n,k)$ are independent for
$k=1,2,\ldots, 2^{n-1}$, and each has the distribution of $2^{-n(4-d)/2}
\underline{\alpha_{2^{-n}T}}(\Acal_{2^{-n}T})$. Using Lemma \ref{lem-alphap},
we have
\begin{equation}
\|\sum_{k=1}^{2^{n-1}} \underline{\gamma}(n,k) \|_2 = 2^{(n-1)/2} \|
\underline{\gamma}(n,1)\|_2 = 2^{-1/2}2^{-n(3-d)/2} \|
\underline{\alpha_{2^{-n}T}}(\Acal_{2^{-n}T})\|_2 \le c
2^{-n(3-d)/2}.
\end{equation} Hence the series is summable for $d<3$, and since its
tail bounds the expectation in \refeq{Napprox}, this proves the lemma
for the special case of dyadic $T$.
The proof for the general case is almost identical. The difference is
that a set $T A_k^n \cap \Zbold^2$ need not be a square, but could also be
a rectangle whose side lengths differ only by 1. This small change poses
no essential difficulties. For example, a moment of an asymmetric version
of $\alpha$, in which $\Acal_T$ is replaced by a rectangle, can be bounded
above by the moment of the symmetric $\alpha$ on the square obtained
by enlarging one side of the rectangle by 1.
\qed
Note that it was actually required that $d<3$, rather than
$d \leq 2$, in the proof of Lemma~\ref{lem-Napprox}.
The fact that the lemma fails for $d \geq 3$ is a symptom of
the fact that, for $d \geq 3$ and $p=\frac{4-d}{2}$,
renormalization beyond subtraction of
$E {\gamma}_T$ is required.
By Lemma~\ref{lem-alphap} and the fact that
$E[\underline{\alpha_T}(\Acal_T)]=0$, for $d \leq 2$
there is a constant $C_2$ and a $\lambda_0 >0$, both independent of $T$,
such that
\eq
\lbeq{lambda0}
E[\exp[\lambda \underline{\alpha_T}(\Acal_T)]] \leq 1+ C_2
\lambda^2 \leq e^{C_2 \lambda^2}, \quad
(0 \leq \lambda \leq 2 \lambda_0).
\lbeq{quadbnd}
\en
The fact that $\underline{\alpha_T}(\Acal_T)$ has an exponential moment will
be the key fact in proving a uniform version of \refeq{d2expmom}
for random walk.
We now prove such a bound on the renormalized partition function
by adapting Le Gall's proof for the analogous continuum problem \cite{LeGa94}.
\begin{theorem}
\label{thm-d2pfbd}
Let $d \leq 2$. There is a constant $K$ such that for $\beta >0$
sufficiently small and for all $T$, $E [ \exp [\beta
\underline{\gamma_T}(\Tcal_T)]] \leq K$.
\end{theorem}
\proof
Assume that $T=2^M$ for some integer $M$; the general case can be
treated similarly.
%Following \refeq{dyadic1}, \refeq{dyadic2} we define $A_k^n(T)
%= TA_k^n\cap {\bf Z}^2.$ Writing $\underline{\gamma}(n,k) =
%\underline{\gamma_T}(A_k^n(T))$, we have
By \refeq{TAT}, \refeq{dyadic3} and \refeq{gnk},
\eq
\lbeq{bargam}
\underline{\gamma_T}(\Tcal_T) = \sum_{n=1}^{\infty}\sum_{k=1}^{2^{n-1}}
\underline{\gamma}(n,k).
\en
Note that
$TA_k^n\cap {\bf Z}^2 = \emptyset$ for $n > M$, so the sum is finite
for each $T$. Let $\lambda \in (0,\lambda_0)$ (with $\lambda_0$ given
by \refeq{lambda0}), and define
\eq
\beta_N = 2\lambda \prod_{j=2}^N (1-2^{-j/2}) \;\;\; (N \geq 2),
\quad \quad \beta_1 = 2\lambda.
\en
The sequence $\beta_N$ decreases to a limit
$\beta_\infty = \beta_\infty(\lambda) >0$. Let
\eq
\zeta_T(N) =
E \left[ \exp \left(\beta_N \sum_{n=1}^{N}\sum_{k=1}^{2^{n-1}}
\underline{\gamma}(n,k) \right) \right] .
\en
Separating the contribution due to $n=N$, and applying H\"{o}lder's inequality
(with $p=2^{N/2}/(2^{N/2}-1)$ and $q=2^{N/2}$), we obtain
\eq
\zeta_T(N) \leq \zeta_T(N-1)^{1-2^{-N/2}}
\left[ E \exp \left( 2^{N/2} \beta_N \sum_{k=1}^{2^{N-1}}
\underline{\gamma}(N,k) \right) \right]^{2^{-N/2}}.
\en
The random variables $\underline{\gamma}(N,k)$ are independent for
$k=1,2,\ldots,
2^{N-1}$, and moreover each has the distribution of
$2^{-N} \underline{\alpha_{T2^{-N}}}(\Acal_{T2^{-N}})$. Hence, by
\refeq{quadbnd} and the fact that $\beta_N \leq 2\lambda_0$,
\eq
\left[ E \exp \left( 2^{N/2} \beta_N \sum_{k=1}^{2^{N-1}}
\underline{\gamma}(N,k) \right) \right]^{2^{-N/2}} \leq
\exp [2 C_2 \lambda_0^2 2^{-N/2}].
\en
By induction, this gives
\eq
\lbeq{zTN}
\zeta_T(N) \leq \zeta_T(1)
\exp \left[ 2 C_2 \lambda_0^2 \sum_{j=2}^N 2^{-j/2} \right]
\leq \zeta_T(1)
\exp \left[ \frac{2 C_2 \lambda_0^2}{2-\sqrt{2}} \right],
\en
where
\eq
\zeta_T(1) = E[e^{\beta_1 \underline{\gamma}(1,1)}]
= E\exp [2\lambda \underline{\alpha_{T/2}}({\cal A}_{T/2})]
\en
is uniformly bounded by \refeq{lambda0}.
Applying Fatou's Lemma to take the limit $N \to \infty$ in \refeq{zTN},
and using the fact that $\lambda \in (0,\lambda_0)$ was arbitrary,
this gives a uniform bound on $E \exp [\beta \underline{\gamma_T}(\Tcal_T)]$
for $\beta \in (0, \beta_\infty (\lambda_0))$.
\qed
\section{The scaling limit}
\label{sec-diff}
\setcounter{equation}{0}
\subsection{Preliminary for $d>2$}
\label{sec-highd}
Throughout this section we fix $p=1$ and the dimension $d>2$, and as
usual we set \eq \gamma_T = \frac{1}{T} \sum_{0 \leq i < j \leq T}
\delta_{\omega(i),\omega(j)} \en and \eq \underline{\gamma_T} =
\gamma_T - E \gamma_T. \en The combination of the following
proposition with existence of an exponential moment will allow us to
prove Theorem~\ref{thm-main}.
\begin{prop}
\label{prop-Jtozero}
In any dimension $d > 2$,
\begin{equation}
E\left[ \left( \underline{\gamma_T} \right)^2 \right] =
O(T^{-\theta}) \quad \mbox{as } T \to \infty,
\end{equation} for some $\theta >0$.
\end{prop}
\proof
By Lemma~\ref{lem-renconst}, we have
\eq
\lbeq{subterm}
E [(\underline{\gamma_T})^2] = E[\gamma_T^2]
-(G(0)-1)^2 +O(T^{-\epsilon}).
\en
By definition, the first term on the right side is given by
\eq
\lbeq{Eeta2}
E [ \gamma_T^2] = \frac{1}{T^2} \sum_{0 \leq s_1 < t_1 \leq T}
\sum_{0 \leq s_2 < t_2 \leq T} E \delta_{\omega(s_1),\omega(t_1)}
\delta_{\omega(s_2),\omega(t_2)}.
\en
To evaluate the limiting behaviour of the right side, we consider
separately the contributions to the double sum arising from the
following three cases.
\noindent Case (a): the intersection of the two intervals $[s_1,t_1]$,
$[s_2,t_2]$ is nonempty but does not contain either of the intervals,
\noindent Case (b): one of these two intervals is a subset of the
other,
\noindent Case (c): the intervals $[s_1,t_1]$ and $[s_2,t_2]$ do not
intersect.
We will see that the first two cases correspond to error terms, while the
third term gives the main term and will cancel the subtracted term in
\refeq{subterm}.
\noindent Case (a): Suppose that $0 \le s_1 < s_2 \le t_1 < t_2 \le
T$, or that the inequality with subscripts 1 and 2 interchanged holds.
Taking both possibilities into account, the contribution to
\refeq{Eeta2} due to this case is
\begin{equation}
2T^{-2 }\sum_{0 \le i_1 < i_2 \le i_3 < i_4 \le T} \sum_{x,y}
p_{j_1}(x) p_{j_2}(y-x) p_{j_3}(y-x) p_{j_4}(y-x)
\end{equation} where $j_1 = i_1$, $j_2 = i_2 - i_1$, $j_3 = i_3 -
i_2$, and $j_4 = i_4 - i_3$. This is at most
\begin{equation}
2T^{-2 } \sum_{\stackrel{j_1 , j_2 , j_3 , j_4 } {j_1 + j_2 + j_3 +
j_4 \le T}} \sum_{x,y} p_{j_1}(x) p_{j_2}(y-x) p_{j_3}(y-x)
p_{j_4}(y-x).
\end{equation} By symmetry we rewrite this as less than $3!$ times the
same sum but with the additional constraint $j_2 \le j_3 \le j_4 $.
Then we use $p_j(x) \le O(j^{-d/2})$ for $j = j_3, j_4$ and $\sum_v
p_j(v) = 1$ for $j = j_1, j_2$. In this way we find that the
contribution of this case is at most
\begin{equation}
T^{-2 } \sum_{0 \le j_1 \le T; \, 0 \leq j_2 \le j_3 \le j_4 \le T}
O(j_3^{-d/2} j_4^{-d/2}) \le O(T^{-\theta}),
\end{equation} with $\theta >0$.
\noindent Case (b): Suppose $0 \le s_1 \le s_2 < t_2 \le t_1 \le T$,
or that the inequality with subscripts 1 and 2 interchanged holds.
With the same definitions for $j_i$, we wish to estimate
\begin{equation}
2T^{-2 } \sum_{\stackrel{j_1 , j_2 , j_3 , j_4 } {j_1 + j_2 + j_3 +
j_4 \le T}} \sum_{x,y} p_{j_1}(x) p_{j_2}(y-x) p_{j_3}(0)
p_{j_4}(y-x).
\end{equation} By symmetry we rewrite this as less than $2!$ times the
same sum but with the additional constraint $j_2 \le j_4 $. Then we
use $p_j(x) \le O(j^{-d/2})$ for $j = j_3, j_4$ and $\sum_v p_j(v) =
1$ for $j = j_1, j_2$. Therefore the contribution of this case is,
for some $\theta >0$, at most
\begin{equation}
T^{-2} \sum_{0 \le j_1 \le T; \, 0 \leq j_2 \le j_4 \leq T; \, 0 \le
j_3 \le T} O(j_3^{-d/2} j_4^{-d/2}) \le O(T^{-\theta}).
\end{equation}
\noindent Case (c): Suppose now that $0 \leq s_1 < t_1 < s_2 < t_2
\leq T$, or that the inequality with subscripts 1 and 2 interchanged
holds. The contribution due to this case is \eq 2T^{-2}
\sum_{\stackrel{j_1 \geq 0;j_3 \geq 1; j_2,j_4 \geq 2} {0 \leq
j_1+j_2+j_3+j_4 \leq T}} \sum_{x,y} p_{j_1}(x) p_{j_2}(0)
p_{j_3}(y-x) p_{j_4}(0). \en Performing the sum over $x,y$, and then
the sum over $j_1$ and $j_3$, and letting \eq S(j_2,j_4,T) =
\frac{(T-j_2-j_4)(T-j_2-j_4+1)}{2} , \en this is equal to \eq
\lbeq{221} 2T^{-2} \sum_{j_2=2}^T \sum_{j_4=2}^{T-j_2} p_{j_2}(0)
p_{j_4}(0) S(j_2,j_4,T). \en Using the fact that $p_j(0) =
O(j^{-d/2})$, it is not hard to see that the dominant contribution
arises from the $T^2/2$ term in $S$, and that the above is equal to
\eq \left( \sum_{j=2}^\infty p_j(0) \right)^2 +O(T^{-\theta}) =
(G(0)-1)^2 +O(T^{-\theta}). \en This gives a cancellation in
\refeq{subterm}, and completes the proof. \qed
%\smallskip
%We now consider the case of $u=2$. We wish to estimate
%\eq
%\lbeq{u2}
% E\left [ T^{-1}|\omega(T)|^2 \left( \underline{\gamma_T}\right)^2 \right]
% = E\left [ T^{-1}|\omega(T)|^2 \left( \gamma_T \right)^2 \right]
% - 2 E\left [ T^{-1}|\omega(T)|^2 \gamma_T \right]
% E \gamma_T + \left(E \gamma_T \right)^2.
%\en
%The asymptotic behaviour of the last term on the right side is given
%by Lemma~\ref{lem-renconst}, and the behaviour of the remaining terms
%can be obtained as in the proof for $u=0$. All of the expectations
%here will be seen to have the same leading behaviour as $E \gamma_T$, yielding
%a cancellation.
%
%For the middle term of the right side of \refeq{u2}, we use symmetry and
%the fact that $\sum_x x^2 p_s(x) = s$ to conclude that
%\eqarray
% E\left [ T^{-1}|\omega(T)|^2 \underline{\gamma_T} \right]
% & = & \frac{1}{T^2}\sum_{x} x^2 (p*q*p)_T(x)
% \nonumber \\ \nonumber
% & = & \frac{1}{T^2}\sum_{x,y} (y + (x-y))^2
% \sum_{j_2=2}^T \sum_{j_1=0}^{T-j_2} p_{j_1}(y) p_{j_2}(0)
% p_{T-j_1-j_2}(x-y)
% \\
% & = & \frac{1}{T^2} \sum_{j_2=2}^T \sum_{j_1=0}^{T-j_2} p_{j_2}(0)
% \left( j_1 + (T-j_1-j_2) \right)
% \nonumber \\
% & = & G(0)-1 + O(T^{-\epsilon}).
%\enarray
%
%The first term on the right side of \refeq{u2}
%is treated by considering three cases as for $u=0$. For cases
%(a) and (b), using symmetry as with the middle term, we find that the
%net effect of the factor $T^{-1}|\omega(T)|^2$ is at most 1. Finally,
%for case (c) the effect of this factor is to introduce a factor
%$T^{-1}(j_1+j_3+(T-j_1-j_2-j_3-j_4))$ into \refeq{221}.
%The dominant term here is $T^{-1}T=1$
%and the others give rise to error terms.
%\qed
%
%For any closed interval $I = [s,t] \subset [0,T]$ with $s 2$ we have
%$\sum_{t \le T} E X_{[0,t]}/T \le O(T^{-1})$. Therefore
%\begin{eqnarray*}
% \sum_{I,I': I \cap I' \not = \emptyset} E X_I E X_{I'}
%&\le&
% \sum_{I} E X_I |I| \sum_{I'=[0,t]} E X_{I'}\\
%&\le&
% O(T^{-1}) \sum_{0 \le s < t \le T} E X_{[s,t]} |t-s| \\
%&\le&
% O(T^{-2}) \sum_{0 \le s < t \le T} |t-s|^{1-d/2} \le O(T^{1-d/2})\\
%\end{eqnarray*}
%which tends to zero as $T \rightarrow \infty$ for $d > 2$.
%
%Next we consider the first term in equation \refeq{overlap}. We break
%the sum over $I,I'$ up into: case (a)
%neither interval $I$, $I'$ contains the other; case (b) one
%interval is contained in the other.
%
%Case (a): Suppose $I = [s_1,s_3]$ and $I' = [s_2,s_4]$ with $0 \le s_1
%< s_2 \le s_3 < s_4 \le T$. (There is also the same configuration but
%with $I$ and $I'$ interchanged). Upon substituting in the definitions of
%$X_I$ and $X_{I'}$ we find we are estimating
%\begin{equation}
% T^{-2 }\sum_{0 \le s_1 < s_2 \le s_3 < s_4 \le T} \sum_{x,y} p_{t_1}(x)
% p_{t_2}(y-x)
% p_{t_3}(y-x)
% p_{t_4}(y-x)
%\end{equation}
%where $t_1 = s_1$, $t_2 = s_2 - s_1$, $t_3 = s_3 - s_2$, and $t_4 = s_4 -
%s_3$. This is less than
%\begin{equation}
% T^{-2 } \sum_{\stackrel{t_1 , t_2 , t_3 , t_4 }
% {t_1 + t_2 + t_3 + t_4 \le T}}
% \sum_{x,y} p_{t_1}(x)
% p_{t_2}(y-x)
% p_{t_3}(y-x)
% p_{t_4}(y-x).
%\end{equation}
%By symmetry we rewrite this as less than $3!$ times the same sum but
%with the additional constraint $t_2 \le t_3 \le t_4 $. Then we use
%$p_t(x) \le O(t^{-d/2})$ for $t = t_3, t_4$ and $\sum_x p_t(x) = 1$ for
%$t = t_1, t_2$. Therefore the contribution of this case less than
%\begin{equation}
% T^{-2 } \sum_{0 \le t_1 , t_2 \le t_3 \le t_4 \le T}
% O(t_3^{-d/2}) O(t_4^{-d/2}) \le O(T^{2-d})
%\end{equation}
%which tends to zero as $T \rightarrow \infty$.
%
%Case (b): Suppose $I = [s_1,s_4]$ and $I' = [s_2,s_3]$ with $0 \le s_1
%\le s_2 < s_3 \le s_4 \le T$. (There is also the same configuration but
%with $I$ and $I'$ interchanged). With the same definitions for $t_i$
%we find, upon substituting in the definitions of
%$X_I$ and $X_{I'}$, that we are estimating
%\begin{equation}
% T^{-2 } \sum_{\stackrel{t_1 , t_2 , t_3 , t_4 }
% {t_1 + t_2 + t_3 + t_4 \le T}}
% \sum_{x,y} p_{t_1}(x)
% p_{t_2}(y-x)
% p_{t_3}(0)
% p_{t_4}(y-x).
%\end{equation}
%By symmetry we rewrite this as less than $2!$ times the same sum but
%with the additional constraint $t_2 \le t_4 $. Then we use
%$p_t(x) \le O(t^{-d/2})$ for $t = t_3, t_4$ and $\sum_x p_t(x) = 1$ for
%$t = t_1, t_2$. Therefore the contribution of this case less than
%\begin{equation}
% T^{-2 } \sum_{0 \le t_1 , t_2 \le t_4 , t_3 \le T}
% O(t_3^{-d/2}) O(t_4^{-d/2}) \le O(T^{1 - d/2})
%\end{equation}
%which tends to zero as $T \rightarrow \infty$.
%\qed
%
%\begin{cor}
%\label{cor-scaletoBM}
%Let $\beta_c = \sup \{ \beta: \sup_T \exp [\beta \underline{J_T}/T ] <
%\infty \}$. If $\beta < \beta_c$, then $\forall n, t_1, \ldots t_n
%\in [0,1]$
%\begin{equation}
% \lim_{T \rightarrow \infty}
% E e^{\beta \underline{J_T}/T }
% \prod_i {\omega([t_i T]) \over \sqrt{T}}
%=
% E \prod_i X(t_i)
%\end{equation}
%where on the right hand side $X(t)$ is Brownian motion.
%\end{cor}
%
%\proof Pick $p > 1$ such that $p\beta < \beta_c$. Let $X_T = \exp [p\beta
%\underline{J_T}/T]$. By hypothesis the random variables $X_T$ are
%uniformly integrable, i.e., $\lim_{N \rightarrow \infty} \sup_T E
%X_T I(X_T > N)= 0$. By the inequality $|\exp [A] -1| \le |A| (1 +
%\exp [A])$
%and Proposition~\ref{prop-Jtozero},
%\begin{equation}
% E \left |(\exp[\beta \underline{J_T}/T] - 1) I(X_T \le N)\right |^p
%\le
% C(N,p) E \left | X_T \right |^p \rightarrow 0 \mbox{ as T } \rightarrow %\infty.
%\end{equation}
%Therefore $\exp[\beta \underline{J_T}/T] \rightarrow 1$ in $L_p$. By
%the Holder inequality it is sufficient to prove the corollary with
%$\exp\{\beta \underline{J_T}/T \} $ replaced by 1. But then it is a
%standard result \cite{Bill68}. \qed
%
%{\noindent {\bf Proof of Theorem~\ref{thm-main} }. \hspace{2mm}} THIS
%NEEDS WORK IN PARTICULAR PERHAPS WE SHOULD WEAKEN THE $O(\beta
%T)^{-\epsilon}$ TO $o(\beta T)$ BUT SHOULD BE A SIMPLE CONSEQUENCE OF
%Corollary~\ref{cor-scaletoBM} and Theorem~\ref{thm-simeq} and checking that
%the renormalization constant $\lim_{T \rightarrow \infty} E \beta
%J_T/T = t\beta [G(0)-1]$.
%\qed
\subsection{Preliminary for $d \leq 2$}
\label{sec-d12}
We define an approximate Kronecker delta function by \eq
\delta_{x;\epsilon} = {1 \over (2\pi)^d} \int_{[-\pi,\pi]^d} d^d k \,
\exp \left [ ik \cdot x - \epsilon k^2 \right ] . \en The following
lemma proves a uniform version of \refeq{gammaep} for random walk, and
is closely related to Lemma~1 of \cite{Rose90}. Its proof is
complicated in a minor way by the fact that the simple random walk
does not satisfy the strong aperiodicity hypothesis of \cite{Rose90},
so that it is not the case that $|\hat{D}(k)|=1$ only when each
component $k_j$ is an integer multiple of $2\pi$ (see \cite{Spit76},
page 75, P8). Our results could be extended to more general symmetric
random walks with second moments by an appropriate generalization of
this lemma.
\begin{lemma}
\label{lem-rosen}
Let $1 \leq d<4$ and $h_{\epsilon T}(x) = \delta_{x;\epsilon T} -
\delta_x$. Then \eq \lim_{\epsilon \to 0} E \left [
\alpha_{T,h_{\epsilon T}} (\Acal_T)\right ]^2 = \lim_{\epsilon \to 0}
E \left[ \alpha_{T,\delta_{\epsilon T}} (\Acal_T) - \alpha_T (\Acal_T)
\right ]^2 = 0, \en where the limit is uniform in $T$ and
$\alpha_{T,f}$ is given by \refeq{alphaTh}.
\end{lemma}
\proof
In the following, $k_1$, $k_2$
are each $d$-component vectors, $d^d k_1$,
$d^d k_2$ are each Lebesgue measures on ${\bf R}^d$ and $I(k_1,k_2)$ is
the indicator function of the set $\{k_1,k_2 \in [-\pi,
\pi]^d\}$. Going over to the Fourier transform (see \refeq{Ftdef}), we have
\begin{eqnarray*}
E \left [ \alpha_{T,h_{\epsilon T}} (\Acal_T) \right ]^2 &=&\\
&&\hspace{-2cm} \frac{1}{T^{4-d}} \int \frac{d^d k_1}{(2\pi)^d} \int
\frac{d^d k_2}{(2\pi)^d} |\hat{h}_{\epsilon T}( k_1)
\hat{h}_{\epsilon T}( k_2) | I(k_1,k_2)
%&&\hspace{1cm} \times
\left |
\sum_{0 \le s_1, s_2 \leq T}
E e^{ik_1 \cdot \omega(s_1) + ik_2 \cdot \omega(s_2)}
\right |^2.
\end{eqnarray*} Let \eq F_T(k_1,k_2) = \left | \sum_{0 \le s_1
\le s_2 \le T} E e^{i k_1 \cdot \omega(s_1)} e^{i k_2 \cdot
\omega(s_2)} \right | I(k_1,k_2). \en Then, using
$\hat{h}_{\epsilon T}(k) = e^{-\epsilon T k^2} -1 =
\hat{h}_{\epsilon}(\sqrt{T}k)$, we have \eq \lbeq{FTpreclaim} E
\left [ \alpha_{T,h_{\epsilon T}} (\Acal_T) \right ]^2 \leq {4
\over {T^{4-d}}} \int \frac{d^d k_1}{(2\pi)^d} \int \frac{d^d
k_2}{(2\pi)^d} |\hat{h}_{\epsilon }(\sqrt{T}k_1)
\hat{h}_{\epsilon }(\sqrt{T}k_2)| F_T^2 (k_1,k_2) . \en
Let $\vec{\pi}$ denote the vector in ${\bf R}^d$
whose components are all equal to $\pi$.
As $T \rightarrow \infty$,
the integrand of \refeq{FTpreclaim} can become singular at the points
$(k_1 , k_2) = (0,0)$, $(0,\vec{\pi})$, $(\vec{\pi},0)$,
$(\vec{\pi},\vec{\pi})$. We break the domain of integration
into subsets which each contain one of these points, and a
complementary set where the integrand remains bounded.
As before, we write $\hat{D}(k) = d^{-1} \sum_{j=1}^d \cos k_j$.
Beginning with
the subset that contains $(0,0)$, we fix
a small constant $a > 0$ so that $|p| < a$ (Euclidean distance)
implies that
\eq
0 < \hat{D}(p) = 1 - \frac{1}{d} \sum_{j=1}^d (1- \cos p_j) \leq
1 - cp^2 ,
\en
for some fixed $c>0$.
Let $p_1 = k_1 + k_2$, $p_2 = k_2$ and let $I_1(k_1,k_2)$ be the
indicator function of the set $|p_1| < a$ and $|p_2| < a$. Note
that
\eq
E \left [ e^{i k_1 \cdot \omega(s_1)} e^{i k_2 \cdot \omega(s_2)}
\right ]
= \hat{D}^{s_1}(p_1) \hat{D}^{s_2 -s_1}(p_2 ).
\en
Since $\hat{D}(p_1)$ and $\hat{D}(p_2)$ are positive and bounded by 1,
\eqarray
F_T (k_1,k_2) I_1(k_1,k_2)
&=&
\sum_{0 \le s_1 \le s_2 \le T}
\hat{D}^{s_1}(p_1) \hat{D}^{s_2-s_1}(p_2 )
I_1(k_1,k_2) \nonumber \\
&\le&
\sum_{0 \le s_1 \le T} \ \sum_{s_2:s_1 \le s_2 \le T+s_1}
\hat{D}^{s_1}(p_1) \hat{D}^{s_2-s_1}(p_2 )
I_1(k_1,k_2) \nonumber \\
&\leq&
\min \{T+1,{1 \over 1-\hat{D}(p_1)} \}
\min \{T+1,{1 \over 1-\hat{D}(p_2)} \}\nonumber\\
&\le&
b_T(p_1) b_T(p_2),
\enarray
where
\eq
b_T(p) = 2 \min \{T, {1 \over c p^2} \}.
\en
Using this estimate and scaling $k \rightarrow k/\sqrt{T}$ we have
\begin{eqnarray}
{1 \over {T^{4-d}}} \int_{[-\pi,\pi]^d} d^d k_1 \,
\int_{[-\pi,\pi]^d} d^d k_2 \, |\hat{h}_{\epsilon }(\sqrt{T}k_1)
\hat{h}_{\epsilon }(\sqrt{T}k_2)| F_T^2 (k_1,k_2) I_1(k_1,k_2) \\
\nonumber &&\hspace{-7cm}\le \int_{{\bf R}^d} d^d k_1 \, \int_{{\bf R}^d} d^d
k_2 \, |\hat{h}_{\epsilon }(k_1)| |\hat{h}_{\epsilon }(k_2)|
b_1^2(p_1) b_1^2(p_2) . \lbeq{Isub1}
\end{eqnarray} The right side approaches $0$ as $\epsilon \rightarrow
0$, by dominated convergence, if $d<4$. Therefore the contribution
from $I_1(k_1,k_2)$ to the right hand side of \refeq{FTpreclaim} tends
to zero uniformly in $T$.
Next we consider the possible singularity at $(0,\vec{\pi})$.
Let $I_2(k_1,k_2)$ be the indicator function of the set $|p_1| <
a$ and $|p_2-\vec{\pi}| < a$. This time, since
$\hat{D}(p_1)$ is positive and $|\sum_{s=0}^{m}
\hat{D}(p_2)^s| \le 1$ for every $m \ge 0$, because $\hat{D}(p_2)$ is
negative and $|\hat{D}(p_2)| \le 1$, we use
\eqarray
F_T (k_1,k_2) I_2(k_1,k_2)
&=&
\left |
\sum_{0 \le s_1 \le s_2 \le T}
\hat{D}^{s_1}(p_1) \hat{D}^{s_2-s_1}(p_2 )
\right |
I_2(k_1,k_2) \nonumber \\
&\le&
\sum_{0 \le s_1 \le T} \
\hat{D}^{s_1}(p_1) I_2(k_1,k_2) \nonumber \\
&\le&
b_T(p_1) I_2(k_1,k_2).
\enarray
Therefore
\begin{eqnarray*}
&& {1 \over {T^{4-d}}} \int d^d k_1 \, \int d^d k_2 \,
|\hat{h}_{\epsilon }(\sqrt{T}k_1) \hat{h}_{\epsilon }(\sqrt{T}k_2)|
F_T^2 (k_1,k_2) I_2(k_1,k_2)\\ &&\hspace{1cm}\le {1 \over {T^{4-d}}}
\int d^d k_1 \, \int d^d k_2 \, |\hat{h}_{\epsilon }(\sqrt{T}k_1)
\hat{h}_{\epsilon }(\sqrt{T}k_2)| b_T^2(p_1) I_2(k_1,k_2)\\
&&\hspace{1cm}\le {1 \over {T^{4-d}}} \int_{|p_1|<a} d^d p_1 \,
b_T^2(p_1) \int_{|p_2-\vec{\pi}|<a} d^d p_2
\le C_a \, T^{(d-4)/2} ,
\end{eqnarray*}
% NOTE(review): the source was truncated at this point (the line ended
% with "\int_{|p_1|d/2$" and the eqnarray* environment was left
% unclosed); the final estimate and the sentence below are
% reconstructed -- please confirm against the original manuscript.
which tends to zero as $T \rightarrow \infty$ for $d<4$, uniformly in
$\epsilon$. The singularities at $(\vec{\pi},0)$ and
$(\vec{\pi},\vec{\pi})$ are handled in the same way.
%there exist functions $f, g_T \in L^p({\bf R}^2 \times {\bf R}^2)$
%such that
%\eq
%\lbeq{FTclaim}
% | F_T(k_1,k_2)| \le T^2 \left \{
% f(\sqrt{T} k_1,\sqrt{T} k_2) + g_T(\sqrt{T} k_1,\sqrt{T} k_2)
% \right \} \;\;\; \mbox{with} \;\;\;
% \lim_{T \rightarrow \infty} \| g_T \|_p = 0.
%\en
%Given the claim, the conclusion of the lemma then follows by
%estimating the right side of \refeq{FTpreclaim} with \refeq{FTclaim},
%scaling the $T$ out of the integrals, observing that $|\hat{h}_{\epsilon}(k)|
%\le 1$ and tends to zero pointwise, and applying
%the dominated convergence theorem.
%
%The remainder of the proof is devoted to showing that the claim
%\refeq{FTclaim} holds.
%Define
%\eq
%\lbeq{Dhatdef}
% \hat{D}(p) = \frac{1}{2} \sum_{j=1}^2 \cos p_j.
%\en
%Note that for $p \in [-\pi,\pi]^2$,
%\eq
% \hat{D}(p) = 1 - \frac{1}{2} \sum_{j=1}^2 (1- \cos p_j) \leq
% 1 - cp^2
%\en
%for some $c > 0$.
%Now
%\eq
% E \left [ e^{i k_1 \cdot \omega(s_1)} e^{i k_2 \cdot \omega(s_2)}
% \right ]
% = \hat{D}^{s_1}(p_1) \hat{D}^{s_2 -s_1}(p_2 ),
%\en
%where $p_1 \in [-\pi,\pi]^2 \times [-\pi,\pi]^2 = k_1 + k_2 \ {\rm
% mod} \ 2\pi$ and $p_2 = k_2$. Consider
%first the subset of $\{k_1,k_2 \in [-\pi,
%\pi]^2\}$ where $\hat{D}(p_1) \ge 0$
%and $\hat{D}(p_2 ) \ge 0$. On this subset,
%\eqarray
% \sum_{0 \le s_1 < s_2 \le T}
% \left| E \left [ e^{i k_1 \cdot \omega(s_1)} e^{i k_2 \cdot \omega(s_2)}
% \right ] \right|
%&\le&
% \sum_{0 \le s_1 < s_2 \le T} e^{-c s_1 p_1^2}
% e^{-c (s_2 - s_1) p_2^2} \nonumber \\
%&\le&
% \sum_{0 \le s_1 \le T} \ \sum_{s_2:s_1 \le s_2 \le T+s_1}
% e^{-c s_1 p_1^2}
% e^{-c (s_2 - s_1) p_2^2}. \hspace{10mm}
%\enarray
%For $x \ge 0 $ and bounded, there exists a constant $c_1$ such that
%\eq
% \sum_{0 \le s \le T} e^{-sx} \le
% \min \left\{ T+1, {1 \over 1 - e^{-x} } \right\}
% \le c_1 T \min \left\{ 1, {1 \over T x } \right\},
%\en
%therefore
%\eq
% \sum_{0 \le s_1 < s_2 \le T}
% |E \left [ e^{i k_1 \cdot \omega(s_1)} e^{i k_2 \cdot \omega(s_2)}
% \right ]|
% \le
% T^2 f(\sqrt{T} k_1, \sqrt{T} k_2)
%\en
%with $ f(k_1,k_2) = c_1^2 \min \{ 1, {1 \over p_1^2 } \}
%\min \{ 1, {1 \over p_2^2 } \}$. This function
%$f$ is clearly in $L^p$ for all $p > d/2.$
%
%Now consider the subset of $[-\pi,\pi]^2$ where $\hat{D}(p_2 ) < 0$ and
%$\hat{D}(p_1) \ge 0.$ Using the inequality $\frac{1+x^n}{1+x} \leq 1$
%(for $x \geq 0$), we obtain
%\eqarray
% \sum_{0 \le s_1 < s_2 \le T}
% |E \left [ e^{i k_1 \cdot \omega(s_1)} e^{i k_2 \cdot \omega(s_2)}
% \right ]|
%&=&
% |\sum_{0 \le s_1 \le T} \hat{D}(p_1)^{s_1}
% {1 - \hat{D}^{T- s_1}(p_2) \over 1 - \hat{D}(p_2)}| \nonumber \\
%&\le&
% \sum_{0 \le s_1 \le T} \hat{D}^{s_1}(p_1) \nonumber \\
%% \mbox{ by } {1+x^n\over 1+x} \le 1 \mbox{ if } x \ge 0
%&\le&
% \sum_{0 \le s_1 \le T} e^{-c s_1 p_1^2} \nonumber \\
%&\le&
% T^2 g_T(\sqrt{T} k_1, \sqrt{T} k_2).
%\enarray
%where $g_T(k_1,k_2) = cT^{-1} \min \{1,1/p_1^2 \} \ I(k_1,k_2 \in [- \pi,
%\pi]^2)$, which is in $L^p$ and tends to zero in $L^p$ as $T
%\rightarrow \infty$ for all $p > d/2$. The case where the roles
%of the indices $1$ and $2$ are reversed is similar.
%
%On the subset of $[-\pi,\pi]^2$ where $\hat{D}(p_2 ) < 0$ and
%$\hat{D}(p_1) < 0$ we use the same argument except that
%$\hat{D}^{s_1}(p_1)$ is
%replaced by $|\hat{D}^{s_1}(p_1)| = \hat{D}^{s_1}(\bar{p}_1)$ where
%$\bar{p}_1 = p_1 \, + \buildrel \to \over \pi$, with $\buildrel \to \over
%\pi$ a vector $(\pm \pi, \pm \pi) \in {\bf R}^2$ chosen so that
%$\bar{p}_1 \in [-\pi,\pi]^2$. Then
%the argument continues as before and concludes with a new $g_T$ in
%which $p_1$ is replaced by $p_1 + \buildrel \to \over
%\pi$.
\subsection{Convergence}
\label{sec-conv}
Throughout this section, given a walk $\omega(i)$ and $t \in [0,1]$,
we define a piecewise constant function $X_T(t) = T^{-1/2}
\omega(\lfloor tT \rfloor )$. We begin by considering the case of
high dimensions.
\subsubsection{Dimensions $d>2$}
\label{sec-convdge2}
In this section, we complete the proof of Theorem~\ref{thm-main},
making use of Theorem~\ref{thm-simeq} and
Proposition~\ref{prop-Jtozero}.
\begin{prop}
\label{prop-convdge2}
Let $d>2$ and $-\infty < \beta < \infty$. For $t_m \in [0,1]$,
$n=0,1,2,\ldots $, and $k_m \in {\bf R}^d$, \eq \lim_{T \rightarrow
\infty} E \prod_{m=1}^n \left[ e^{ik_m \cdot X_T( t_m )} \right]
e^{i \beta \underline{\gamma_T}(\Tcal_T)} = E \prod_{m=1}^n e^{ik_m
\cdot B( t_m )}. \en Hence, the vector $(X_T(t_1), \ldots,
X_T(t_n), \underline{\gamma_T}(\Tcal_T))$ converges in distribution to
the vector $(B(t_1), \ldots, B(t_n), 0)$.
\end{prop}
\proof
We set
\begin{equation}
Q(X_T) = \prod_{m=1}^n e^{ik_m \cdot X_T( t_m )}
\end{equation} and we write $Q$ for $Q(X_T)$ and $q$ for $Q(B)$. Then
it suffices to show that \eq E Q e^{i\beta \underline{\gamma_T}} - Eq
\rightarrow 0 \en as $T \to \infty$. The above difference can be
written as \eq E Q \left( e^{i\beta \underline{\gamma_T}} -1 \right) +
EQ- Eq. \en By Donsker's theorem, $EQ-Eq \to 0$ as $T \to \infty$.
Using the inequality $|Q (e^{i\beta \underline{\gamma_T}}-1)| \leq
|\beta \underline{\gamma_T}|$, the first term is bounded above by
$|\beta|E|\underline{\gamma_T}| \leq
|\beta|[E|\underline{\gamma_T}|^2]^{1/2}$, which approaches zero as $T
\to \infty$ by Proposition~\ref{prop-Jtozero}.
The last statement of the proposition then follows immediately from
the fact that convergence of characteristic functions implies convergence
in distribution on ${\bf R}^N$; see Theorem~7.6 of \cite{Bill68}.
\qed
The following corollary yields the results of Theorem~\ref{thm-main}.
\begin{cor}
\label{cor-dge2}
Let $d>2$ and $-\infty < \beta < \beta_0$. For $t_m \in [0,1]$, $i_m
\in \{1, \ldots, d\}$, and $n=0,1,2,\ldots $, \eq \lbeq{XTe} \lim_{T
\rightarrow \infty} E \prod_{m=1}^n \left[ X_T^{(i_m)}( t_m ) \right]
e^{ \beta \underline{\gamma_T}(\Tcal_T)} = E \prod_{m=1}^n \left[
B^{(i_m)}( t_m ) \right], \en where the superscript $(i_m)$ denotes a
component of $X_T$ or $B$. Moreover, the process $X_T$ converges in
distribution to Brownian motion.
\end{cor}
\proof
By Proposition~\ref{prop-convdge2}, the expectation of
any bounded continuous function of $(X_T(t_1), \ldots, X_T(t_n),
\underline{\gamma_T}(\Tcal_T))$ converges to the corresponding
continuum expectation. The function appearing in the corollary is not
bounded, but it is uniformly integrable.
To see this, it is sufficient (see page~32 of \cite{Bill68})
to show that there is an
$\epsilon >0$ such that
\eq
\sup_T E \left| \prod_{m=1}^n \left[ X_T^{(i_m)}( t_m ) \right]
e^{ \beta \underline{\gamma_T}(\Tcal_T)} \right|^{1+\epsilon}
< \infty.
\en
But by H\"older's inequality
\eq
E \left| \prod_{m=1}^n \left[ X_T^{(i_m)}( t_m ) \right]
e^{ \beta \underline{\gamma_T}(\Tcal_T)} \right|^{1+\epsilon}
\leq \left\| \prod_{m=1}^n \left[
X_T^{(i_m)}( t_m ) \right]^{1+\epsilon}
\right\|_q \;
\left\| e^{ \beta (1+\epsilon) \underline{\gamma_T}(\Tcal_T)}
\right\|_p ,
\en
and by Theorem~\ref{thm-simeq}
the right side is bounded uniformly in $T$ provided $p$ is chosen
sufficiently close to $1$ and $\epsilon$ is chosen sufficiently small
that $\beta (1+\epsilon)p < \beta_0$.
It then follows from Proposition~\ref{prop-convdge2}, together with
Corollary~1 to Theorem~5.1
and Theorem~5.4 of \cite{Bill68}, that \refeq{XTe} holds.
It is then immediate that the renormalized partition function
converges to $1$ and that the diffusion constant is equal to 1. The fact
that $c_T \to \exp[\beta (G(0)-1)]$ as $T \to \infty$ then follows from
Lemma~\ref{lem-renconst}.
To complete the proof, it remains to show
that $X_T$ is tight. For this, it is sufficient to prove that
for any $0 \leq t_1 \leq t_2 \leq t_3 \leq 1$, some $a>1/2$, and constant $K$,
\eq
\lbeq{tight}
\frac{1}{c_T^{ren}} E\left[ |X_T(t_2 )- X_T(t_1)|^{2a}
|X_T( t_3 )-X_T(t_2)|^{2a} e^{\beta \underline{\gamma_T}}
\right] \leq K |t_2-t_1|^a |t_3-t_2|^a
\en
(see Theorem~8.4 and pages 87-89 of \cite{Bill68}). The normalizing
partition function on the left side is asymptotically 1 and
can be ignored. Applying H\"{o}lder's inequality to separate the
exponential interaction factor from the displacement factors, as above,
gives \refeq{tight} for any $a \geq 0$.
\qed
\subsubsection{Dimensions $d \leq 2$}
\label{sec-convd12}
In this section, we complete the proofs of Theorems~\ref{thm-main2}
and \ref{thm-main1}.
\begin{prop}
\label{weakconvergence}
Let $d = 1$ or $2$ and $p=\frac{4-d}{2}$. For any $-\infty < \beta <
\infty$, $t_m \in [0,1]$, $n = 0,1, \ldots$, and $k_m \in {\bf R}^d$,
\begin{equation}
\lim_{T \rightarrow \infty} E \prod_{m=1}^n \left[ e^{ik_m \cdot
X_T( t_m )} \right] e^{i \beta \underline{\gamma_T}(\Tcal_T)} = E
\prod_{m=1}^n \left[ e^{ik_m \cdot B( t_m )} \right] e^{i \beta
\underline{\gamma}(\Tcal)}.
\end{equation} Hence, the vector $(X_T(t_1), \ldots, X_T(t_n),
\underline{\gamma_T}(\Tcal_T))$ converges in distribution to the
vector $(B(t_1), \ldots, B(t_n), \underline{\gamma}(\Tcal) )$.
\end{prop}
\proof We want to apply Donsker's theorem, which says that the
expectation of any functional of $X_T$ which is bounded and continuous in the
Skorohod topology, converges to its natural continuum limit as $T
\rightarrow \infty$. However, unlike the simpler situation encountered in
Section~\ref{sec-convdge2}, we cannot apply it directly because
the renormalized self-intersection local time is not
a continuous functional. We will use the results of
Sections~\ref{sec-d2} and \ref{sec-d12}
to introduce cutoffs and reduce the problem to one
involving a bounded continuous functional.
We set
\begin{equation}
Q(X_T) = \prod_{m=1}^n \left[ e^{ik_m \cdot X_T( t_m )} \right]
\nonumber
\end{equation} and we write $Q$ for $Q(X_T)$ and $q$ for $Q(B)$.
Recalling the definitions of $\Tcal_{N}$ and $\Tcal_{T,N}$ from
\refeq{dyadic2} and \refeq{dyadic3}, we write \eqarray E Q e^{i \beta
\underline{\gamma_T}(\Tcal_T)} - E q e^{i \beta
\underline{\gamma}(\Tcal)} & = & E Q\left( e^{i \beta
\underline{\gamma_T}(\Tcal_{T})} - e^{i \beta
\underline{\gamma_T}(\Tcal_{T,N})} \right) + E Q e^{i \beta
\underline{\gamma}(\Tcal_{T,N})} - E q e^{i \beta
\underline{\gamma}(\Tcal_N)} \nonumber \\ && +E q \left( e^{i \beta
\underline{\gamma}(\Tcal_N)} - e^{i \beta \underline{\gamma}(\Tcal)}
\right) . \lbeq{i1} \enarray The first term on the right side can be
estimated using the inequality \eq \lbeq{AtoB} |Q| |e^{iA} - e^{iB}|
\le |A-B|, \en together with $E|A-B| \leq \| A-B\|_2$ and
Lemma~\ref{lem-Napprox}; its contribution can be made as small as
desired by taking $N$ large independent of $T$. By \refeq{gamTcal}
the same assertion holds for the last term on the right side. Thus it
suffices to show that for fixed (large) $N$, \eq \lbeq{i2} \lim_{T
\rightarrow \infty} E \left[ Q e^{i \beta
\underline{\gamma_T}(\Tcal_{T,N})} \right] = E \left[ q e^{i \beta
\underline{\gamma}(\Tcal_N)} \right]. \en
We define
$\gamma_{T,\epsilon}(\Tcal_{T,N}) $ by replacing $\delta_x$ in
\refeq{gammaT} by the approximate Kronecker delta
\begin{equation}
\delta_{x;\epsilon T} = {1 \over (2\pi)^d} \int_{[-\pi,\pi]^d} d^d k
\, \exp \left [ ik \cdot x - \epsilon T k^2 \right ] ,
\end{equation} and define $\gamma_\epsilon(\Tcal_N) =
\gamma_{\delta_\epsilon}(\Tcal_N)$ as in \refeq{gamma}, where
\begin{equation}
\delta_{\epsilon}(x) = {1 \over (2\pi)^d} \int_{R^d} d^d k \, \exp
\left [ ik \cdot x - \epsilon k^2 \right ].
\end{equation} Adding and subtracting as in the previous paragraph,
and using \refeq{gamma} and Lemma~\ref{lem-rosen}, we see that it is
sufficient to prove that for fixed (large) $N$ and (small) $\epsilon$,
\begin{equation}
\lbeq{i3} \lim_{T \rightarrow \infty} E Q e^{i \beta
\underline{\gamma_{T,\epsilon}}(\Tcal_{T,N}) } = E q e^{i \beta
\underline{\gamma_{\epsilon}} (\Tcal_N)}.
\end{equation}
%Now that we have a continuous functional
%$\underline{\gamma_{\epsilon}}(\Tcal_N)$ on the set of continuous paths,
%we are well situated to apply Donsker's theorem.
By definition, \eqarray \gamma_{T,\epsilon}(\Tcal_{T,N}) & = &
\frac{1}{T^{2-d/2}} \sum_{(i,j) \in {\cal T}_{T,N}} {1 \over (2\pi)^d}
\int_{[-\pi,\pi]^d} d^d k \, e^{ik \cdot (\omega(i)-\omega(j))} e^{-
\epsilon T k^2} \nonumber \\ \nonumber &=& \frac{1}{T^{2}}
\sum_{(i,j) \in {\cal T}_{T,N}} {1 \over (2\pi)^d}
\int_{[-\sqrt{T}\pi,\sqrt{T}\pi]^d} d^d k \, e^{ik \cdot (X_T(i/T)
- X_T(j/T))} e^{-
\epsilon k^2} \\ \nonumber &=& \frac{1}{T^{2}} \sum_{(i,j) \in {\cal
T}_{T,N}} \delta_{\epsilon}(X_T(i/T) - X_T(j/T))
+O(e^{-C_\epsilon T}) \\ &=& \int \int_{\Tcal_N} ds
\,dt\, \delta_{\epsilon}(X_T(s) - X_T(t))+O(T^{-1}) +O(e^{-C_\epsilon
T}) \enarray for some $C_\epsilon >0$. The error terms arise from
the difference in the ranges of the $k$ integrals in
$\delta_\epsilon(x)$ and $\delta_{x;\epsilon}$, and boundary effects
in replacing the sum by the integral (note that the terms in the sum
are constant on squares of side $1/T$), and are uniform in $\omega$.
Let \eq F(X_T) = Q(X_T) \exp \left [ i \beta \int \int_{\Tcal_N} ds
\,dt\, \delta_{\epsilon}(X_T(s) - X_T(t)) \right ]. \en It suffices
to show that $E[ F(X_T)]$ converges to $E[ F(B)]$, where $B$ is
Brownian motion. But this follows directly from Donsker's theorem
\cite{Bill68} because $X_T \mapsto F(X_T)$ is bounded and continuous
in the Skorohod topology. \qed
The following corollary yields the results of Theorems~\ref{thm-main2}
and \ref{thm-main1}, apart from the monotonicity of the diffusion
constant, which is deferred to Section~\ref{sec-diffconst}.
\begin{cor}
Let $d = 1$ or $2$ and $p=\frac{4-d}{2}$, and let $\beta_0$ be given
by \refeq{beta0def}. For any $-\infty < \beta < \beta_0$, $t_m \in
[0,1]$, and $n = 0,1, \ldots$, \eq \lim_{T \rightarrow \infty} E
\prod_{m=1}^n \left[ X_T( t_m ) \right] e^{ \beta
\underline{\gamma_T}(\Tcal_T)} = E \prod_{m=1}^n \left[ B( t_m )
\right] e^{ \beta \underline{\gamma}(\Tcal) } . \en In particular,
for $d=2$, $E e^{ \beta \underline{\gamma}(\Tcal) } < \infty$ for
$\beta < \beta_0$. Moreover, $X_T$ converges in distribution to the
process with law $d\nu_{d,\beta}$.
\end{cor}
\proof
We apply uniform integrability as in Corollary~\ref{cor-dge2}, using
Theorems~\ref{thm-simeq} and \ref{thm-d2pfbd} respectively for $d=1,2$.
\qed
%Let $dP_{n,T}$ be the finite
%dimensional distribution for $X_T(t_m)$, $m = 1, 2, \ldots, n$,
%together with $\underline{\gamma}(\Tcal_T)$, determined by the
%expectation for simple random walk. By Proposition
%\ref{weakconvergence} and Theorem 7.6, p46 of \cite{Bill68}, $dP_{n,T}$
%converges weakly to the corresponding continuum measure. By the
%H\"older inequality and Theorem \ref{thm-d2pfbd}
%\begin{equation}
% X = \prod_{m=1}^n \left[ X_T( t_m ) \right]
% e^{\beta \underline{\gamma}(\Tcal_T)\}} \nonumber
%\end{equation}
%is uniformly integrable as $T \rightarrow \infty$. The result follows
%by Theorem 5.4, page 32 of \cite{Bill68}.
%By Vitali's theorem (see e.g. problem 33(b), page 35 of \cite{RS72})
%convergence for imaginary $\beta$ implies convergence for all $\beta$
%in any domain on which the expectations on the left are uniformly bounded
%on compact subsets.
As a consequence of the corollary for $d=2$, since $ E_T
[\gamma_T(\Tcal_T)] \sim \frac{1}{\pi} \log T$ by
Lemma~\ref{lem-renconst}, for $-\infty < \beta < \beta_0$ the
unrenormalized partition function satisfies the asymptotic relation
\eq c_T = \left[ E_T e^{\beta \underline{\gamma_T}(\Tcal_T)} \right]
e^{\beta E_T [\gamma_T(\Tcal_T)]} \sim \left[ E e^{\beta
\underline{\gamma}} \right] T^{\beta/\pi}. \en
\section{The diffusion constant}
\label{sec-diffconst}
\setcounter{equation}{0}
For $d > 2$ and $p=1$, the diffusion constant is equal to $1$ for
all $-\infty < \beta < \beta_0$, while for $d \leq 2$ and
$p=\frac{4-d}{2}$, we have
\eq
\lbeq{dc1}
D(\beta) =
\frac{E [B(1)^2 e^{\beta \underline{\gamma}}]}
{E e^{\beta \underline{\gamma}}}.
\en
For the rest of this section, we consider only
$d \leq 2$.
The following elementary calculation shows that the
diffusion constant is {\em strictly\/} decreasing for $\beta$ equal
to zero, and hence, by continuity, for $\beta$ near zero.
Consider first $d=1$, so there is no need for renormalization. Then
\eq
\lbeq{Dprime}
D'(0) =
\left. \frac{d}{d\beta} \right|_{\beta =0}
\frac{E [ B(1)^2 e^{\beta \gamma}] }{E e^{\beta \gamma} }
= E[ B(1)^2 \gamma ] - [E B(1)^2] [E \gamma] .
\en
Writing now
$p_t(x) = (2\pi t)^{-d/2} \exp [-x^2 /2t]$
for the Brownian motion transition function, this
gives
\begin{eqnarray*}
D'(0) &=& \int_{0 \leq s < t \leq 1} ds \, dt\int dx \, dy \, y^2 \,
p_s(x)p_{t-s}(0)p_{1-t}(y-x) \\ && \hspace{1cm} - 1 \cdot \int_{0
\leq s < t \leq 1} ds \, dt\int dx \, dy \,
p_s(x)p_{t-s}(0)p_{1-t}(y-x) \\ &=& \int_{0 \leq s < t \leq 1} ds \,
dt (s + (1-t)) p_{t-s}(0) - \int_{0 \leq s < t \leq 1} ds \, dt \,
p_{t-s}(0)\\ &=& - \int_{0 \leq s < t \leq 1} ds \, dt \, (t-s) \,
p_{t-s}(0).
\end{eqnarray*} The last expression is clearly negative, so the
diffusion constant is strictly decreasing at $\beta =0$. For $d=2$,
formally $\underline{\gamma} = \gamma - E\gamma$ and the constant
$E\gamma$ drops out in the right side of \refeq{Dprime}, so that the
same argument applies also for $d=2$. This becomes a correct argument
by a suitable introduction of cutoffs.
For $\beta \in [0,\beta_0)$, we now prove that $D(\beta)$ is strictly
decreasing, as follows. The following proposition shows that
for $\beta \in (0,\beta_0)$, the inequality $D'(\beta) \leq 0$ is
a consequence of a correlation inequality of Fr\"ohlich and Park \cite{FP78}.
Combined with the analyticity of $D(\beta)$ for $\beta \in (-\infty, \beta_0)$,
and the fact that $D$ is strictly decreasing in a neighbourhood of $\beta =0$,
this implies that $D$ is strictly decreasing for $\beta \in (0,\beta_0)$.
We believe, but have not proved, that $D$ is strictly monotone for
all $\beta < \beta_0$.
\begin{prop}
\label{monotone}
Let $d \geq 1$, $\epsilon >0$ and $f(x) = (2\pi
\epsilon)^{-d/2}e^{-x^2/2\epsilon}$. For any $0 < \beta < \infty$,
$t_m \in [0,1]$, $n = 0,1, \ldots$, and $k_m \in {\bf R}^d$,
\begin{equation}
\lbeq{mono1} \langle \prod_{m=1}^n \left[ e^{i k_m \cdot B( t_m )}
\right] \rangle_{f,\beta} \equiv { E \prod_{m=1}^n \left[ e^{i k_m
\cdot B( t_m )} \right] e^{ \beta \gamma_f(\Tcal) } \over E e^{
\beta \gamma_f(\Tcal)} } \nonumber
\end{equation} is monotone nondecreasing in $\beta$. In addition, for
$d=1$ or $2$ and $0 < \beta < \beta_0$, $D(\beta)$ is nonincreasing in
$\beta$.
\end{prop}
\proof
In dimensions $d =1$ or $2$ the expectation \refeq{mono1}
has a limit as $f \rightarrow \delta$ (that is, as $\epsilon \to 0$),
so the limiting expectations are also nondecreasing.
For the diffusion constant, we use
\eq
D(\beta) = 2d \lim_{k \to 0} k^{-2}(1- \langle e^{ik\cdot B(1) }
\rangle_{\delta,\beta} )
\en
to conclude that $D$ is nonincreasing. This is derived using
Euclidean symmetry and $\langle B(1) \rangle = 0$.
Thus it suffices to obtain \refeq{mono1}, for arbitrary dimensions. We
will show that this is
a consequence of the general Ginibre correlation inequalities
proved by Fr\"ohlich and Park in \cite{FP78} using duplicate variables.
In particular, we will apply their Theorem~3.1(5).
For this, we need to rewrite \refeq{mono1} in the formalism of \cite{FP78}.
Accordingly, we let ${\cal H}$ denote the Hilbert space obtained by
completing $\Rbold^d \otimes \Ccal[0,1]$ with the inner product
\begin{equation}
C(g,h) = \frac{1}{d} \int_0^1 \int_0^1 ds \, dt \, \sum_{i=1}^d
g^{(i)}(s) \min\{s,t\} h^{(i)}(t),
\end{equation} where the superscripts $(i)$ denote components. Let
$\phi$ denote the Gaussian process with mean $0$ and covariance $C$
indexed by ${\cal H}$, and let $d\mu_C$ denote the associated Gaussian
measure on the space ${\cal S}'$ of tempered distributions. By
definition of $f$, $\hat{f}(q) = e^{-\epsilon q^2/2}$. We define
\begin{eqnarray*}
X & = & \{x \equiv (q,s,t): q \in \Rbold^d, 0 \le s < t \le 1\},\\
d\rho(x) & = & \frac{1}{(2\pi)^d} dq \, ds \, dt \, \hat{f}(q) ,
\end{eqnarray*} and a mapping $l:X \rightarrow {\cal H}$ by
\[ l_x = q (\delta(\cdot - t) - \delta(\cdot - s)), \quad x=(q,s,t)
\in X .
\] Then $C(l_x,l_x) = d^{-1}q^2 |t-s|$. (Fr\"ohlich and Park's
hypothesis that the integral $\int_X d\rho(x) \exp [\frac{1}{2}
C(l_x,l_x)]$ be finite is thus not satisfied for small $\epsilon$,
but, in fact, this hypothesis is not necessary for part (5) of their
Theorem~3.1.)
By construction, the Gaussian random variable $\phi (l_x)$ has the
same distribution as $q \cdot (B(t) - B(s))$; in fact,
\eq
\int e^{i q \cdot (B(t) - B(s))} dW = e^{-q^2|t-s|/2d}
= e^{-\frac{1}{2}C(l_x,l_x)} = \int_{{\cal S}'} e^{i \phi(l_x)} d\mu_C .
\en
Using this fact, as well as the $B \to -B$ symmetry,
rewriting $\gamma_f$ in terms of the Fourier transform gives
\begin{eqnarray*}
\gamma_f(\Tcal) &=& \frac{1}{(2\pi)^d} \int dq \, \hat{f}(q) \int
\int_{0 \le s < t \le 1} ds \, dt\, e^{i q \cdot (B(t) - B(s))}\\
&=& \frac{1}{(2\pi)^d} \int dq \, \hat{f}(q) \int \int_{0 \le s < t \le
1} ds \, dt\, \cos \left (q \cdot (B(t) - B(s)) \right )\\ &=&
\int_X d\rho \, \cos(\phi(l_x)).
\end{eqnarray*} In \refeq{mono1}, by the $B \rightarrow - B$ symmetry
we can replace $\prod_{m=1}^n \left[ e^{i k_m \cdot B( t_m )} \right]$
by $\cos (\sum_{j=1}^n k_j \cdot B( t_j ))$. Define $m \in {\cal H}$
by $m = \sum k_j \delta( \cdot - t_j)$. Then the Gaussian random
variable $\phi(m)$ has the same distribution as $\sum k_j \cdot
B(t_j)$, and therefore
\begin{equation}
\lbeq{mono2} \langle \prod_{j=1}^n e^{i k_j \cdot B( t_j )}
\rangle_{f,\beta} = \Xi(f,\beta)^{-1} \int d\mu_C(\phi)\, \cos
(\phi(m)) e^{\beta \int_X d\rho \, \cos(\phi(l_x))} ,
\end{equation} where
\begin{equation}
\lbeq{mono3} \Xi(f,\beta) = \int d\mu_C(\phi)\, e^{\beta \int_X
d\rho \, \cos(\phi(l_x))}.
\end{equation} Let \eq \langle \langle \; F \; \rangle
\rangle_{f,\beta} = \Xi(f,\beta)^{-1} \int d\mu_C(\phi)\, \; F \;
e^{\beta \int_X d\rho \, \cos(\phi(l_x))}. \en The derivative of
$\langle \prod_{j=1}^n e^{i k_j \cdot B( t_j )} \rangle_{f,\beta}$ with
respect to $\beta$ can then be written as \eq \int_X d\rho(x) \left(
\langle \langle \cos(\phi(l_x)) \cos (\phi(m)) \rangle
\rangle_{f,\beta} - \langle \langle \cos(\phi(l_x)) \rangle
\rangle_{f,\beta} \langle \langle \cos ( \phi(m )) \rangle
\rangle_{f,\beta} \right). \en This is nonnegative, by Theorem~3.1(5)
of \cite{FP78}. \qed
\subsection*{Acknowledgements}
The work of D.C.B. was supported in part by NSF grant DMS 910256.
D.C.B. thanks Alain Sznitman and the Forschungsinstitut f\"ur
Mathematik at ETH-Zentrum for their hospitality while part of this
work was carried out. The work of G.S. was carried out primarily
during visits to the University of Virginia and the Isaac Newton
Institute in Cambridge, and was supported in part by a Visiting
Fellowship at the Isaac Newton Institute funded by SERC grant G59981,
and by NSERC of Canada grant A9351.
Our interest in this model arose from a conversation with M. Aizenman and
C.M.\ Newman. We thank E.\ Bolthausen, T.\ Hara, A.D.\ Sokal and
A.-S.\ Sznitman for useful conversations and correspondence. We are
particularly grateful to J.-F.\ Le Gall for communicating the contents
of \cite{LeGa94} to us prior to publication, to S.R.S.\ Varadhan for
providing the main idea in the proof of Theorem~\ref{thm-simeq} and to
J. Fr\"ohlich for a remark that led to Proposition \ref{monotone}.
%\bibliography{../bibdef/bib}
\begin{thebibliography}{10}
\bibitem{AZ93} S.~Albeverio and X.Y. Zhou. \newblock A modified
{Domb}--{Joyce} model in four dimensions. \newblock Preprint, (1993).
\bibitem{Bill68}
P.~Billingsley.
\newblock {\em Convergence of Probability Measures}.
\newblock John Wiley and Sons, New York, (1968).
\bibitem{Bolt91}
E.~Bolthausen.
\newblock On the construction of the three dimensional polymer measure.
\newblock {\em Probab. Theory Relat. Fields}, {\bf 97}:81--101, (1993).
\bibitem{Bolt94}
E.~Bolthausen.
\newblock Localization of a two-dimensional random walk with an attractive path
interaction.
\newblock To appear in {\em Ann.\ Probab.}, (1994).
\bibitem{BoltS94}
E.~Bolthausen and U.~Schmock.
\newblock On self-attracting random walks.
\newblock Preprint, (1994).
\bibitem{BoltS94a}
E.~Bolthausen and U.~Schmock.
\newblock On self-attracting random walks in arbitrary dimensions.
\newblock Preprint, (1994).
\bibitem{Boro81}
A.N. Borodin.
\newblock On the asymptotic behavior of local times of recurrent random walks
with finite variance.
\newblock {\em Theory Probab. Appl.}, {\bf 26}:758--772, (1981).
\bibitem{Boro89}
A.N. Borodin.
\newblock Brownian local time.
\newblock {\em Russian Math. Surveys}, {\bf 44}:1--51, (1989).
\bibitem{BFF84}
A.~Bovier, G.~Felder, and J.~Fr\"{o}hlich.
\newblock On the critical properties of the {Edwards} and the self-avoiding
walk model of polymer chains.
\newblock {\em Nucl. Phys. B}, {\bf 230} [FS10]:119--147, (1984).
\bibitem{BGW92}
R.~Brak, A.J. Guttmann, and S.G. Whittington.
\newblock A collapse transition in a directed walk model.
\newblock {\em J. Phys. A: Math. Gen.}, {\bf 25}:2437--2446, (1992).
\bibitem{BOP93}
R.~Brak, A.L. Owczarek, and T.~Prellberg.
\newblock A scaling theory of the collapse transition in geometric cluster
models of polymers and vesicles.
\newblock {\em J. Phys. A: Math. Gen.}, {\bf 26}:4565--4579, (1993).
\bibitem{BEI92}
D.~Brydges, S.N. Evans, and J.Z. Imbrie.
\newblock Self-avoiding walk on a hierarchical lattice in four dimensions.
\newblock {\em Ann. Probab.}, {\bf 20}:82--124, (1992).
\bibitem{CPP94}
S.~Caracciolo, G.~Parisi, and A.~Pelissetto.
\newblock Random walks with short-range interaction and mean-field behavior.
\newblock Preprint, (1994).
\bibitem{FP78}
J.~Fr\"{o}hlich and Y.M. Park.
\newblock Correlation inequalities and the thermodynamic limit for classical
and quantum continuous systems.
\newblock {\em Commun. Math. Phys.}, {\bf 59}:235--266, (1978).
\bibitem{GH93}
A.~Greven and F.~den Hollander.
\newblock A variational characterization of the speed of a one-dimensional
self-repellent random walk.
\newblock {\em Ann. Appl. Probab.}, {\bf 3}:1067--1099, (1993).
\bibitem{IM92}
D.~Iagolnitzer and J.~Magnen.
\newblock Polymers in a weak random potential in dimension four: rigorous
renormalization group analysis.
\newblock To appear in {\em Commun.\ Math.\ Phys.}
\bibitem{Kenn94}
T.~Kennedy.
\newblock Ballistic behavior in a $1$-$d$ weakly self-avoiding walk with
decaying energy penalty.
\newblock Preprint, (1994).
\bibitem{Lawl91}
G.F. Lawler.
\newblock {\em Intersections of Random Walks}.
\newblock Birkh\"{a}user, Boston, (1991).
\bibitem{LeGa85}
J.-F. Le~Gall.
\newblock Sur le temps local d'intersection du mouvement brownien plan et la
methode de renormalization de {Varadhan}.
\newblock In J.~Az\'{e}ma and M.~Yor, editors, {\em S\'{e}minaire de
Probabilit\'{e}s XIX. Lecture Notes in Mathematics \#1123}, Berlin, (1985).
Springer.
\bibitem{LeGa86}
J.-F. Le~Gall.
\newblock Propri\'{e}t\'{e}s d'intersection des marches al\'{e}atoires {I}.
{Convergence} vers le temps local d'intersection.
\newblock {\em Commun. Math. Phys.}, {\bf 104}:471--507, (1986).
\bibitem{LeGa94}
J.-F. Le~Gall.
\newblock Exponential moments for the renormalized self-intersection local time
of planar {Brownian} motion.
\newblock To appear in {\em S\'eminaire de Probabilit\'{e}s XXVIII, Springer
Lecture Notes in Mathematics}, (1994).
\bibitem{Oono75}
Y.~Oono.
\newblock On the divergence of the perturbation series for the excluded-volume
problem in polymers.
\newblock {\em J. Phys. Soc. Japan}, {\bf 39}:25--29, (1975).
\bibitem{Oono76}
Y.~Oono.
\newblock On the divergence of the perturbation series for the excluded-volume
problem in polymers. {II}. {Collapse} of a single chain in poor solvents.
\newblock {\em J. Phys. Soc. Japan}, {\bf 41}:787--793, (1976).
\bibitem{Perk82}
E.~Perkins.
\newblock Weak invariance principles for local time.
\newblock {\em Z. Wahrsch. verw. Gebiete}, {\bf 60}:437--451, (1982).
\bibitem{Rose84}
J.~Rosen.
\newblock Self-intersections of random fields.
\newblock {\em Ann. Probab.}, {\bf 12}:108--119, (1984).
\bibitem{Rose90}
J.~Rosen.
\newblock Random walks and intersection local time.
\newblock {\em Ann. Probab.}, {\bf 18}:959--977, (1990).
\bibitem{Schm90}
U.~Schmock.
\newblock Convergence of the normalized one-dimensional {Wiener} sausage path
measures to a mixture of {Brownian} taboo processes.
\newblock {\em Stochastics and Stochastic Reports}, {\bf 29}:171--183, (1989).
\bibitem{Spit76}
F.~Spitzer.
\newblock {\em Principles of Random Walk}.
\newblock Springer, New York, 2nd edition, (1976).
\bibitem{Stol89}
A.~Stoll.
\newblock Invariance principles for {Brownian} intersection local time and
polymer measures.
\newblock {\em Math. Scand.}, {\bf 64}:133--160, (1989).
\bibitem{Szni91}
A.-S. Sznitman.
\newblock On the confinement property of two-dimensional {Brownian} motion
among {Poissonian} obstacles.
\newblock {\em Commun. Pure Appl. Math.}, {\bf 44}:1137--1170, (1991).
\bibitem{Vara69}
S.R.S. Varadhan.
\newblock Appendix to: {Euclidean} quantum field theory, by {K.}\ {Symanzik}.
\newblock In R.~Jost, editor, {\em Local Quantum Field Theory}, New York,
(1969). Academic Press.
\bibitem{West80}
J.~Westwater.
\newblock On {Edwards'} model for long polymer chains.
\newblock {\em Commun. Math. Phys.}, {\bf 72}:131--174, (1980).
\bibitem{West82}
J.~Westwater.
\newblock On {Edwards'} model for long polymer chains {III}. {Borel}
summability.
\newblock {\em Commun. Math. Phys.}, {\bf 84}:459--470, (1982).
\bibitem{West85}
J.~Westwater.
\newblock On {Edwards'} model for long polymer chains.
\newblock In S.~Albeverio and P.~Blanchard, editors, {\em Trends and
Developments in the Eighties. {Bielefeld} Encounters in Mathematical Physics
{IV/V}}. World Scientific, Singapore, (1985).
\bibitem{Zola87}
H.~Zoladek.
\newblock One-dimensional random walk with self-interaction.
\newblock {\em J. Stat. Phys.}, {\bf 47}:543--550, (1987).
\end{thebibliography}
\bibliographystyle{plain}
\end{document}