\documentclass[12pt]{article}
\newcommand{\Int}{\mathop{\mathrm{Int}}\nolimits}
\newcommand {\Ebar} {{\mbox{\rm$\mbox{I}\!\mbox{E}$}}}
\newcommand {\Rbar} {{\mbox{\rm$\mbox{I}\!\mbox{R}$}}}
\newcommand {\Hbar} {{\mbox{\rm$\mbox{I}\!\mbox{H}$}}}
\newcommand {\Nbar} {{\mbox{\rm$\mbox{I}\!\mbox{N}$}}}
%\newcommand {\Cbar} {{\mbox{\rm$\mbox{I}\!\mbox{C}$}}}
\newcommand {\Cbar}
{\mathord{\setlength{\unitlength}{1em}
\begin{picture}(0.6,0.7)(-0.1,0)
\put(-0.1,0){\rm C}
\thicklines
\put(0.2,0.05){\line(0,1){0.55}}
\end {picture}}}
\newcommand{\Cox}{{\hspace*{\fill}\rule{2mm}{2mm}\linebreak}}
\newsavebox{\zzzbar}
\sbox{\zzzbar}
{\setlength{\unitlength}{0.9em}
\begin{picture}(0.6,0.7)
\thinlines
% \put(0,0){\framebox(0.6,0.7){}}
\put(0,0){\line(1,0){0.6}}
\put(0,0.75){\line(1,0){0.575}}
\multiput(0,0)(0.0125,0.025){30}{\rule{0.3pt}{0.3pt}}
\multiput(0.2,0)(0.0125,0.025){30}{\rule{0.3pt}{0.3pt}}
\put(0,0.75){\line(0,-1){0.15}}
\put(0.015,0.75){\line(0,-1){0.1}}
\put(0.03,0.75){\line(0,-1){0.075}}
\put(0.045,0.75){\line(0,-1){0.05}}
\put(0.05,0.75){\line(0,-1){0.025}}
\put(0.6,0){\line(0,1){0.15}}
\put(0.585,0){\line(0,1){0.1}}
\put(0.57,0){\line(0,1){0.075}}
\put(0.555,0){\line(0,1){0.05}}
\put(0.55,0){\line(0,1){0.025}}
\end{picture}}
\newcommand{\Zbar}{\mathord{\!{\usebox{\zzzbar}}}}
\newcommand{\Zzbar}
{\mathord{\!\setlength{\unitlength}{0.9em}
\begin{picture}(0.6,0.7)
\thinlines
\put(0,0){\line(1,0){0.6}}
\put(0,0.75){\line(1,0){0.575}}
\multiput(0,0)(0.0125,0.025){30}{\rule{0.3pt}{0.3pt}}
\multiput(0.2,0)(0.0125,0.025){30}{\rule{0.3pt}{0.3pt}}
\put(0,0.75){\line(0,-1){0.15}}
\put(0.015,0.75){\line(0,-1){0.1}}
\put(0.03,0.75){\line(0,-1){0.075}}
\put(0.045,0.75){\line(0,-1){0.05}}
\put(0.05,0.75){\line(0,-1){0.025}}
\put(0.6,0){\line(0,1){0.15}}
\put(0.585,0){\line(0,1){0.1}}
\put(0.57,0){\line(0,1){0.075}}
\put(0.555,0){\line(0,1){0.05}}
\put(0.55,0){\line(0,1){0.025}}
\end{picture}}}
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\newtheorem{lemma}{Lemma}[section]
\newtheorem{prop}{Proposition}[section]
\newtheorem{thm}{Theorem}[section]
\newtheorem{definition}{Definition}[section]
\newtheorem{corollary}{Corollary}[section]
\newtheorem{example}{Example}[section]
\newcommand{\Z}{\Zbar}
\newcommand{\R}{\Rbar}
\newcommand{\N}{\Nbar}
\newcommand{\C}{\Cbar}
\newcommand{\Q}{\Qbar}% NOTE(review): \Qbar is never defined in this file; using \Q will raise an undefined control sequence error -- define \Qbar or remove \Q
\newcommand{\h}{\ensuremath{\mathcal{H} }}
\newcommand{\p}{\ensuremath{\mathcal{P} }}
\newcommand{\s}{\ensuremath{\mathcal{S} }}
%\newcommand{\a}{\ensuremath{\alpha}}
\newcommand{\A}{\ensuremath{\mathcal{A} }}
\newcommand{\F}{\ensuremath{\mathcal{F} }}
%\newcommand{\L}{\ensuremath{\mathcal{L} }}
\newcommand{\E}{\Ebar}
\begin{document}
\setlength{\textheight}{21cm}
\title{{\bf On the definition of entropy production,\\ via
examples}}
\author{{\bf Christian
Maes}\thanks{Onderzoeksleider FWO, Flanders. \ Email:
Christian.Maes@fys.kuleuven.ac.be },
\, {\bf Frank
Redig}\thanks{Post-doctoraal Onderzoeker FWO, Flanders. \ Email:
Frank.Redig@fys.kuleuven.ac.be }, \,{\bf Annelies Van
Moffaert}\thanks{Aspirant FWO, Flanders. \ Email:
Annelies.VanMoffaert@fys.kuleuven.ac.be } \\ Instituut voor
Theoretische Fysica,
\\ K.U.Leuven, B-3001 Leuven, Belgium.}
\maketitle
\begin{abstract} We present a definition of entropy production rate
for classes of deterministic and stochastic dynamics. The point of
departure is a Gibbsian representation of the steady state
pathspace measure for which `the density' is determined with
respect to the time-reversed process. The Gibbs formalism is used
as a unifying algorithm capable of incorporating basic properties
of entropy production in nonequilibrium systems. Our definition is
motivated by recent work on the Gallavotti-Cohen (local)
fluctuation theorem and it is illustrated via a number of examples.
\end{abstract}
\vspace{3mm}
\noindent
%{\bf Keywords:} fluctuation theorem, large deviations,
%nonequilibrium, Gibbs states, entropy production rate.
%\renewcommand{\baselinestretch}{2}\normalsize
\section{Introduction.}
One of the more obscure concepts in nonequilibrium statistical
physics is that of entropy and its production. While most people
adhere to the standard textbook-formulation in the case of close to
equilibrium processes, opinions start seriously deviating
concerning the appropriate entropy-concepts when confronted with
far from equilibrium steady states.\\ The question is of course in
the first place not a mathematical one, but rather conceptual. The
field of nonequilibrium thermodynamics is still under construction
and while various mathematically precise definitions make a lot of
sense {\it a priori}, it needs to be seen how these definitions
relate to the phenomena. Yet, in the tradition of mathematical
physics, \cite{Gaa}, a mathematical treatment of various examples
or models hopefully clarifies the situation and may enable an
algorithmic and constructive setup, available and testable also in
cases when the correct physical intuition is not immediately
available.
In the last years, a lot of interest was generated in understanding
`entropy production' (or, entropy creation rate) in far from
equilibrium steady states. In the context of certain dynamical
systems, thermostatted systems in particular, entropy production
appears as synonymous with `phase space contraction rate.' In fact
under certain assumptions, one proves that the change of Shannon
entropy in the steady state exactly equals (minus) that contraction
rate, \cite{Ru1,Ru2,Ru4}. The Gallavotti-Cohen theorem states a
symmetry in the probability of fluctuations of this entropy
creation rate, and while proven only for a limited class of systems
(\cite{gc,gc1}), has been observed in computer simulations in a
variety of models, see e.g. the motivating experiment \cite{ECM}.
In all examples, the physical interpretation as entropy creation
rate seems to be confirmed.\\ In \cite{M} another approach to the
definition of entropy production appeared. There one considers the
pathspace measure in the steady state and compares it (locally)
with the pathspace measure of the time-reversed process. If such a
comparison can be made, the relative density can be defined (as
corresponding Radon-Nikodym derivative). This density can be
written as an exponential of a `relative energy' for an interaction
governing the pathspace measure with respect to the time-reversal
transformation. This `relative energy' is extensive and only
contains the space-time interactions that break the time-reversal.
Its density is what we call the entropy production. Under various
conditions, by the very nature of its `Gibbsian' definition, it
satisfies a fluctuation theorem. It is interesting to see that it
coincides with the definition given in the Gallavotti-Cohen setup
when indeed the dynamics satisfies the conditions of their theorem.
But it also gives a unifying description for more general dynamics
if, at least for the purpose of computing macroscopic properties,
the space-time trajectories are distributed via the appropriate
(space-time) Gibbs measure. In particular, via this one and the
same algorithm the entropy action functionals for every example of
stochastic dynamics appearing in \cite{LS,K} can be computed.\\ The
goal of this paper is to continue and to extend the analysis of
\cite{M} mostly via specific illustrative examples. The main
result is that it works: both theoretical considerations as well as
exact results for examples of far from equilibrium steady state
dynamics confirm our definition and the Gibbsian picture which is
behind it. The material we work with here is mostly taken from
\cite{LS} for the examples of stochastic dynamics, and for
deterministic dynamics we mainly restrict ourselves to the
Gallavotti-Cohen setup.\\
The plan of the paper: We start in the
next section with our Gibbsian setup and with an abstract
definition of entropy production. It is then specified to the
context of interacting particle systems, both stochastic and
deterministic. For stochastic dynamics we consider
spinflip, particle exchange and
diffusion processes. There is also an example of a molecular
motor. The discussion on deterministic dynamics is more
descriptive as we limit ourselves mostly to explaining the relation
of the Gallavotti-Cohen work with the Gibbsian setup. The last
section is devoted to a brief discussion of the transient (not
steady state) regime and the mathematics behind the so-called
nonequilibrium work relations.\\ By the nature of the presented
material, the reader will be confronted at the same time with
elements of the Gibbs formalism, interacting particle systems,
stochastic calculus, dynamical systems and thermodynamics. We have
tried to make the sections more or less self-contained and we have
added many references to refer to for background information.
\section{Definition and main properties of entropy production}
\setcounter{equation}{0}
\subsection{Pathspace measure: Gibbsian setup}
We start with an informal description of the main algorithm used in
the identification of the entropy production.\\ Suppose a system
composed of many locally interacting components in a nonequilibrium
steady state. The pathspace measure gives the microscopic
distribution of the trajectories of the system compatible with the
macroscopic information that is available. We take as hypothesis
that for the purpose of computing the macroscopic properties of the
system, this pathspace measure in fact defines a Gibbs state on the
space-time configurations. For the moment, we forget how this
Gibbsian description is obtained but we will come back to this in a
remark below.\\ For the sake of simplicity let us in fact consider
the case where we are dealing with a Gibbs state for a lattice spin
system, see \cite{Geo,EFS,Sim} for background. The configurations
$\sigma$ are then elements of $\Omega
= G^{\Z^{d+1}}$ where $G$, the single spin space, is finite and
$\sigma=(\sigma(n,i), n\in \Z, i \in \Z^d)$ is a space-time
trajectory for $n=$ discrete time and $i$ a spatial coordinate on
the $d$-dimensional lattice $\Z^d$. Physically, it is better to
replace the infinite space-time lattice $\Z^{d+1}$ by a huge
space-time box $W$, possibly with appropriately chosen boundary
conditions but what follows is easily adapted to that case.
Furthermore, we have a family of macroscopic variables, formally,
\begin{equation} \label{macro}
H^\alpha = \sum_A U_A^\alpha, \qquad \alpha=0,\ldots,m
\end{equation}
each additive in `space-time potentials' $U_A^\alpha$ for which we
assume that $U_A^\alpha$ is a real-valued function only depending on
the configuration in the finite set $A$, summable according to
\begin{equation}\label{summable}
\sum_{A \ni x} \sup_\sigma |U_A(\sigma)| < +\infty, \qquad x\in \Z^{d+1}.
\end{equation}
Again, this condition is not strictly needed but it is the simplest
choice for making sense of the differences
\begin{equation}\label{differ}
H^\alpha(\sigma) -H^\alpha(\eta) =
\sum_{A\cap\Lambda\neq\emptyset}[U^\alpha_A(\sigma) -
U^\alpha_A(\eta)]
\end{equation}
when $\sigma, \eta\in \Omega$ coincide outside a finite set $\Lambda
\subset \Z^{d+1}$. Moreover, thanks to the additive structure,
we immediately get that (\ref{differ}) equals
\begin{equation}
\sum_{x\in \Lambda} \sum_{A\ni x, A\subset \Lambda} \frac
1{|A|}[U^\alpha_A(\sigma)-U^\alpha_A(\eta)]
\end{equation}
up to boundary terms $O(|\partial \Lambda|)$ when $\Lambda$ is a
regular, say cubic-like, region.
In the same way, we will be demanding
translation invariance even though this is
negotiable for some of what follows. In fact,
it is good to split translations in spatial and time-like translations.
We denote
the translation over a lattice vector $x\in \Z^{d+1}$
by $\tau_x$ and the translation
is time-like if $x=(n,0)$
for some $n\in \Z$, space-like if $x=(0,i)$ for some $i\in \Z^d$.
Since we are considering a system
in its steady state, the condition that the potential
(and the pathspace measure) is
time-like translation invariant is a natural requirement, but the
space-like invariance is often broken. All the same, we wish to
continue with the simplest setup
and global translation invariance is part of this: $U_A(\tau_x \sigma)=
U_{A+x}(\sigma)$, where $\tau_x\sigma(y) = \sigma(x+y)$.\\
Gibbs states corresponding to the
ensemble defined via (\ref{macro}) are probability measures $\mu$ on
$\Omega$ whose conditional distributions to find the trajectory
$\sigma$ in an arbitrary finite set
$V\subset \Z^{d+1}$ when the configuration
$\sigma$ is given outside $V$ is
\begin{equation}
\mu[\sigma \mbox{ on } V|\sigma \mbox{ on } V^c]=
\frac 1{Z_V(\sigma)}
\exp[-\sum_\alpha E_\alpha \sum_{A\cap V \neq \emptyset} U_A^\alpha(\sigma)].
\end{equation}
Here, $Z_V(\sigma)$ is the partition function depending on the
values $\sigma(x), x\in V^c$ and the $E_\alpha$ are conjugate
variables. As a reference measure we have taken the product of
counting measures on $G$.\\ Suppose now that we have a
transformation $\theta$ on $G$ for which $\theta^2=1$ and which
leaves invariant the counting measure on $G$. We fix a sequence of
increasing space-time cubes $\Lambda_{N,L} =
\{x=(n,i)\in \Z^{d+1}, |n| \leq N, |i| \leq L\}$. We can then
define the transformations $\pi_{\Lambda_{N,L}}=\pi_{N,L}$ on $\Omega$ for which
\begin{equation}
(\pi_{N,L}\sigma)(n,i) = \sigma^{N,L}(n,i)=\theta(\sigma(-n,i))
\end{equation}
when $(n,i)\in \Lambda_{N,L}$ and $(\pi_{N,L}\sigma)(x)=\sigma(x), x\in \Lambda_{N,L}^c$.
The time-reversal transformation $\pi$ is then defined on local
observables (functions) as
\begin{equation}\label{timere}
f\pi(\sigma)= f\circ \pi(\sigma) = f(\pi_{N,L}\sigma)
\end{equation}
where for the local $f$ with dependence set ${\cal D}_f$
(i.e. the smallest set $A$ so that $f(\sigma)=f(\sigma(x),x\in A)$)
$\Lambda_{N,L}$ is the smallest box containing ${\cal D}_f$.\\
We assume that $U^0_A(\pi_{N,L}\sigma)=U_A^0(\sigma)$ whenever
$A\subset \Lambda_{N,L}$. In other words, $H^0$ in (\ref{macro}) is put as a reference
`action' or `Hamiltonian' which is invariant under the time-reversal transformation
$\pi$. One should think of the Gibbs measure $\nu \sim \exp[-E_0H^0]$ as the unperturbed
equilibrium steady state. The amplitudes $E_\alpha$ should correspond to gradients
of intensive variables each conjugate to time-reversal breaking
macro-variables
$H^\alpha, \alpha=1,\ldots,m$ and they determine for each of the $m$ considered mechanisms
the amount of driving away from equilibrium.\\
Associated to the transformation $\pi_{N,L}$ there is the
Gibbs measure $\pi_{N,L}\mu$ defined via
\begin{equation}
\pi_{N,L}\mu(f) = \mu(f\circ \pi_{N,L})
\end{equation}
for an observable $f$ on $\Omega$.
In the present context, most important for
us is that the measure $\pi_{N,L}\mu$ has a density
with respect to the original pathspace measure $\mu$:
\begin{equation} \label{crux}
\frac {d(\pi_{N,L}\mu)}{d\mu} =e^{-R_{N,L}}.
\end{equation}
This is
automatically so for Gibbs measures but the full glory of Gibbsian states is not needed
as we only require in (\ref{crux}) the existence of a density corresponding to the
transformations $\pi_{N,L}$. In a more formal way (which does make sense however for
a finite space-time lattice $W=\Lambda_{N,L}$), what we are doing is comparing the
plausibility or weight $\mu[\sigma]$ in the pathspace measure $\mu$
of a trajectory $\sigma$ of the system
with the weight $\mu[\sigma^{N,L}]$ where $\sigma^{N,L}(n,i)= (\theta\sigma)(-n,i)$.
The condition (\ref{crux}) then amounts to asking that
$\mu[\sigma^{N,L}] = 0$ whenever $\mu[\sigma]=0$ with well-defined
ratio
\begin{equation}\label{crux1}
\frac{\mu[\sigma^{N,L}]}{\mu[\sigma]} = e^{-R_{N,L}}.
\end{equation}
At the risk of generating confusion, we will say that the pathspace
measure $\mu$ (or, the dynamics generating it via its steady state
statistics) is {\it microscopically reversible} if there exists a
transformation $\theta$ for which this property (\ref{crux}) or
(\ref{crux1}) is satisfied. (This should not be confused with the
condition of detailed balance that we will meet later and which
will make $R_{N,L}$ a boundary term, i.e. of order $|\partial
\Lambda_{N,L}|$.)\\
Continuing with our Gibbsian setup three properties of
$R_{N,L}$ are immediate.
\begin{prop}\label{prop1:Gibbs}
Given the above hypothesis for a translation invariant Gibbs
measure $\mu$,
\begin{itemize}
\item
(Extensivity) There exists a continuous function $\dot{s}$
(constructed in (\ref{cur}) below) so that (with
$\dot{s}_x=\dot{s}\circ \tau_x$)
\begin{equation}\label{ext}
R_{N,L} = \sum_{x\in\Lambda_{N,L}} \dot{s}_x + O(NL^{d-1} +
L^d)
\end{equation}
with order $|O|/N \leq cL^{d-1}$ if $L\leq N$.
\item (Symmetry) Let $P_{N,L}$ denote the law of $R_{N,L}$ as induced
by $\mu$ and let $\tilde{P}_{N,L}$ denote the law of $-R_{N,L}$. Then,
\begin{equation}
\frac{d\tilde{P}_{N,L}}{dP_{N,L}}(y) = e^{-y}.
\end{equation}
In particular, for each complex number $z\in \C$,
\begin{equation}\label{sym}
\mu(e^{-zR_{N,L}}) = \mu(e^{-(1-z)R_{N,L}}).
\end{equation}
\item
(Positive expectation)
\begin{equation}
\mu(R_{N,L}) \geq 0
\end{equation}
with equality only if $R_{N,L}$ is zero $\mu$-almost surely.
\end{itemize}
\end{prop}
\medskip\noindent
{\bf Proof:} The extensivity is a standard result of the Gibbs
formalism. With the definition
\begin{equation} \label{cur}
\dot{s} = \sum_{A\ni 0} \frac 1{|A|}
\sum_{\alpha=1}^m E_\alpha [U^\alpha_A\pi-U^\alpha_A]
\end{equation}
it is readily checked, using (\ref{differ}) (see also \cite{M}), that
\begin{equation}
R_{N,L} = \sum_{A\cap\Lambda_{N,L}\neq \emptyset}
\sum_\alpha E_\alpha [U_A^\alpha \pi_{N,L} - U_A^\alpha]
\end{equation}
can be written as a sum over the $x\in \Lambda_{N,L}$ of
translations of $\dot{s}$ (as in (\ref{ext})) up to boundary terms
of order $\partial
\Lambda_{N,L}$. \\
The symmetry is a consequence of (\ref{crux}) and $R_{N,L}\pi_{N,L}=-R_{N,L}$.
Furthermore, there is the trivial identity
\begin{equation}
\mu(e^{-(1-z)R_{N,L}})= \pi_{N,L}\mu (e^{zR_{N,L}}) = \mu(e^{-zR_{N,L}}).
\end{equation}
Finally, since of course $\mu(e^{-R_{N,L}})=1$, the positivity of the
expectation value follows from Jensen's inequality. (Remark: in fact,
all moments of $R_{N,L}$ have non-negative expectation as is easy to
find from the symmetry (\ref{sym}) in its characteristic function.)
$\Cox$
\medskip\noindent
We define the {\bf entropy production} (creation rate) as the
expectation value of $\dot{s}$:
\begin{equation} \label{rate}
\mu(\dot{s}) = \sum_{\alpha=1}^m E_\alpha \mu(J^\alpha)
\end{equation}
where the currents $J^\alpha$ are
\begin{equation}
J^\alpha=\sum_{A\ni 0} \frac 1{|A|}
[ U_A^\alpha \pi - U_A^\alpha].
\end{equation}
The rest of the paper is devoted to explaining and motivating this
definition. This will be done as follows. We first list some of
the properties of our definition in the present setup. This is
repeated in a somewhat more abstract setting in the next subsection
which however can guide us in many different cases. We then
illustrate it via examples.\\ (\ref{rate}) attempts to define the
entropy production directly as bilinear in thermodynamic forces
$E_\alpha$ and fluxes $J^\alpha$ and it inherits the properties of
$R_{N,L}$ in Proposition \ref{prop1:Gibbs}:
\begin{prop}\label{prop2:Gibbs}
Under the hypotheses above for a translation invariant Gibbs measure
$\mu$,
\begin{itemize}
\item
\begin{equation}
\mu(\dot{s})\geq 0
\end{equation}
with equality only if $\dot{s}=0$ $\mu$-almost surely (in which case
$\mu\pi$ and $\mu$ must be Gibbs measures with respect to the same
(physically equivalent) potential $(U_A\pi)\sim(U_A)$).
\item
If $\mu$ is extremal, then
\begin{equation}\label{lln}
\mu(\dot{s}) = \lim_{N,L} \frac 1{|\Lambda_{N,L}|} R_{N,L}
\end{equation}
$\mu$-almost surely.
\item
Let $Q_{N,L}$ denote the law of $\sum_{x\in \Lambda_{N,L}}
\dot{s}_x/|\Lambda_{N,L}|$ and let $\tilde{Q}_{N,L}$ denote the law of
$-\sum_{x\in \Lambda_{N,L}}
\dot{s}_x/|\Lambda_{N,L}|$. Then,
\begin{equation}
\label{reader}
\lim_{N,L} \frac
1{|\Lambda_{N,L}|}\ln\frac{d\tilde{Q}_{N,L}}{dQ_{N,L}}(y)=-y.
\end{equation}
In particular, the
limit
\begin{equation}\label{gk}
p(\lambda,E) = -\lim_{N,L} \frac
1{|\Lambda_{N,L}|}\ln\mu(e^{-\sum_{\alpha,x\in\Lambda_{N,L}} \lambda_\alpha J^\alpha_x})
\end{equation}
exists and equals $p(2E-\lambda,E)$ for all
$\lambda=(\lambda_1,\ldots,\lambda_m), E=(E_1,\ldots,E_m) \in \R^m$.
\end{itemize}
\end{prop}
\medskip\noindent
{\bf Proof:} (\ref{lln}) is a consequence of the law of large numbers using that
$\mu$ is a translation invariant extremal Gibbs state.
The rest of the proof was already given in \cite{M}.$\Cox$
\medskip\noindent
The symmetry of (\ref{gk}) for the generating function of $\dot{s}$
gives rise to relations between correlation functions when
$p(\lambda,E)$ is differentiable with respect to $\lambda$ and $E$.
Most important is the following version of the Green-Kubo formula (also already proven in
\cite{M})
\begin{corollary}
\begin{equation}\label{green}
L_{\alpha\gamma}=\frac {\partial}{\partial
E_\alpha}\mu(\frac{\partial}{\partial E_\gamma} \dot{s})(E=0) =
\sum_x \nu(J^\gamma J^\alpha_x).
\end{equation}
\end{corollary}
Obviously, (\ref{green}) is (Onsager-) symmetric in interchanging
$\alpha$ with $\gamma$.
{\bf Remark: } As far as we see there are two main approaches
connecting the Gibbs formalism with nonequilibrium statistical
mechanics. The first one is easiest to grasp for the `mechanically
inclined.' It is obtained by the explicit construction of the
pathspace measure of various dynamics as a Gibbs measure. In fact
(and fortunately), the full identification of the interaction is
not needed for our purposes as we are only interested in a specific
`relative energy' (as in (\ref{crux}-\ref{crux1})). The second
approach is more familiar to the `statistically inclined.' It
involves identifying Gibbs measures as solutions of the maximum
entropy principle, given space-time information on macroscopic
variables such as steady state currents. It does not build the
pathspace measure from the microscopic dynamics but instead tries
to predict macroscopic behavior from a Gibbs measure on the
space-time trajectories which is statistically compatible with
certain experimentally available data. We refer to the method of
\cite{Ja} and to specific examples (in the quantum domain) in e.g.
\cite{an1}. In this paper, we deal exclusively with the first
approach. It should however be realized that, while the first
approach works equally well for systems containing only few degrees
of freedom, the stone-wall character of the laws of irreversible
thermodynamics can only be expected for systems containing a large
number of degrees of freedom.
\subsection{Abstract definition}
\label{subsec2.2}
Here we start from an abstract probability space $(\Omega,{\cal
F},\mu)$. The set $\Omega$ has to be thought of as the set of
discrete or continuous time paths of a particle system. Next we
consider an index set ${\cal S}$ equipped with the partial
order $\leq$, and an increasing family of sigma-fields $\{{\cal F}_s,
s\in {\cal S}\}, {\cal F}_s \subset {\cal F}, \forall s \in {\cal
S}$, with ${\cal F} =\sigma (\cup_{s\in{\cal S}} {\cal F}_s )$.
Typically ${\cal S}$ has to be thought of as some set of
increasing space-time windows, e.g.
\[
{\cal S} = \{[0,T], T\geq 0\}
\]
or
\[
{\cal S}=\{V\times [0,T], T\geq 0, V\subset \Z^d (\mbox{ finite })\}
\]
ordered by inclusion.
Consider a set of transformations $\{\pi_s, s\in {\cal S}\}$,
indexed by ${\cal S}$ such that the following four conditions are
satisfied:
\begin{itemize}
\item
(i) $\pi_s:\Omega\rightarrow \Omega$ is ${\cal F}_s$ measurable for
all $s\in {\cal S}$, i.e., $\pi_s^{-1} (A) \in {\cal F}_s$ for all
$A\in {\cal F}_s$.
\item
(ii) $\forall s\in {\cal S}, \forall \omega \in \Omega, \pi_s\circ
\pi_s(\omega) =\omega$
\item
(iii) $\forall s\in {\cal S}, \pi_s \mu$ is absolutely
continuous with respect to $\mu$.
\item
(iv) $\forall t>s$, $\forall \ {\cal F}_s$ measurable $f$:
$\E(f\circ\pi_t|{\cal F}_s ) = f\circ\pi_s$
\end{itemize}
The transformation $\pi_s$ has to be thought of as the time-reversal
transformation in the window indexed by $s\in {\cal S}$. \\
By condition (iii) above, we can write the Radon-Nikodym derivative
\begin{equation}
\frac{d(\pi_s\mu)}{d\mu} = e^{-R_s}
\end{equation}
and we immediately obtain, by the same proof, the analogue of
Proposition \ref{prop1:Gibbs}:
\begin{prop}\label{prop3:Gibbs}
For all complex numbers $z\in \C$,
\begin{equation}
\mu(e^{-zR_s}) = \mu(e^{-(1-z)R_s})
\end{equation} and $\mu(R_s)\geq 0$ with equality iff $R_s=0$
$\mu$-almost surely.
\end{prop}
In the examples that follow we will always identify the function
$R_s$ by means of a Girsanov formula. The procedure is always to
refer to a time-reversal invariant process $\nu$ and to compute the
ratio
\begin{equation}
\frac{d\mu}{d\nu}(\pi_s(\omega)) / \frac{d\mu}{d\nu}(\omega) =
e^{-R_s(\omega)}.
\end{equation}
Since we have not insisted on a natural notion of
spatial translations
in our abstract setup
we cannot expect to obtain an analogue to (\ref{ext}) which for
statistical mechanics is of prime importance as it is related to the
extensiveness of the entropy production.
Note however that from condition (iv)
\begin{equation}
M_t:= \E [ \frac{d(\pi_t\mu)}{d\mu} |{\cal F}_t ]
\end{equation}
is a martingale and hence $\E(-\log M_t)$ is a non-decreasing function of
time $t$ ($-\log M_t$ is a non-negative submartingale).
Most of the time, there will be a natural choice for
an increasing function $\alpha:{\cal S}\rightarrow \R_+$ being a suitable
normalization in the sense that $\sup_{s\in{\cal S}} \alpha(s) =+\infty$ and the
limit (free energy functional)
\begin{equation}
p(\lambda) = -\lim_s \frac 1{\alpha(s)} \ln \mu(e^{-\lambda R_s})
\end{equation}
exists (and is non-trivial) for $\lambda\in \R$.
By Proposition \ref{prop3:Gibbs} we have
automatically $p(\lambda)=p(1-\lambda)$, see (\ref{gk}).\\
We then call
\begin{equation}
\mu(\dot{s})= \lim_s \frac 1{\alpha(s)} \mu(R_s) \geq 0
\end{equation}
the entropy production corresponding to $(\Omega,{\cal F},\mu)$ and
transformations $\pi_s$. This is the analogue of
(\ref{rate}-\ref{lln}) in the general context.\\ When we deal with
``fluctuations of entropy production'', then we mean fluctuations of
the random variable $R_s/\alpha (s)$. In the identification of
$R_s$, we always want to obtain $R_s$ ``up to boundary terms''. More
precisely, given a suitable normalization $\alpha$, we define
$R_s\doteq R'_s$ iff for all $\lambda >0$
\begin{equation}\label{negli}
\limsup_{s} \frac{1}{\alpha (s )} \log \int
\exp \left[ ({\lambda |R_s - R'_s|}) \right] d\mu = 0.
\end{equation}
This of course implies that $R_s $ and $R'_s$ give rise to the same
free energy functional. We also denote $\frac
{d(\pi_s \mu)}{d\mu}\doteq 1$ iff $p(\lambda )= 0$ for all
$\lambda\geq 0 $. When $R_s\doteq 0$ we say that $R_s$ is
``negligible''. In this case, we say that the dynamics is
time-reversal invariant.
\section{Stochastic dynamics}
\label{Sec3}
\setcounter{equation}{0}
In this section we discuss a variety of examples. In each
particular case, from the Gibbs representation of the pathspace
measure of the stochastic process (as in (\ref{crux}-\ref{crux1})),
we can identify the entropy production by comparison with the
time-reversed process.
\subsection{ Spinflip dynamics}
Spinflip processes (SFP) are continuous time Feller processes
taking values in the space $X= \{ +1, -1\}^{\Z^d}$ of Ising spin
configurations. The spin at site $i\in\Z^d$ is flipped according to
\begin{equation}
\mbox{Prob} [\sigma_{t+dt} (i) = -\xi (i)|\sigma_t =\xi]
= c(i,\xi ) dt,
\end{equation}
where the spin flip rate $c(i,\xi)$ is assumed strictly positive,
translation invariant and local. The main difference with the automata
considered in
\cite{M} is the sequential
updating whereas the ``discreteness'' of time is replaced by the event times
of a Poisson process.
The generator of a SFP is defined on local functions via
\begin{equation}
Lf(\xi) = \sum_{i \in \Zbar^d} c(i,\xi) \left( f(\xi^i) - f(\xi)
\right),
\end{equation}
where $\xi^i$ denotes the configuration obtained from $\xi$ by
flipping the spin at $i$, i.e. $\xi^i(j)=-\xi(i)$ if $ j=i$ and
$\xi^i(j)=\xi(j)$ for $j \neq i$. See \cite{Ligg} for the existence
of the process with generator $L$ and for more details on SFP. A
probability measure $\rho$ on $X$ is called invariant or stationary
if for all local $f$, $\int d\rho Lf =0$. If we start the process
from $\rho$ then we obtain of course a stationary process. We
denote by $\mu$ its pathspace measure. The spinflip rates are
satisfying the condition of detailed balance if
\begin{equation}\label{detba}
\frac{c(i,\xi)}{c(i,\xi^i)}= \exp [-(H(\xi^i)-H(\xi))],
\end{equation}
for some Hamiltonian $H$ on $X$ (see \cite{Ligg} for more details).
If the spinflip rates satisfy the condition of detailed balance,
then the Gibbs measures with respect to the Hamiltonian $H$ are
``reversible'', which means that the process started at one of them
is time-reversal invariant. In order to apply the general formalism
of Section 2, we identify:
\begin{equation}
\Omega:= \Omega_{T_0,\Lambda_{L_0}}:=D([-T_0,T_0], \{-1,+1 \}^{\Lambda_{L_0}}),
\end{equation}
the set of cadlag trajectories of
spin configurations in the finite volume $\Lambda_{L_0}:=[-L_0,L_0]^d\cap\Zbar^d$
in the finite time interval $[-T_0,T_0]$.
Elements of $\Omega$ are denoted by $\omega,\sigma$.
The time-reversal transformation is defined in windows
$[-T,T]\times\Lambda_L$ for $T\leq T_0$ and $L\leq L_0$:
\begin{eqnarray}\label{trans}
(\pi_{T,L}\sigma)_t (i) &=& \sigma_{-t} (i) \mbox{ if } t\in [-T,T],
i\in\Lambda_L
\nonumber\\
&=& \sigma_t (i) \mbox{ otherwise}.
\end{eqnarray}
Notice that this transformation does not preserve the
right-continuity of the paths at the jumping times but we can
modify the jumps of $\pi_{T,L} (\sigma )$ making it again an
element of $\Omega$. With a slight abuse of notation we write
$\pi_{T,L}(\sigma )$ for this cadlag modification of (\ref{trans}).
\begin{lemma}\label{spinfliplemma}
\begin{equation}
\frac{d\pi_{T,L}\mu}{d\mu} = \exp (-R_{T,L}),
\end{equation}
with
\begin{equation}
R_{T,L} (\sigma) = \sum_{i\in\Lambda_L} \int_{-T}^T
\log \frac{c(i,\sigma_s)}{c(i,\sigma^i_s)} dN^i_s
+ G_{T,L} (\sigma),
\end{equation}
where \[
N^i_s = \frac{1}{2}\sum_{t \in [-T,s]} |\sigma_{t^+}(i) - \sigma_{t^-}(i) |
\]
is the basic jump process, counting the number of flips at site $i$ in the time
interval $[-T,s]$ and
$ G_{T,L} $ is negligible in the sense (\ref{negli}).
\end{lemma}
{\bf Proof:} This is a direct application of the Girsanov formula.
We introduce the time-reversal invariant reference process of
independent spinflips, i.e. the process with generator
\begin{equation}
L_0 f(\xi)=\sum_{i\in\Lambda_{L_0}} \left[f(\xi^i) - f(\xi)\right]
\end{equation}
and denote by $\mu^0_\xi$ its pathspace measure starting from $\xi
\in \{-1 ,+1\}^{\Lambda_{L_0}} $, i.e. $\sigma(t=0,.) \equiv
\sigma_{t=0} \equiv \sigma_0=\xi$. We also denote by $\mu$ the
pathspace measure of the process with generator $L$ starting from
its stationary measure $\rho$, and by $\mu_0$ the same for the
process with generator $L_0$, starting from the stationary measure $\rho_0$.
From the Girsanov formula for point processes (see \cite{Lips}, p.~314) we obtain
\begin{equation}
\label{Girs}
\frac{d\mu_\xi}{d\mu^0_\xi} =
\exp \left\{ \sum_i\int_{-T}^T \log c(i, \sigma_s) dN^i_s - \left[
\int_{-T}^T c(i,\sigma_s)ds -2T \right] \right\}.
\end{equation}
Using time-reversal invariance of the reference process we obtain:
\begin{eqnarray}\label{bruu}
\lefteqn{ R_{L,T} = - \log \frac{d(\pi_{T,L}
\mu)}{d\mu}
} \nonumber \\
& \doteq &
- \log \frac{d(\pi_{T,L} \mu)}{d\mu_0}
+ \log \frac{d \mu}{d\mu_0}
\\
& \doteq &
\sum_{i\in \Lambda} \int_0^T \log
\frac{c(i,\sigma_s)}{c(i,\sigma^i_s)} dN^i_s %\right\}.
\end{eqnarray}
In the first step we put `$\doteq$' because the process $\mu_0$
satisfies
$ \frac{d\pi_{T,L} \mu_0}{d\mu_0}
\doteq 1$, in the last step because %$\frac{d\mu_\xi}{d\mu} \doteq
we omitted the term coming from the initial measures
($\rho$ for the process, and $\rho_0$ for the reference process).
\Cox
For the entropy production we obtain from Lemma \ref{spinfliplemma}
\begin{equation}\label{haha}
\mu(\dot{s}) = \lim_{T,\Lambda} \frac{1}{2T|\Lambda|}
\mu\left( \sum_{i\in \Lambda} \int_{-T}^T \log
\frac{c(i,\sigma_s)}{c(i,\sigma^i_s)} dN^i_s \right).
\end{equation}
Use that $N^i_s - \int_{-T}^s c(i,\sigma_t)dt $ is a martingale.
By stationarity and translation invariance of $\rho$, we obtain from
(\ref{haha})
\begin{eqnarray}
\label{entro}
\lefteqn{ \mu(\dot{s}) = \lim_{T, \Lambda} \frac{1}{2T |\Lambda|}
\mu\left( \sum_{i\in \Lambda} \int_{-T}^T \log
\frac{c(i,\sigma_s)}{c(i,\sigma^i_s)} dN^i_s \right) }
\nonumber \\
&=&
\int \rho(d\xi)
c(0, \xi) \log \frac{c(0,\xi)}{c(0,\xi^0)}.
\end{eqnarray}
If $\mu$ is time-reversal invariant, then (\ref{entro}) is zero:
this is the case when $\rho$ is a Gibbs measure with respect to the Hamiltonian
of (\ref{detba}), i.e., when
\begin{equation}
\int\rho(d\sigma ) c(x,\sigma ) [f(\sigma^x )-f(\sigma )]=0,
\end{equation}
for all $f$ local and for all $x\in\Zbar^d$.
{\bf Example} (cf. \cite{Ku}): Consider the one-dimensional
spinflip dynamics with the following asymmetric rates:
\begin{equation}
c(x,\sigma )= \exp (-\beta \sigma (x)\sigma (x+1 )).
\end{equation}
The invariant measure $\rho$ is the one-dimensional Ising model at
inverse temperature $\beta/2$. The process starting from $\rho$ is
not time-reversal invariant. For the entropy production we find,
after an easy calculation:
\begin{equation}
\mu(\dot{s} )= 2\beta\tanh (\frac{\beta}{2}) >0.
\end{equation}
It would be interesting to understand how this could have been
guessed from a physical interpretation of the model.
\subsection{Molecular Motor}
We illustrate our method for a simple model of a molecular motor.
This example was suggested to us by H. Wagner. Since, at the time
of writing this paper, we received the preprint \cite{JM} which
deals with a similar model, we omit giving context and motivation
and we refer to \cite{JM, Julicher, Magnasco} for details. Here,
after defining the simplest variant of the model, we proceed at
once with our algorithm.
We consider a point particle that can jump on the one-dimensional
lattice ($\Zbar$) under the influence of a periodic potential $U$.
This potential $U$ can take two different shapes $U_1$ and $U_2$
(see figure) and it switches between them at a rate $\Gamma$. The
state of the particle can be characterized by a couple $(x,\alpha)$
where $x$ describes the position of the particle and $\alpha\in
\{1,2\}$ specifies in which potential the particle moves. The
potential $U_2$ is symmetric and periodic with period 1
while $U_1$ has
minima of two different heights bounded by asymmetric energy
barriers. It is also periodic but with period 2.
\vspace{5mm}
\begin{picture}(300,150)(7,90)
\put(5,210){$U_1$}
\put(5,195){$U_2$}
%\put(75,230){2}
%\put(110,230){3}
%\put(143,230){4}
\thinlines
\put(30,195){\line(1,-2){30}}
\thicklines
\put(30,210){\line(1,-3){30}}
%\dashline{0.15}(2.0,0.0)(2.0,2.0)
%\dottedline{3}(60,195)(60, 110)
\thinlines
\put(60,135){\line(1,2){30}}
\thicklines
\put(60,120){\line(1,2){30}}
\thinlines
\put(90,195){\line(1,-2){30}}
\thicklines
\put(90,180){\line(1,-1){30}}
%\dottedline{3}(120,195)(120, 120)
\thinlines
\put(120,135){\line(1,2){30}}
\thicklines
\put(120,150){\line(1,2){30}}
\thinlines
\put(150,195){\line(1,-2){30}}
\thicklines
\put(150,210){\line(1,-3){30}}
\thinlines
%\dottedline{3}(180,195)(180, 120)
\put(180,135){\line(1,2){30}}
\thicklines
\put(180,120){\line(1,2){30}}
\thinlines
\put(210,195){\line(1,-2){30}}
\thicklines
\put(210,180){\line(1,-1){30}}
\thinlines
%\dottedline{3}(240,195)(240, 120)
\put(240,135){\line(1,2){30}}
\thicklines
\put(240,150){\line(1,2){30}}
\thinlines
\put(270,195){\line(1,-2){30}}
\thicklines
\put(270,210){\line(1,-3){30}}
\thinlines
\put(30,135){\vector(1,0){280}}
%(300,135)
%\put(300,132){$\rightarrow$}
\put(320,135){$\Zbar$}
\put(60,110){1}
\put(120,110){2}
\put(180,110){3}
\put(240,110){4}
\put(300,110){5}
\end{picture}
\vspace{4mm}
To be concrete, let us put for $n \in \Zbar$, $U_2(n)=0$,
$U_1(2n)=\epsilon = -U_1(2n+1)$. The asymmetry of the barriers
implies that in $U_1$ the rates for jumping to the right are
different from those for jumping to the left. The transitions occur
at the rates:
\begin{equation}
\begin{array}{l}\label{RATES}
r\left( (2n+1,1) \rightarrow (2n+2,1) \right) = \gamma_1 \\
r\left( (2n+1,1) \rightarrow (2n,1) \right) = \delta_1 \\
r\left( (2n,1) \rightarrow (2n+1,1) \right) = \gamma_2 \\
r\left( (2n,1) \rightarrow (2n-1,1) \right) = \delta_2 \\
r\left( (x,1) \rightarrow (x,2) \right) = \Gamma \\
r\left( (x,2) \rightarrow (x,1) \right) = \Gamma \\
r\left( (x,2) \rightarrow (x \pm 1,2) \right) = \gamma.
\end{array}
\end{equation}
In the figure above we have taken
$\delta_1 < \gamma_1 < \gamma_2 < \delta_2$. It is the combination of the energy
difference and the height of the energy barrier between adjacent minima
that determines the rates
in a concrete model.
We suppose that
\begin{equation}
\label{delta2}
\frac{\gamma_2}{\delta_1} = \frac{\delta_2}{\gamma_1}=
\mbox{e}^{2\epsilon/T_s}
\end{equation}
for thermal transitions at the temperature $T_s$ in the potential
$U_1$. Notice that from this condition of detailed balance both
potentials (separately) give rise to a time-reversal invariant
dynamics. However switching between the two potentials at rate
$\Gamma>0$ destroys time-reversal invariance and can
produce a net current and a non-zero entropy production. The
physical reason is that the jumping of the particle from site to
site is a thermal process occurring at temperature $T_s$ while the
switching between the two potentials is also a thermal process but
occurring at a different temperature $T_r=+\infty$. Through the
system there is a net passage of heat from the reservoir at
temperature $T_r$ to the system at temperature $T_s$. As we
will now see, our algorithm nicely captures this.\\ We distinguish
the following four different states:
\begin{eqnarray}\label{steet}
(x,h) & \leftrightarrow & \mbox{`state'} \nonumber\\
(2n+1,1) && 1 \nonumber \\
(2n,1) &&2 \nonumber \\
(2n+1,2) &&3 \nonumber \\
(2n,2) && 4
\end{eqnarray}
Denote by $p_i$ the probability to find the particle in state $i$. Then
\begin{eqnarray}\label{master}
\frac{dp_1}{dt} &=& ( \gamma_2+\delta_2 )p_2 - (\gamma_1 +\delta_1 ) p_1
+ \Gamma(p_3 - p_1), \nonumber\\
\frac{dp_2}{dt} &=& ( \gamma_1 +\delta_1 )p_1 - (\gamma_2+\delta_2 ) p_2
+ \Gamma(p_4 - p_2) , \nonumber\\
\frac{dp_3}{dt} &=& 2 \gamma (p_4 -p_3) + \Gamma(p_1 - p_3),
\nonumber \\
\frac{dp_4}{dt} &=& 2 \gamma(p_3 - p_4) + \Gamma(p_2 - p_4) .
\end{eqnarray}
From (\ref{master}) we easily obtain the following probabilities in the
stationary measure:
$p_i= 1/4 + (-1)^{i+1} a_{i}$, where $a_1=a_2=a$ and $a_3=a_4=b$, with
\begin{equation}
a= \frac{1}{4}
\frac{(\gamma_2 +\delta_2 - \gamma_1 - \delta_1)(4\gamma + \Gamma)}
{(4\gamma + \Gamma) \Sigma + 4 \gamma \Gamma},
\end{equation}
\begin{equation}
b = \frac{1}{4}
\frac{(\gamma_2 +\delta_2 - \gamma_1 - \delta_1) \Gamma}
{(4\gamma + \Gamma) \Sigma + 4 \gamma \Gamma},
\end{equation}
and $\Sigma=\gamma_1 + \delta_1 + \gamma_2 + \delta_2$.
Let us now introduce the particle current in the stationary state:
\begin{eqnarray}
J^+ &=& p_1 \gamma_1 - p_2 \delta_2 + (p_3 - p_4)\gamma \label{cur1}\\
&=& p_2 \gamma_2 - p_1 \delta_1 + (p_4 -p_3) \gamma \label{cur2}.
\end{eqnarray}
Expression (\ref{cur1}) is the net current to the right at odd and
(\ref{cur2}) at even
lattice sites. Since there are no sources or sinks both expressions must be
equal (as they are).
Using the explicit expression for the stationary state we obtain
\begin{equation}
J^+= \frac{1}{2} \frac{\gamma \Gamma
(\gamma_1+\gamma_2-\delta_1-\delta_2)} {(4\gamma+\Gamma)\Sigma
+4\gamma\Gamma}.
\end{equation}
Apart from this particle current we can introduce yet another
current. It is the heat current measuring the amount of energy that
is displaced through our system.
\begin{eqnarray}
J_Q &=& \Gamma \ (p_4 - p_2) \ \epsilon + \Gamma \ (p_1 - p_3) \ \epsilon\\
&=& \Gamma \ T_s\
(p_4-p_2) \log
\frac{\delta_2}{\gamma_1},
\end{eqnarray}
where we used (\ref{delta2}) and stationarity.
$J_Q$ is the rate at which energy or
heat flows from the reservoir to our system at temperature
$T_s$.
%Again, by stationarity, it equals the current $\Gamma
%\ (p_1-p_3)
%\
%\epsilon$.
Let us now turn to the setup of Section \ref{subsec2.2}. We put
$\Omega = D([-T,T], \{ 1,2,3,4 \})$ the set of paths on the states
(\ref{steet}),
$\mathcal{S} = \{ [-T,T]: \ T \geq0\}$ the set of increasing time windows,
\begin{equation}
\pi_T(\omega)(t) = \omega(-t)
\end{equation}
and $\alpha([-T,T]) = 2T$.
Let $\mu$ be the space-time extension of the stationary state
$\rho= (p_1,p_2,p_3,p_4)$. Then
\begin{eqnarray}
\lefteqn{ R_T(\omega)= -\log \left[
\frac{d(\pi_T\mu)}{d\mu} (\omega)
\right] } \nonumber \\
&=& \int_{-T}^T \log \frac{r(\omega_{t^-},\omega_{t^+})}
{r(\omega_{t^+},\omega_{t^-})} dN_t,
\end{eqnarray}
where $N_t$ denotes a mean one Poisson process.
Therefore the entropy production
corresponding to $(\Omega, \mathcal{F}, \mu)$ is
\begin{eqnarray}
\lefteqn{
\mu(\dot{s}) = \lim_T \frac{1}{2T} \mu(R_T)=} \nonumber \\
&& p_1 \left[ \gamma_1 \log \frac{\gamma_1}{\delta_2} + \delta_1
\log \frac{\delta_1}{\gamma_2} \right] +
p_2 \left[ \gamma_2 \log \frac{\gamma_2}{\delta_1} +
\delta_2 \log \frac{\delta_2}{\gamma_1}
\right].
\end{eqnarray}
Using $\gamma_1\gamma_2 = \delta_1\delta_2$ together with the explicit
expression for the stationary state
we can rewrite this as
\begin{equation}
\mu(\dot{s})
= \Gamma \ [p_4 - p_2] \ \log \frac{\delta_2}{\gamma_1}
= J_Q \cdot \frac{1}{T_s} .
\end{equation}
Again we recognize the form `current $\cdot$ field', where the role
of the field is played by an inverse temperature difference. Since
we have taken $\Gamma$ constant independent of the particle
position, the effective temperature of the second reservoir is
$T_r=\infty$ and thus $\mu(\dot{s}) = J_Q/T_s = J_Q/T_s -
J_Q/T_r$. The particle current $J^+$ does not contribute to the
entropy production because we did not include an external load, see
\cite{JM}. Remark also that $\mu(\dot{s}) \geq 0$ with equality
only if $J_Q=0$. This happens when $\gamma \ \Gamma=0$ or
$\gamma_1=\delta_2$, $\delta_1=\gamma_2$ (
$\epsilon=0$) or $\gamma_1=\delta_1=\gamma_2 =\delta_2$ ($T_s=\infty$). \\
This shows that from our algorithm, the exact
expression for the entropy production is obtained. Of course, for
a thermodynamic interpretation, one really should be imagining lots
of particles.
\subsection{Particle exchange dynamics}
For particle exchange processes (PEP), the configuration space is
$X=\{0,1\}^{\Zbar^d}$, where $\xi(i)=1,0$ is interpreted as the presence
respectively absence of a particle at lattice site $i$. A PEP is a Feller
process on $X$ with generator given by
\begin{equation}
Lf(\xi) = \sum_{|i-j|=1} c(i,j,\xi) \left[
f(\xi^{ij}) - f(\xi) \right]
\end{equation}
where $|i-j|=\sum_{\alpha=1}^d |i_\alpha-j_\alpha|=1$
and
\begin{equation}
\xi^{ij}(k) = \left\{ \begin{array}{ll}
\xi(j) & k=i \\
\xi(i) & k=j \\
\xi(k) & \mbox{otherwise}.
\end{array}
\right.
\end{equation}
The exchange rates $c(i,j,\xi)$ are strictly positive, local
and translation invariant. As for spinflip processes, we put
\begin{equation}
\Omega=\Omega_{L_0,T_0}= D([-T_0,T_0],\{ 0,1 \}^{\Lambda_{L_0}}),
\end{equation}
\begin{eqnarray}
\pi_{T,L} (\omega )_t (i) &=& \omega_{-t} (i) \ t\in[-T,T],i\in\Lambda_L
\nonumber\\
&=& \omega_t (i) \ \mbox{ otherwise },
\end{eqnarray}
where $T\leq T_0$, $L\leq L_0$.
We then have the following analogue of Lemma \ref{spinfliplemma}:
\begin{lemma}
\begin{equation}
\frac{d\pi_{T,L} \mu}{d\mu} = \exp{(-R_{T,L})},
\end{equation}
with
\begin{equation}\label{dracula}
R_{T,L} (\omega ) = \sum_{i,j\in\Lambda_L,|i-j|=1}\int_{-T}^T
\log\frac{c(i,j,\omega_s)}{c(i,j,\omega^{ij}_s )} dN_s^{ij}
+ G_{T,L} (\omega),
\end{equation}
where
\begin{equation}
N^{ij}_s =\sum_{t\in [-T,s]} \frac{1}{2} |\omega_{t^+} (i) -\omega_{t^+} (j)
- \omega_{t^-} (i) + \omega_{t^-} (j)|,
\end{equation}
and $G_{T,L}$ is negligible in the sense (\ref{negli}).
\end{lemma}
{\bf Proof:} The lemma follows again from the Girsanov formula. The
basic jump processes are now indexed by nearest neighbour bonds
rather than by sites. As time-reversal invariant reference process
we choose the simple symmetric exclusion process with generator
\begin{equation}
L_0 f(\xi) = \sum_{|i-j|=1} \left[ f(\xi^{ij}) - f(\xi) \right].
\end{equation}
Again, $\mu_\xi$ and $\mu_\xi^0$ denote the pathspace measures
starting from a configuration $\xi \in X$ under the original and
reference dynamics respectively, and $\mu$ and $\mu_0$ denote the
pathspace measures starting from a stationary measure $\rho$ (resp.
$\rho_0$). We obtain
\begin{equation}
\frac{d\mu_\xi}{d\mu^0_\xi} =
\exp \left\{ \sum_{|i-j|=1} \int_{-T}^T \log c(i,j, \omega_s) dN^{ij}_s
- \left[
\int_{-T}^T c(i,j,\omega_s)ds -2T \right] \right\},
\end{equation}
and
\begin{equation}
\frac{d(\pi_{T,L} \mu)}{d\mu} \doteq
\exp \left\{
\sum_{|i-j|=1,\{i,j\}\subset \Lambda_L} \int_{-T}^T -\log
\frac{c(i,j,\omega_s)}{c(i,j,\omega^{ij}_s)} dN^{ij}_s \right\}.
\end{equation}
\Cox
The
entropy production
%in a stationary measure $\rho$
is
\begin{eqnarray}
\label{inter}
%\lefteqn{
\mu(\dot{s}) &=& \lim_{T,\Lambda} \frac{1}{2T|\Lambda|}
\int d\mu\left( \sum_{|i-j|=1, \{i, j\} \subset \Lambda} \int_{-T}^T \log
\frac{c(i,j,\sigma_s)}{c(i,j,\sigma^{ij}_s)} dN^{ij}_s \right) %}
\nonumber \\
&=& \lim_\Lambda \frac{1}{|\Lambda|}
\sum_{|i-j|=1,\{ i,j \} \subset \Lambda} \int \rho(d\xi)
c(i, j,\xi) \log \frac{c(i,j,\xi)}{c(i,j,\xi^{ij})}.
\end{eqnarray}
If $\rho$ is translation invariant then
\begin{equation}
\mu(\dot{s}) =
\frac{1}{2}\sum_{|i|=1}\int \rho(d\xi)
c(0, i,\xi) \log \frac{c(0,i,\xi)}{c(0,i,\xi^{0i})}.
\end{equation}
{\bf Examples: Asymmetric exclusion processes.} We consider the
case of bulk driven diffusive lattice gases as was done in
\cite{LS}, see also \cite{S}. For the asymmetric exclusion
process in dimension $d=1$,
\begin{equation}
c(i,i+1,\xi) = \xi(i)(1-\xi(i+1)) \frac{\mbox{e}^{E/2}}{2}
+ \xi(i+1)(1-\xi(i)) \frac{\mbox{e}^{-E/2}}{2}.
\end{equation}
the Bernoulli measure $\rho$, with
particle density $q$ is a stationary (non-reversible) measure.
Denoting by $\mu$ its space-time extension, we find for
the entropy production
\begin{eqnarray}
\label{hydro}
\mu(\dot{s}) &=& \int c(0,1,\xi) \log \frac{c(0,1,\xi)}{c(0,1,\xi^{01})}
\rho (d\xi) \nonumber \\
&=& E \ q \ (1-q) \ \sinh (\frac{E}{2}).
\end{eqnarray}
The particle current is defined by
\begin{equation}
j_{T,L} = \frac{1}{2TL^d}
\sum_{\{i,j\}\subset\Lambda_L, |i-j|=1}\int_{-T}^T [\omega_s (i) -
\omega_s (j)]dN^{ij}_s
\end{equation}
Notice the relation
\begin{equation}
\frac{\partial R_{T,L}}{\partial E}\doteq 2TL^d j_{T,L}.
\end{equation}
In the limit $T,L\uparrow\infty$,
the expectation of the particle current is precisely
\begin{equation}
j(E)= \int \rho (d\xi) c(0,1,\xi)(\xi(0)-\xi(1))=q \ (1-q)\
\sinh (\frac{E}{2}).
\end{equation}
This is also the quantity appearing as the current in the hydrodynamic
equation which in this case is the Burgers' equation for a
space-time density profile $q_t(r), r\in \Rbar$:
\begin{equation}
\frac{\partial q}{\partial t} = \sinh (\frac{E}{2})
\frac{\partial}{\partial r} (q(1-q)).
\end{equation}
Hence the entropy production can be written in the form
\begin{equation}
\mu (\dot{s} ) = E\cdot j(E),
\end{equation}
which corresponds to the dissipated power in a resistor according
to Ohm's law. As soon as the process is not time-reversal invariant
(i.e. $E\neq0$), the entropy production (\ref{hydro}) is strictly
positive.
It is not necessary to have a non-vanishing particle current in
order to have strictly positive entropy production. Consider e.g.
the one-dimensional asymmetric exclusion process with generator
\begin{equation}
Lf(\xi) = \sum_{i,j\in \Zbar} p(j-i)\xi(i) (1-\xi(j))
\left[ f(\eta^{ij}) - f(\eta) \right]
\end{equation}
where $p: \Zbar \rightarrow \Rbar^+ $ satisfies $ \sum_i p(i) = 1
$, $\sum_i ip(i)=0$ but $p(i) \neq p(-i)$. Then we find, starting
from the Bernoulli measure $\rho$ with density $q$:
\begin{equation}
\mu(\dot{s})= q(1-q) \sum_i \left(p(i)-p(-i)\right) \log \frac{p(i)}{p(-i)}
\end{equation}
which is clearly strictly positive, whereas the hydrodynamical
equation of this process is a linear diffusion equation (i.e., we
do not have macroscopic drift).
{\bf Remark:} The asymmetric exclusion process has stationary measures
which are non-translation invariant but reversible (i.e.,
the corresponding process is time-reversal invariant).
When we start the process from such a measure, the expectation
of $R_{T,L}$ in (\ref{dracula}) is zero but the random variable
is not negligible in the sense of (\ref{negli}). This shows that
the reversible measure has to be translation invariant
in order to conclude negligibility of
$R_{T,L}$.
\subsection{Diffusion processes}
In this section we consider, following \cite{LS}, three examples of
diffusion processes. A fluctuation theorem for diffusion processes
was first discussed in \cite{K}. Via the Girsanov formula we find a
Gibbsian representation for the path space measure and we can apply
our formalism to obtain the entropy production. We will use a
similar notation as in \cite{LS}.
{\bf Example : Diffusion with drift}\\
Consider the stochastic differential equation (\cite{Lips})
\begin{equation}\label{verg}
dx_t= [(c(x_t)-\frac{1}{2} a.\nabla U(x_t))+\frac{1}{2} \nabla .a (x_t)]dt
+\sqrt{a(x_t)} dW_t,
\end{equation}
The covariance matrix $a(x)$ is strictly positive, sufficiently
smooth and bounded, and $U\in \mathcal{C}^2$
satisfies $\int_{\Rbar^d} \exp (-U(x)) dx <\infty$.
Furthermore we suppose that the drift $c$ is sufficiently confining so that
there exists a unique stationary probability measure which is absolutely
continuous with respect to Lebesgue measure.
To put ourselves in the context of Section 2.2 we identify
\begin{eqnarray}
&&\Omega :=\Omega_T :=
\mathcal{C} ([-T,T],\Rbar^d)\nonumber\\
&&\mathcal{S}:= \{ [-T,T]: T\geq 0\}\nonumber\\
&&\pi_T (\omega ) (s):= \omega (-s )\nonumber\\
&&\alpha ([-T,T]):= 2T.
\end{eqnarray}
The transformation $\pi_T$ is time-reversal in the time window
$[-T,T]$. As before we denote by $\rho$ the stationary measure of
the process $x_t$ and $\mu$ the corresponding pathspace measure.
\begin{lemma}\label{diffusielemma}
\begin{equation}
\frac{d\pi_T\mu}{d\mu} = \exp (-R_T),
\end{equation}
with
\begin{equation}
R_T (\omega )= 2\int_{-T}^T a^{-1}(\omega_s ) c(\omega_s ) \circ d\omega_s
+ G_T,
\end{equation}
where $\circ$ stands for Stratonovich integral and $G_T$ is negligible
in the sense of (\ref{negli}).
\end{lemma}
{\bf Proof:}
If $c\equiv 0$ in (\ref{verg}), then the corresponding process
$\{ y_t:t\in [-T,T]\}$ satisfying
\begin{equation}\label{revverg}
dy_t = [-\frac{1}{2} a.\nabla U (y_t) + \frac{1}{2} \nabla .a(y_t)]dt
+\sqrt{a(y_t)} dW_t,
\end{equation}
is time-reversal invariant when started (at $t=-T$)
from the stationary measure
\begin{equation}
\rho_0 ( dx):= \frac{\exp{[-U(x)]} dx}{\int_{\Rbar^d} \exp{[-U(x)]} dx}.
\end{equation}
From the Girsanov formula (\cite{Lips}) we obtain the following
expression for the Radon-Nikodym derivative of the pathspace
measure of the process $\{ x_t :t\in [-T,T]\}$ with respect to the
process $\{ y_t:t\in [-T,T]\}$, starting at the same point
$u=x_{t=-T}=y_{t=-T}$:
\begin{eqnarray}\label{GIRSA}
\lefteqn{
\frac{d\mu_u}{d\mu^0_u} (\omega ) =
\exp \left\{
\int_{-T}^T a^{-1} (\omega_s) c(\omega_s) d\omega_s \right.%} \nonumber\\
- \frac{1}{2}\int_{-T}^T a^{-1} (\omega_s) } \nonumber \\
&&\left. \left[ (c-\frac{1}{2} a.\nabla U + \frac{1}{2} \nabla .a)^2 (\omega_s )
- (-\frac{1}{2} a.\nabla U + \frac{1}{2} \nabla. a)^2 (\omega_s ) \right]
ds \right\}.
\end{eqnarray}
In this expression the
integrals are Ito-integrals, i.e.,
\begin{equation}\label{ito}
\int_{-T}^T f(\omega_s )d\omega_s = \lim_{n\uparrow\infty,\Delta s\downarrow 0}
\sum_{j=1}^n f(\omega_{s_{j-1}}) (\omega_{s_j} -\omega_{s_{j-1}}),
\end{equation}
where $-T=s_0<s_1<\dots<s_n=T$ and $\Delta s=\max_j (s_j-s_{j-1})$.
% NOTE(review): some text appears to be missing here (the statement of
% (\ref{Breuk}) and the auxiliary lemma); the line was mangled where a `<'
% was eaten in the source. Reconstructed minimally -- confirm against the
% original manuscript.
From (\ref{GIRSA}), (\ref{Breuk}), and this lemma we obtain finally
\begin{equation}\label{final}
\frac{d(\pi_T \mu) }{d\mu}
\doteq \exp{\left[-2\int_{-T}^T a^{-1} (\omega_s)
c(\omega_s ) \circ d\omega_s \right]},
\end{equation}
where we put $\doteq$ because we start the process $\{ x_t:-T\leq t\leq T \}$
from its true stationary measure and not from
the reversible measure of the process $\{ y_t: -T\leq t\leq T\}$,
hence we have to include an extra factor of order one
(as $T\uparrow\infty$, since these two measures are
absolutely continuous) in the expression (\ref{final}).
This finishes the proof of Lemma \ref{diffusielemma}.\Cox
The entropy production corresponding to $(\Omega, \mathcal{F},\mu)$
is
\begin{eqnarray}
\mu(\dot{s}) &=& -\lim_T \frac{1}{2T} \Ebar_\rho \left[ \log
\frac{d(\pi_T \mu_u) }{d\mu_u} \right]
= \lim_T \frac{1}{T} \Ebar_\rho \left[
\int_{-T}^T a^{-1} (\omega_s) c(\omega_s) \circ d\omega_s \right]
\nonumber\\
&=& 4\int \rho (dx) [(a^{-1}(x) c(x)).c(x) +\frac{1}{2} \nabla.c (x)].
\end{eqnarray}
When $c=-1/2 a.\nabla\tilde{U}$ we have
\begin{equation}
-2\int_{-T}^T a^{-1} (x_s ) c(x_s)\circ dx_s = \tilde{U}(x_T )- \tilde{U}
(x_{-T} ),
\end{equation}
which is a boundary term. Hence, in the case of a conservative driving force
we get $\mu(\dot{s})=0$
%\begin{equation}
%\frac{dP_x\circ\pi_T}{dP_x}
%\doteq 1
%\end{equation}
as it should since then the process $\{ x_t: \ {-T}\leq t\leq T\}$ is
time-reversal invariant.
{\bf Example: Langevin Equation with temperature gradient}:
Consider the following stochastic differential equation
\begin{equation}\label{ltg}
\left\{ \begin{array}{lcl}
dx_t &=& v_t dt \\
dv_t &=& \left[ - \frac{\nabla U}{m}(x_t) - \gamma(x_t)v_t \right]
dt +\sqrt{\frac{2\gamma(x_t)}{m\beta(x_t)}} dW_t,
\end{array}
\right.
\end{equation}
which describes a particle subject to friction, a conservative
force and a random force.
We take $U\in {\cal{C}}^2$ and $\gamma,\beta\in {\cal{C}}^2$ positive,
bounded and bounded
away from zero. This guarantees the existence of a unique stationary probability
measure absolutely continuous with respect to Lebesgue measure.
To apply the formalism of Section 2.2, we
identify $ \Omega =\Omega_T:= \mathcal{C}([-T,T], \Rbar^{d}) $ the
velocity paths, $ \mathcal{S} := \{ [-T,T]: T \geq0\}$,
$\alpha([-T,T]) := 2T$ and $\pi_{T}\omega(s) = -\omega(-s)$. %if $s
%\in [-T,T]$.
The minus sign in the transformation $\pi_T$ comes from
the fact that $\omega(t)$ is interpreted as the velocity at time
$t$.
\begin{lemma}\label{langevinlemma}
\begin{equation}
\frac{d\pi_T\mu}{d\mu} = \exp (-R_T ),
\end{equation}
with
\begin{equation}
R_T = \int_{-T}^T \frac{1}{2} [mv^2_s + U(x_s)]\circ d\beta (x_s)+ G_T,
\end{equation}
where $G_T$ is negligible in the sense of (\ref{negli}).
\end{lemma}
{\bf Proof:}
We introduce the reference process
\begin{equation}
\left\{
\begin{array}{lll}
dX_t &=& V_t dt \\
dV_t &=& \left[ - \frac{\nabla U}{m}(X_t) - \frac{\gamma(X_t)}{\beta(X_t)}
\right] dt + \sqrt{\frac{2\gamma(X_t) m}{\beta(X_t) }} dW_t.
\end{array}
\right.
\end{equation}
In this process, $ \{V_t, \ t \in [-T,T]\}$ is $\pi$-invariant
when started from the
measure
\[ \rho_0(dV)= \frac{\mbox{e}^{-1/2mV^2}dV}{\int\mbox{e}^{-1/2mV^2 }dV}.
\]
Let $\mu_u$ and $\mu^0_u$ be the path measures in the two processes, starting at
a common initial condition $u=(x_0,v_0)=(X_0,V_0) \in \Rbar^d $.
From the Girsanov formula, we obtain
\begin{eqnarray}
\log \frac{d\mu_u}{d\mu^0_u} &=& \int_{-T}^T \frac{1}{2}m(1-\beta(X_s))V_sdV_s
\nonumber \\
&&+ \frac{1}{2} \int_{-T}^T \nabla U(X_s) V_s (1-\beta(X_s)) ds \nonumber \\
&& + \int_{-T}^T \Phi(X_s,V_s) ds,
\end{eqnarray}
where the last integral is $\pi_T$-invariant:
\begin{equation}
\left[\int_{-T}^T \Phi(X_s,V_s) ds \right] \circ \pi_T =
\int_{-T}^T \Phi(X_s,V_s) ds.
\end{equation}
Hence, using Lemma \ref{lemma2.1},
\begin{eqnarray}
\lefteqn{
R_T \doteq
\log \frac{ d\mu}{d\mu_0} -
\log \frac{ d\mu}{d\mu_0} \circ \pi_T
} \nonumber \\
& \doteq &
\int_{-T}^T m(1-\beta(x_s))v_s \circ dv_s
+\int_{-T}^T \nabla U(x_s) v_s (1-\beta(x_s)) ds \nonumber \\
&=&
\int_{-T}^T \frac{1}{2}m(1-\beta(x_s)) \circ dv_s^2
+\int_{-T}^T (1-\beta(x_s)) \circ dU(x_s) \nonumber \\
&\doteq&
\int_{-T}^T \left[ \frac{1}{2}mv_s^2 + U(x_s) \right] \circ d\beta(x_s),
\end{eqnarray}
where in the last line we have put `$\doteq$' because we omitted the boundary
terms of the partial integration.\Cox
We obtain for the entropy production:
\begin{equation}
\mu(\dot{s}) = \lim_T \frac{1}{2T} \mu(R_T)
=
\lim_T \frac{1}{2T} \Ebar_\rho \left[
\int_{-T}^T
\left[ \frac{1}{2}m v_s^2 + U(x_s) \right] \circ d\beta(x_s) \right]
\end{equation}
where once again, $\rho$ is a stationary measure and $\mu$ is its space-time
extension.
%The integral has the interpretation of the total work due to
%thermal gradients; it coincides exactly with the action functional of \cite{LS}.
{\bf Example 3: Langevin equation with non-conservative driving
force.}
\\
We now put $\beta$ constant in (\ref{ltg}) and consider
\begin{equation}
\left\{
\begin{array}{lll}
dx_t &=& v_tdt \nonumber \\
dv_t &=& \left[ \frac{1}{m} F(x_t) - \gamma(x_t)v_t \right] dt +
\sqrt{\frac{2\gamma(x_t) }{m\beta }} dW_t.
\end{array}
\right.
\end{equation}
where again $F$ is supposed to be sufficiently confining so that the velocity
process $v_t$ has a unique stationary probability measure which is absolutely
continuous with respect to Lebesgue measure.
\begin{lemma}\label{driftlemma}
As in (\ref{langevinlemma}),
\begin{equation}
\frac{d\pi_T\mu}{d\mu} = \exp (-R_T),
\end{equation}
with now
\begin{equation}
R_T = \int_{-T}^T \beta F(x_s ) v_s ds + G_T,
\end{equation}
where
$G_T$ is negligible in the sense of (\ref{negli}).
\end{lemma}
{\bf Proof:} The reversible reference process now reads
\begin{equation}
\left\{
\begin{array}{lll}
dX_t &=& V_tdt \nonumber \\
dV_t &=& - \gamma(X_t)V_t dt +
\sqrt{\frac{2\gamma(X_t) }{m\beta }} dW_t,
\end{array}
\right.
\end{equation}
i.e., in this process $\{V_t, \ t \in [-T,T]\}$ is $\pi$-invariant
when started from the measure
\[ \rho_0(dV)= \frac{\mbox{e}^{-1/2mV^2}dV}{\int\mbox{e}^{-1/2mV^2} dV}.
\]
From the Girsanov formula we obtain
\begin{eqnarray}
\log \frac{d\mu_u}{d\mu^0_u} &=&
\int_{-T}^T \frac{\beta}{2 \gamma(X_s)} F(X_s) dV_s
\nonumber \\
&& - \frac{1}{2}
\int_{-T}^T \frac{\beta}{2m\gamma(X_s)} F^2(X_s) ds \nonumber \\
&& + \frac{1}{2} \int_{-T}^T \beta F(X_s) V_s ds
\end{eqnarray}
and thus
\begin{equation}
R_T =
\log \frac{ d\mu}{d\mu_0}
-\log \frac{ d\mu}{d\mu_0}\circ\pi_T
\doteq
\int_{-T}^T \beta F(x_s) v_s ds.
\end{equation}
We used that for a $\mathcal{C}^1$ function $\phi : \Rbar^d \rightarrow \Rbar^d$
\begin{equation}
\left[ \int_{-T}^T \phi(X_s) dV_s \right] \circ \pi_T -
\left[ \int_{-T}^T \phi(X_s) dV_s \right] =0.
\end{equation}
This can be seen from the following calculation:
\begin{eqnarray}
\lefteqn{
\left| \left(\int_{-T}^T \phi(X_s) dV_s \right) \circ \pi_T -
\int_{-T}^T \phi(X_s) dV_s \right| } \nonumber \\
&=& \left| \lim_{n \uparrow \infty, \Delta S \downarrow 0} \sum_{i=1}^n
\phi(X_{s_i})(V_{s_i} - V_{s_{i-1}}) -
\lim_{n \uparrow \infty, \Delta S \downarrow 0} \sum_{i=1}^n
\phi(X_{s_{i-1}})(V_{s_i} - V_{s_{i-1}}) \right| \nonumber \\
&=& \lim_{n \uparrow \infty, \Delta S \downarrow 0} \sum_{i=1}^n
\left[ \phi(\int_{-T}^{s_i} V_r dr ) -
\phi(\int_{-T}^{s_{i-1}} V_r dr ) \right]
\left[
V_{s_i} - V_{s_{i-1}} \right] \nonumber \\
&\leq& \|\phi'\|_\infty \sup_{-T\leq s\leq T} |V_s| \sqrt{\langle V,V\rangle_T}
\lim_{n \uparrow \infty, \Delta S \downarrow 0}
\sqrt{ \sum_{i=1}^n
\left( s_i - s_{i-1} \right)^2} \nonumber \\
&=&0
\end{eqnarray}
where $\langle V,V\rangle_T$ denotes the quadratic variation process:
\begin{equation}
\langle V,V\rangle_T =
\lim_{n \uparrow \infty, \Delta s \downarrow 0}
\sum_{i=1}^n
\left( V_{s_i} - V_{s_{i-1}} \right)^2.
\end{equation}
\Cox
For this example we conclude that the entropy production corresponding to
$(\Omega,\mathcal{F},\mu)$ is given by
\begin{equation}\label{integralwork}
\mu(\dot{s}) = \lim_T \frac{1}{T}
\int d\mu\left[ \int_{-T}^T \beta F(x_s) v_s ds \right].
\end{equation}
If $F$ is conservative, i.e. $F(X_s)=-\nabla U(X_s)$,
then the integral $-\int_{-T}^T \beta F(X_s) V_s ds = U(X_T) - U(X_{-T})$
is a
boundary
term, making $\mu(\dot{s})=0$,
%\[ \frac{ d\P_v \circ \pi_T}{ d\P_v} \doteq 1 \],
expressing the time reversal symmetry.
Notice that (\ref{integralwork}) is not dependent on
$\gamma$, so that at least formally in the limit $\gamma \rightarrow 0$
(``zero noise limit''), we can use the same expression.
\medskip\noindent
{\bf Remark:}
There
is one generalization which we have not
considered so far and which is physically rather important. It
concerns the case of dynamics with memory. When the time-scales of
a reservoir and a subsystem have not been infinitely separated, memory
effects are present. Fortunately, in principle our setup can handle this
case. After all, a Markovian dynamics just corresponds to a short
range interaction in the time-direction for our space-time Gibbs
measure. If this interaction happens to be long range (but summable
in the appropriate sense), the formalism still applies unchanged.
Remark indeed that in order to apply the Girsanov formula for the stochastic
differential equation (\ref{verg}),
%of example 1,
we do not need that the drift is
a function $c_t (x)=c(x_t)$; it suffices that $c_t$ is an adapted process, hence
$x_t$ does not need to be a Markov process.
The only problem is to find the correct analogue of Lemma
\ref{lemma2.1} which will now involve anticipating stochastic integrals.
%We wish to postpone however
%a systematic study of this problem to a future publication.
\section{Deterministic dynamics}
\setcounter{equation}{0}
\medskip\noindent
{\bf Dynamical systems:} In recent years new ideas in
nonequilibrium statistical mechanics have emerged from the study of
smooth dynamical systems, see e.g. \cite{Ru4}. In particular, the
chaotic character of a time evolution plays an important role and,
depending on author and context, is argued to be responsible for
the positivity of entropy production and for the positivity of
transport coefficients. This, at first sight, is paradoxical since
the results obtained from the theory of dynamical systems apply to
small systems. It is certainly not the case that nonequilibrium
behavior (such as macroscopic irreversibility) is compatible with
systems having only a few degrees of freedom. One suggestion to
bridge this gap is contained in the Chaoticity Hypothesis of
Gallavotti and Cohen, \cite{G1,G,gc,gc1}. We will come back to this
below. We start with some general remarks.
Consider a discrete time dynamics on a compact connected Riemannian
manifold
$X$ defined by the map $T: X\rightarrow X$. Extra conditions will
have to be added but we prefer to postpone them until they are
explicitly needed. Trajectories starting from $v\in X$ are
sequences $(v,Tv,T^2v,\ldots)$. For every trajectory-segment
$(v_1,v_2,\ldots,v_n)$ we must have $v_k=T v_{k-1}$. \\ We assume
that $T$ is invertible and that there exists an involution $\theta$
on $X$ which leaves the Riemann metric $dv$ on $X$ invariant,
$\theta^2=1$, so that $\theta\circ T^{-1}= T\circ \theta$.
This is called reversibility. To see the relation with
(\ref{crux}) or (\ref{crux1}), it is crucial to observe that the
trajectory-segment $(v_{-n},v_{-n+1},\ldots, v_n)$ is allowed if
and only if the trajectory segment $(\theta v_n,\theta
v_{n-1},\ldots,\theta v_{-n})$ is allowed. One of the segments is
then of the form $(T^{-n}v,T^{-n+1}v,\ldots,T^nv)$ and the other
$(\theta T^n v,\theta T^{n-1}v,\ldots,\theta T^{-n}v)$ is allowed
because $T(\theta T^{k}v)=\theta T^{k-1}v$. Another related
consequence comes from the fact that if $T^n v =v$ for a certain
$n$, then also $T^n w = w$ for $w=\theta v$. In other words,
$\theta$ is a bijection on the set of $n$-periodic points
$\mathrm{Fix}\, T^n = \{v\in X : T^nv = v\}$. \\ Next, since we want to study steady
state properties, we consider time-invariant states. Most of
the time, there are plenty of them and it is important to select
the natural ones. This can of course only be decided from the
(partial) information we have on the particular system we are
interested in (for example via initial conditions, symmetries etc.)
At any rate, steady states $\mu$ describing the statistics of
trajectories are completely determined by the selection of an
invariant measure $\rho$ in the sense that the only randomness in a
trajectory comes from the initial data. It is therefore somewhat
artificial to use another notation for an invariant measure $\rho$
and for its corresponding steady state (pathspace measure) $\mu$
since, for an observable $f$ that depends on the configuration of
the system at times $n_1,n_2,\ldots,n_k$, we have
\begin{equation}
\mu(f(v_{n_1},\ldots,v_{n_k})) =
\rho(f(v,T^{n_2-n_1}v,\ldots,T^{n_k-n_1}v)).
\end{equation}
As far as we are aware, there are two main strategies to connect
steady states to Gibbs measures, getting the Gibbs formalism of
Section 2 at work also for deterministic dynamics. The first
strategy uses the concept of Markov partition and symbolic dynamics
and, for our purposes, is most useful for (if not limited to)
mixing Anosov diffeomorphisms $T$; we refer to
\cite{BK,BK1,%PY,
%PS,
Bo,Bo1,%BS,
Si,Si1} for older results and newer
extensions. The second strategy uses the definition of Capocaccia
in \cite{Ca}, and the equivalence of Gibbs states and equilibrium
states for homeomorphisms satisfying expansiveness and
specification, see e.g. \cite{Har}.\\ Whatever strategy is
taken, the entropy production would be defined in a similar way as
for stochastic dynamics: suppose that we can find a function
$\dot{S}_N(v) = \sum_{n=-N}^N \dot{s}(T^n v)$ with $\dot{s}(\theta(Tv)) =
-\dot{s}(v)$ such that for all continuous $f$ on $X$,
\begin{equation}\label{defdet}
\limsup_N \frac 1{N} \log \frac{\int \rho(dv) f(v)
e^{-\dot{S}_N(v)}}{\int \rho(dv) f(\theta v)}
= 0,
\end{equation}
then we call $\rho(\dot{s})$ the entropy production of the dynamics $T$
in the state $\rho$. This, again, should be compared with the
situation for Gibbs states $\tilde{\mu}$ (here, one-dimensional)
with $\theta$ in (\ref{defdet}) replacing the $\pi$ of Section 2,
see (\ref{crux}). The main difference is that there (local)
approximators $\pi_{N,L}$ of $\pi$ appear, in the sense that $\lim
\mu(f\circ
\pi_{N,L}) =
\mu(f\circ \pi)$ for all continuous functions $f$ and that was
constantly used for stochastic dynamics. Here however, in general, there is no
obvious candidate for such approximating $\theta_N$ for $\theta$.
Equation (\ref{defdet}) is not yet sufficient to derive a
fluctuation theorem for $\dot{S}_N$ since we would need a function
$f$ in (\ref{defdet}) depending on $N$. Suppose however that we
have the following strengthening of (\ref{defdet}):
\begin{equation}\label{largedev}
\limsup_{N} \frac{1}{N} \log \frac{\int\rho (dv)
\exp (NF(\mu_N (v))) \exp (-\dot{S}_N (v))}
{\int \rho (dv) \exp (N F(\mu_N (\theta v)))}=0,
\end{equation}
where $\mu_N (v) = \frac{1}{2N+1}\sum_{i=-N}^N \delta_{T^i v}$
is the empirical distribution
and $F$ is an arbitrary weakly continuous function on the space of probability
measures on $X$. If (\ref{largedev}) holds, then the following symmetry relation
holds
\begin{equation}
\limsup_N \frac{1}{N} \log \frac{\int \rho (dv)
\exp (-\lambda \dot{S}_N (v))}{\int \rho (dv) \exp (-(1-\lambda ) \dot{S}_N (v))} =0.
\end{equation}
For Anosov systems this program can be
completed.
\vspace{2mm}
\medskip
\noindent
{\bf Gallavotti-Cohen theorem:}
The Gallavotti-Cohen fluctuation
theorem can be seen as a result about a symmetry in the
fluctuations of the phase space contraction rate in the theory of
smooth reversible dissipative dynamical systems, see
\cite{gc,gc1,Ru4}. It selects a class of dynamical systems
(so-called mixing Anosov diffeomorphisms) where via the
existence of Markov partitions and symbolic
dynamics a one-to-one relation with a one-dimensional Gibbs measure $\tilde{\mu}$
with an exponentially decaying interaction can be established.\\ Consider minus
the logarithm of the Jacobian
determinant $J$ which arises from the change of variables implied by the dynamics.
We write $\dot{s} \equiv - \log J$, the phase space contraction
rate, and the object of interest is
\begin{equation}\label{gccur}
w_N(v) \equiv \frac 1 {2\rho(\dot{s}) N} \sum_{-N}^{N}
\dot{s}(T^n(v)),
\end{equation}
for large time $N$. $\rho$ is the SRB measure of the dynamics which
arises naturally from
\begin{equation}\label{srb}
\rho(f) = \lim_N \frac 1{N} \sum_0^N f(T^n v)
\end{equation}
corresponding to time-averages for almost every randomly chosen
initial point $v\in X$ with respect to the Riemann volume element $dv$
on $X$. One assumes (and sometimes proves) dissipativity:
\begin{equation}\label{popo}
\rho(\dot{s}) > 0.
\end{equation}
The fluctuation theorem of Gallavotti and Cohen states that
$w_N(v)$ has a distribution $\rho_N(p)$ with respect to the
stationary (SRB) state $\rho$ such that
\begin{equation}\label{flucthm}
\lim_N \frac 1{N \rho(\dot{s})p}\ln \frac{\rho_N(p)}{\rho_N(-p)} = 1
\end{equation}
always. In other words, the distribution of $w_N$ for $N$ large
satisfies some general symmetry property. The reader will of course recognize
the relation with (\ref{reader}).
The technical (mixing Anosov) assumption assures the uniform
hyperbolicity of the dynamical system. The use of symbolic dynamics
converts the study of entropy production into the framework of
statistical mechanics for one-dimensional lattice spin systems with
an exponentially decaying interaction. This is intrinsic in the
proof of \cite{gc,gc1} and it was explicitly remarked in Section 3
of \cite{BGG}.\\
The steady state $\mu$
(corresponding to the SRB-state $\rho$) is the $\gamma-$image of a
translation invariant Gibbs measure $\tilde{\mu}$ on
$\Omega:=G^{\Z}$, corresponding to the $d=0$ case of the previously
considered interacting particle systems.
The (de)coding $\gamma:\Omega\rightarrow X$ is continuous one-to-one almost
everywhere and satisfies $T\circ \gamma =
\gamma
\circ \tau$ where $\tau=\tau_1$ is the shift on $\Z$.
This is brought about via a finite (Markov) partition $(I_a, a \in
G)$ of $X$ from which we define $\ell(v) = a$ if $v\in I_a$. Given $\sigma
= \gamma^{-1}v\in \Omega$, there are many $w\in X$
for which $\ell(T^n w)=\sigma(n), |n|\leq N$. In the same way
there could be many $w'$ for which $\ell(T^n w') =
\ell(\theta\circ T^{-n} v) = \ell(T^n \circ \theta v), |n|\leq N$.
What we are interested in is to take the ratio of the corresponding
weights according to our pathspace measure $\mu$ (as in (\ref{crux})
or (\ref{crux1})). Following our general scheme, the logarithm of
this must be related to the entropy production just as in
(\ref{defdet}).\\ The reversibility plays as follows on the
symbols. First of all, again because of the Anosov character of
$T$, we can choose the partition such that $\ell(\theta v) =
\tilde{\theta}
\ell(v), v\in X$ for some involution $\tilde{\theta}$ on $G$.
Define $\pi=\gamma^{-1}
\circ
\theta
\circ \gamma$. In the same way we can define the local
transformation $\pi_N(\sigma)(n)=\tilde{\theta}(\sigma(-n))$ for
$|n|\leq N$ and $\pi_N(\sigma)(n)=\sigma(n)$
for $|n| > N$; obviously, $g\circ \pi_N
\rightarrow g\circ\pi$ for continuous $g$ on $\Omega$. In the same
way
\begin{equation}
\theta_N = \gamma \circ \pi_N \circ \gamma^{-1}
\end{equation}
approximates $\theta$. But now, (\ref{defdet}) relies on a
statement about the Gibbs measure $\tilde{\mu}$ and the
transformation $\pi_N$, just as in our Section 2: we can apply the
theory of large deviations for Gibbs states, see \cite{Lan}.
The
reason why the phase space contraction appears as entropy
production is that the SRB state $\mu$ (or, after transforming to
symbolic sequences, $\tilde{\mu}$) is a Gibbs state with respect to
the interaction $-\log J^u$ (which is H\"older continuous) where
$J^u >0$ is the expanding or unstable Jacobian. The Jacobian
determinant satisfies $J(\theta\circ T)= J^{-1}$ and $\theta$
interchanges the stable with the unstable directions ($J^s(\theta
Tv) = J^u(v)^{-1}$) so that
\begin{equation}
-\log J^u(\theta Tv) + \log J^u(v) = \log J(v).
\end{equation}
As a consequence, we recover the expression (\ref{cur}) where the
entropy production is related to the `relative energy' after
time-reversal:
\begin{equation}
\sum_{k=-N}^N \log J(T^k v) = \sum_{k=-N}^N[ (-\log J^u(T^{k} \theta
v)) - (-\log J^u(T^kv))] + O(1)
\end{equation}
where $O(1)/N \rightarrow 0$ as $N \uparrow \infty$.\\ The reason
why the quantity $\dot{s}$ can be identified with the change of
entropy in the steady state follows from the following simple
calculation. Define the (Shannon) entropy of a probability
distribution $m(dv)=m(v)dv$ on $X$ as
\begin{equation}
S(m) = - \int dv m(v) \log m(v).
\end{equation}
If $m$ is the density at time $n$, then, under the dynamics, the
density at time $n+1$ is
\begin{equation}
m'(v) = \frac{m(T^{-1}v)}{J(T^{-1}v)}
\end{equation}
and the change in entropy (gained by the system) is therefore
\begin{equation}\label{enttime}
S(m') - S(m) = \int dv m(v) \log J(v).
\end{equation}
Taking $n$ to infinity, the empirical probability distribution
approaches the SRB distribution $\rho$, as in (\ref{srb}).
Therefore, the amount of entropy produced by the system per time
unit is (\ref{popo}), see also \cite{Ru1,Ru2,Ru4}.
Even though the preceding discussion was mentioning
mostly technical points that are part of the theory of (Anosov)
dynamical systems, this was certainly not the final goal of the
authors. This is summarized via their chaoticity hypothesis: ``A
reversible many particle system in a stationary state can be
regarded as a transitive Anosov system for the purpose of computing
the macroscopic properties,'' see also e.g.
\cite{gc,gc1,G,G1,Ru3,G2}.
In fact, various numerical experiments have
shown extremely good agreement with the symmetries predicted by
Gallavotti and Cohen, e.g. in \cite{BGG}. The theorem originated
from numerical evidence in \cite{ECM}. These computer experiments
are carried out via so called thermostatted systems. These are
dynamical systems where mechanical forces are replacing the action
of reservoirs in keeping the energy of the system constant. More
theoretically, the theorem has various interesting consequences.
For example \cite{G} has been extending Green-Kubo type formulas to
arbitrary forcing fields for a class of nonequilibrium dynamics,
see also \cite{Ru4}. We see it therefore as a major argument (and
motivation) in favor of the suggested definition of entropy
production.
\section{Transient regime}
\setcounter{equation}{0}
So far we have been considering the
(nonequilibrium) steady state. However, our setup can just as well
be applied to transient regimes as e.g. described in
\cite{cr2, ja4}. We briefly comment here on the
mathematics behind the so-called nonequilibrium work relations and
how they fit into our framework. Let us start with the mathematics
in the easiest example of a (discrete time) Markov chain $\sigma_n$
on a finite configuration space $X$. Consider a probability measure
$\rho_i$ on $X$, $\rho_i(\xi) >0$, $\xi \in X$, as initial state
and the corresponding pathspace measure $P_N$ on $X^{N+1}$ for
which
\begin{eqnarray}
\lefteqn{
P_N \left(f(\sigma_0, \ldots, \sigma_N) \right)
= \Ebar_{\rho_i} \left[ f(\sigma_0, \ldots, \sigma_N) \right] }
\nonumber \\
&=& \sum f(\sigma_0, \ldots, \sigma_N) p(\sigma_N|\sigma_{N-1}) \ldots
p(\sigma_1|\sigma_0) \rho_i(\sigma_0)
\end{eqnarray}
with transition probabilities $p(\xi|\xi')>0$, $\xi, \xi' \in X$.
It is immediately verified that for all pairs of probability
measures $\rho$, $\rho'$ on $X$ with $\rho(\xi)$, $\rho'(\xi)>0$,
$\xi
\in X$, we have the relation
\begin{equation}
\label{rhoaccent}
\Ebar_{\rho'} \left[ f \circ \pi_N \right]
= \Ebar_{\rho} \left[ f \mbox{e}^{-\dot{S}_N + \log \rho'(\sigma_N)- \log
\rho(\sigma_0)} \right]
\end{equation}
where $(\pi_N \sigma)_n = \sigma_{N-n}$ and
\begin{equation}
\dot{S}_N(\sigma)= \sum_{k=1}^N \log \frac{p(\sigma_k|\sigma_{k-1})}
{p(\sigma_{k-1}|\sigma_k)}
\end{equation}
corresponds to the steady state entropy production, see \cite{M}:%also
%(\ref{pcacur}):
\[
\frac{\dot{S}_N(\sigma)}{N} \rightarrow \mu(\dot{s}) \]
$\mu$-almost surely, in the unique stationary measure. We now write
\begin{equation}
\rho(\xi) = \frac{\mbox{e}^{-\beta H(\xi)}}{Z}
\end{equation}
\begin{equation}
\rho'(\xi) = \frac{\mbox{e}^{-\beta H'(\xi)}}{Z'}
\end{equation}
and we choose
\begin{equation}
f(\sigma_0, \ldots, \sigma_N) =
\mbox{e}^{(1-\lambda)[\dot{S}_N(\sigma) - \log \rho'(\sigma_N)+ \log
\rho(\sigma_0)]} \quad (\lambda \in \Cbar).
\end{equation}
Substitution in (\ref{rhoaccent}) gives
\begin{equation}
\Ebar_{\rho'} \left[
\mbox{e}^{-(1-\lambda)[\dot{S}_N(\sigma) + \beta( H(\sigma_N) - H'(\sigma_0))]}
\right] = \frac{Z}{Z'}
\Ebar_{\rho} \left[
\mbox{e}^{-\lambda [\dot{S}_N(\sigma) + \beta( H'(\sigma_N) - H(\sigma_0))]}
\right].
\end{equation}
In particular, for $\lambda=1$, we get
\begin{equation}
P_N \left[ \mbox{e}^{-[\dot{S}_N(\sigma) + \beta( H'(\sigma_N) -
H(\sigma_0))]} \right] = \frac{Z'}{Z}
\end{equation}
where we started in $\rho_i=\rho$. This is a variant of the
nonequilibrium work relation appearing in \cite{cr2,ja4}.
More generally, such relations are easy to produce in
the context of stochastic dynamics (as in Section \ref{Sec3}) by
using the appropriate form of the trivial identity $\mu \left(
\frac{d(\pi_{N,L} \mu)}{d\mu} \right)=1$.
Instead of considering a stochastic dynamics on our system, we
could also consider a Hamiltonian time evolution on our system plus
reservoir. To be specific, let us take the configuration space of
the form $X= X_s \times X_r$ with a deterministic invertible
transformation $T:X \rightarrow X$. The momenta and positions of
the particles in the system are collected in the (first) variable
$v$ and $w$ will stand for the reservoir variable. $T$ preserves
the elementary volume: $d(T^{-1}(v,w))/d(v,w)
=1$, $d(v,w)=dvdw$. Suppose that the initial state $\rho_i$ has a
density
\begin{equation}
\rho_i(v,w)= \frac{\mbox{e}^{-H(v)/T_i}}{Z_i} \mbox{e}^{-S_i(w)}
\end{equation}
with partition function $Z_i=\int dv \mbox{e}^{-H(v)/T_i}$ and $\int
\mbox{e}^{-S_i}dw = 1$.
We also define
\begin{equation}
\rho_f(v,w)= \frac{\mbox{e}^{-H(v)/T_f}}{Z_f} \mbox{e}^{-S_f(w)}.
\end{equation}
Then,
\begin{eqnarray}
\lefteqn{
\int f \left( (v,w),T(v,w) \right) \rho_i(v,w)dvdw} \nonumber \\
&=& \int f \left(T^{-1}(v,w),(v,w) \right)
\frac{\rho_i(T^{-1}(v,w))}{\rho_f(v,w)}
\rho_f(v,w)dvdw
\end{eqnarray}
and thus, taking
\begin{equation}
f\left((v,w),(v',w') \right) = \mbox{e}^{-(1-\lambda)[H(v')/T_f - H(v)/T_i +
S_f(w')-S_i(w)]},
\end{equation}
we have
\begin{eqnarray}
\lefteqn{
\rho_i \left[ \mbox{e}^{-(1-\lambda)[H(v')/T_f - H(v)/T_i +
S_f(w')-S_i(w)]}
\right] } \nonumber \\
&=& \rho_f \left[
\mbox{e}^{- \lambda [H(v_{-1})/T_i - H(v)/T_f + S_i(w_{-1})-S_f(w)]}
\right] \ \frac{Z_f}{Z_i},
\end{eqnarray}
where $(v_i,w_i)= T^i (v,w), \ i=\pm 1$. Taking $\lambda=0$ recovers
the main relation of \cite{ja4}. If $\rho_f$ would really
correspond to the `final' state (the image of $\rho_i$ under $T$)
then $\rho_f(S_f) - \rho_i(S_i)$ would be the change in entropy and
$\log Z_f/Z_i = -( F_f/T_f - F_i/T_i)$ would correspond to a
change in Helmholtz free energy $F$.
{\bf Acknowledgement:} We thank Prof. H. Wagner for stimulating discussions and
for suggesting the molecular motor model of Section 3.2.
\begin{thebibliography}{99}
\small
\bibitem{an1} Antal, T., R\'{a}cz, Z. and Sasv\'{a}ri, L. (1997)
Nonequilibrium steady state in a quantum system: one-dimensional transverse
Ising model with energy current,
{\sl Phys. Rev. Lett.} {\bf 78}, 167.
%\bibitem{an2} Antal, T., R\'{a}cz, Z., R\'{a}kos A. and Sch\"{u}tz, G.M. (1998)
%Isotropic transverse XY chain with energy and magnetization currents,
%{\sl Phys. Rev. E} {\bf 57}, 5184-5189.
\bibitem{BGG} Bonetto, F., Gallavotti, G. and Garrido, P. (1997)
Chaotic principle: an experimental test, {\sl Physica D} {\bf 105},
226.
\bibitem{BK} Bricmont, J. and Kupiainen, A. (1997) Infinite dimensional
SRB measures, {\sl Physica D} {\bf 103}, 18--33.
\bibitem{BK1} Bricmont, J. and Kupiainen, A. (1996) High temperature
expansions and dynamical systems, {\sl Comm. Math. Phys.} {\bf
178}, 703--732.
\bibitem{Bo} Bowen, R. (1970) Markov partitions for Axiom A diffeomorphisms,
{\sl Amer. J. Math.} {\bf 92}, 725-747.
\bibitem{Bo1} Bowen, R. (1973) Symbolic dynamics for hyperbolic flows,
{\sl Amer. J. Math.} {\bf 95}, 429-460.
%\bibitem{BS} Bunimovich, L.A. and Sinai, Ya. G. (1988)
%Space-time chaos in coupled map lattices, {\sl Nonlinearity} {\bf
%1}, 491--516.
\bibitem{Ca} Capocaccia, D. (1976) A definition of Gibbs' states
for a compact set with $\Z^{\nu}$-action, {\sl Comm. Math. Phys.}
{\bf 48}, 85--88.
%\bibitem{cr1} Crooks, G.E. (1998)
%Nonequilibrium measurements of free energy differences for microscopically
%reversible Markovian systems,
%{\sl J. Stat. Phys.} {\bf 90}, 1481.
\bibitem{cr2} Crooks, G.E. (1999)
The Gallavotti-Cohen fluctuation theorem and the nonequilibrium
work relation for free energy differences, {\sl preprint},
cond-mat/9901352 v2.
\bibitem{EFS} van Enter, A.C.D., Fern{\' a}ndez, R. and Sokal A.D. (1993) Regularity
properties and pathologies of position-space renormalization
transformations: scope and limitations of Gibbsian theory, {\sl J.
Stat. Phys.} {\bf 72}, 879--1167.
\bibitem{ECM} Evans, D.J., Cohen, E.G.D. and Morriss, G.P. (1993)
Probability of
second law violations in steady flows, {\sl Phys. Rev. Lett.} {\bf
71}, 2401--2404.
\bibitem{ELS1} G. Eyink, J.L. Lebowitz, H. Spohn, {\it Microscopic origin
of hydrodynamic behavior: entropy production and the steady state}, in:
`Chaos, Soviet-American Perspectives in Nonlinear Science',
Hg. D.K Campbell, p. 367. American Institute of Physics, 1990
\bibitem{Gaa} Gallavotti, G. (1994)
Perturbation theory, {\sl Mathematical physics towards the 21st century},
eds. R.N. Sen and A Gersten, Ben Gurion University of the Negev Press.
\bibitem{G1} Gallavotti, G. (1996) Chaotic hypothesis:
Onsager reciprocity and fluctuation-dissipation theorem, {\sl J.
Stat. Phys.} {\bf 84}, 899-926.
\bibitem{G} Gallavotti, G. (1996) Extension of Onsager's reciprocity to
large fields and the chaotic hypothesis, {\sl Phys. Rev. Lett.}
{\bf 77}, 4334--4337.
\bibitem{G2} Gallavotti, G. (1998) Chaotic dynamics, fluctuations,
nonequilibrium ensembles, {\sl Chaos} {\bf 8}, 384--392.
\bibitem{gc} Gallavotti, G. and Cohen, E.G.D. (1995) Dynamical
ensembles in nonequilibrium statistical mechanics, {\sl Phys. Rev. Lett.}
{\bf 74}, 2694--2697.
\bibitem{gc1} Gallavotti, G. and Cohen, E.G.D. (1995) Dynamical
ensembles in stationary states, {\sl J. Stat. Phys.} {\bf 80},
931--970.
\bibitem{Geo} Georgii, H.-O. (1988) {\em Gibbs measures and phase transitions}, de Gruyter,
Berlin $\cdot $ New York.
%\bibitem{Gol} Goldstein, S., Kuik, R., Lebowitz, J.L. and Maes, C. (1989)
%From PCA's to Equilibrium Systems and Back, {\sl Comm. Math. Phys.}
%{\bf 125}, 71--79.
%\bibitem{Ha1} Haydn, N. T. (1994)
%Classification of Gibbs states on Smale spaces and one-dimensional lattice
%systems,
%{\sl Nonlinearity} {\bf 7}, 345-366.
%\bibitem{Ha2} Haydn, N. T. (1998)
%Generalised GibbsU states for expanding maps,
%{\sl preprint}.
\bibitem{Har} Haydn, N. T. and Ruelle, D. (1992)
Equivalence of Gibbs and equilibrium states for homeomorphisms
satisfying expansiveness and specification, {\sl Comm. Math. Phys.}
{\bf 148}, 155-167.
%\bibitem{ja1} Jarzynski, C. (1997)
%Nonequilibrium equality for free energy differences, {\sl Phys. Rev. Lett.}
%{\bf 78}, 2690-2693.
%\bibitem{ja2} Jarzynski, C. (1997)
%Equilibrium free-energy from nonequilibrium measurements: a master-equation
%approach, {\sl Phys. Rev. E} {\bf 56}, 5018-5035.
%\bibitem{ja3} Jarzynski, C. (1998)
%Equilibrium free energies from nonequilibrium processes,
%{\sl Act. Phys. Pol. B} {\bf 6}, 1609-1622.
\bibitem{ja4} Jarzynski, C. (1998)
Microscopic analysis of Clausius-Duhem processes, {\sl preprint},
cond-mat/9802249.
\bibitem{JM} Jarzynski, C., Mazonka, O. (1999)
Feynman's ratchet and pawl: an exactly solvable model, {\sl preprint}.
\bibitem{Ja} Jaynes, E.T. (1989) Clearing up Mysteries; the
Original Goal, in: Proceedings of the 8'th International Workshop
in Maximum Entropy and Bayesian Methods, Cambridge, England, August
1--5, 1988; J. Skilling, Editor; Kluwer Academic Publishers,
Dordrecht, Holland. See also in {\em Papers on Probability,
Statistics, and Statistical Physics}, D. Reidel Publishing Co.,
Dordrecht, Holland, R.D. Rosenkrantz, Editor. Reprints of 13
papers. See also http://bayes.wustl.edu/etj/node1.html.
%\bibitem{PY} Jiang, Miaohuang and Pesin, Y.B. (1997) Equilibrium
%Measures for Coupled Map Lattices: Existence, Uniqueness and
%Finite-Dimensional Approximations. Preprint.
\bibitem{Julicher} J\"{u}licher, F., Ajdari, A. and Prost, J. (1997)
Modeling molecular motors,
{\sl Rev. Mod. Phys.} {\bf 69}, No {\bf 4}, 1269-1281.
%\bibitem{KLS} S. Katz, J.L. Lebowitz, H. Spohn, {\it Stationary
%nonequilibrium states for stochastic lattice gas models of ionic
%superconductors}, J. Stat. Phys. {\bf 34}, 497 (1984)
\bibitem{K} Kurchan, J. (1998) Fluctuation theorem for stochastic dynamics,
{\sl J. Phys. A: Math. Gen.} {\bf 31}, 3719--3729.
\bibitem{Ku} K\"unsch, H. (1984) Non reversible stationary measures for infinite
interacting particle systems,
{\sl Z. Wahrsch. Verw. Gebiete} {\bf 66}, 407.
\bibitem{Lan} Lanford III, O.E. (1973) Entropy and equilibrium states in classical
statistical mechanics, in {\sl Statistical Mechanics and
Mathematical Problems (Batelle Seattle Rencontres 1971)}, Lecture
Notes in Physics No. 20 (Springer-Verlag, Berlin), 1--113. Comets,
F. (1986) Grandes d\'eviations pour des champs de Gibbs sur $\Z^d$,
{\sl C.R. Acad. Sci. Paris I} {\bf 303}, 511--513. Olla, S. (1988)
Large deviations for Gibbs random fields, {\sl Prob. Th. Rel.
Fields} {\bf 77}, 343--357.
%\bibitem{LMS}
%Lebowitz, J.L., Maes, C. and Speer, E.R. (1990) Statistical
%mechanics of probabilistic cellular automata, {\sl J. Stat. Phys.}
%{\bf 59}, 117--170.
\bibitem{LS} Lebowitz, J.L. and Spohn, H. (1999) A
Gallavotti-Cohen Type Symmetry in the Large Deviation Functional for Stochastic Dynamics,
to appear in J. Stat. Phys.
\bibitem{Ligg} Liggett, T.M. (1985) {\em Interacting Particle
Systems}, Springer, Berlin, Heidelberg, New York.
\bibitem{Lips} Liptser, R.S. and Shiryayev, A.N. (1977)
{\em Statistics of Random Processes I \& II}, Springer-Verlag,
New York, Heidelberg, Berlin.
\bibitem{M} Maes, C. (1999) The Fluctuation Theorem as a Gibbs
Property, to appear in J. Stat. Phys.
\bibitem{Magnasco} Magnasco, M.O. (1994)
Molecular combustion motors,
{\sl Phys. Rev. Lett.} {\bf 72}, No. {\bf 16}, 2656-2659.
%\bibitem{MV}
%Maes, C. and Vande Velde, K. (1997) Relative energies for
%non-Gibbsian states, {\sl Comm. Math. Phys.} {\bf 189}, 277--286.
%\bibitem{mort} Mortensen, R. E. (1969)
%Mathematical problems of modeling stochastic nonlinear dynamic systems,
%{\sl J. Stat. Phys.} {\bf 1}, No. {\bf 2}, 271-296.
%\bibitem{PS} Pesin, Y.B. and Sinai, Y.G. (1991) Space-time chaos
%in chains of weakly interacting hyperbolic mappings, {\sl Adv. Sov.
%Math.} {\bf 3}, 165--198.
\bibitem{Ru1} Ruelle, D. (1996) Positivity of
entropy production in nonequilibrium statistical mechanics,
{\sl J. Stat. Phys.} {\bf 85}, 1--25.
\bibitem{Ru2} Ruelle, D. (1997) Entropy production in nonequilibrium
statistical mechanics, {\sl Comm. Math. Phys.} {\bf 189}, 365--371.
\bibitem{Ru3} Ruelle, D. (1978) Sensitive dependence on initial
conditions and turbulent behavior of dynamical systems, {\sl Annals
of the New York Academy of Sciences} {\bf 356}, 408--416.
\bibitem{Ru4} Ruelle, D. (1998) Smooth dynamics and new
theoretical ideas in nonequilibrium statistical mechanics. Rutgers
University Lecture Notes, October-November 1997-98 (unpublished).
\bibitem{Sim} Simon, B. (1993) {\em The Statistical Mechanics of
Lattice Gases}, Volume 1, Princeton University Press, Princeton.
\bibitem{Si} Sinai, Ya. G. (1968) Markov partitions and C-diffeomorphisms,
{\sl Functional Anal. Appl. } {\bf 2}, 61-82.
\bibitem{Si1} Sinai, Ya. G. (1968) Construction of Markov partitions,
{\sl Functional Anal. Appl. } {\bf 2}, 245-253.
\bibitem{S} H. Spohn, Large Scale Dynamics of Interacting Particles. Springer,
Heidelberg 1991.
\end{thebibliography}
\end{document}