\documentclass[11pt,twoside]{amsart}
\usepackage{amsmath, amssymb,latexsym}
%\usepackage{showkeys}
% SECTIONS
% Sectioning wrappers that automatically attach a \label{sec:<key>};
% \refsec{<key>} is the matching cross-reference helper.
\newcommand\Section[2]{\section{#1}\label{sec:#2}}
\newcommand\Subsection[2]{\subsection{#1}\label{sec:#2}}
\newcommand\Subsubsection[2]{\subsubsection{#1}\label{sec:#2}}
\newcommand\Paragraph[1]{\paragraph{#1.-}}
\newcommand\refsec[1]{\ref{sec:#1}}
% THEOREMS AND SUCH
% All theorem-like environments share one counter, numbered within sections.
\newtheorem{Theorem}{Theorem}[section]
\newtheorem{Proposition}[Theorem]{Proposition}
\newtheorem{Lemma}[Theorem]{Lemma}
\newtheorem{Corollary}[Theorem]{Corollary}
\newtheorem{Algorithm}[Theorem]{Algorithm}
\newtheorem{Definition}[Theorem]{Definition}
% Remarks and examples are set in upright (roman) type via the pre* wrappers.
\newtheorem{preremark}[Theorem]{Remark}
\newenvironment{remark}{\begin{preremark}\rm}{\end{preremark}}
\newtheorem{preexample}[Theorem]{Example}
\newenvironment{example}{\begin{preexample}\rm}{\end{preexample}}
% Hand-rolled proof delimiters; \Box draws an open square end-of-proof mark.
% NOTE(review): \def\Box silently overwrites the amssymb symbol of the same name.
\def\bproof{{\noindent {\it Proof: }}}
\def\Box{$\sqcup\!\!\!\!\sqcap$}
\def\eproof{\hfill \Box \medskip}
% EQUATIONS
% Shorthands for unnumbered multi-line displays.
\def\beqn{\begin{eqnarray*}}
\def\eeqn{\end{eqnarray*}}
\def\disp{\displaystyle}
%Maps
\def\To{\longrightarrow}
%Symplectic objects
% Bold Greek symbols via \boldmath inside an \mbox.
\def\a{{\mbox{\boldmath $\alpha$}}}
\def\o{{\mbox{\boldmath $\omega$}}}
% Lie derivative, interior product, Poisson bracket, Lie bracket.
\newcommand\Lie[2]{ {\bf L}_{#1} #2 }
\newcommand\pint[2]{ {\bf i}_{#1} #2 }
\newcommand\PPoi[2]{ {\bf\{} #1,#2 {\bf\}} }
\newcommand\PLie[2]{ \mbox{\bf [} #1,#2 \mbox{\bf ]} }
% Binomial coefficient typeset as a one-column array.
\newcommand\comb[2]{\left(\begin{array}{c} {#1} \\ {#2}\end{array}\right)}
% Upright (roman) operator names.
\def\Id{ {\mbox{\rm Id}}}
\def\diag{ {\mbox{\rm diag}} }
\def\blockdiag {{\mbox{\rm blockdiag}}}
\def\dif{ {\mbox{\rm d}} }
\def\Dif{ {\mbox{\rm D}} }
\def\M{{\mathcal M}}
\def\Tau{{\mathcal T}}
%Initial points
% NOTE(review): \t overwrites the standard LaTeX tie accent; also {\bf\theta}
% does not embolden a Greek letter in math mode (cf. \a, \o above) — confirm
% the intended rendering.
\def\x{{\bf x}}
\def\y{{\bf y}}
\def\t{{\bf\theta}}
% DEFINITIONS AND REDEFINITIONS OF COMMANDS
% Blackboard-bold number sets (reals, complexes, integers, ...).
\def\nr{{\mathbb R}}
\def\nc{{\mathbb C}}
\def\nz{{\mathbb Z}}
\def\nn{{\mathbb N}}
\def\nq{{\mathbb Q}}
\def\nt{{\mathbb T}}
\def\na{{\mathbb A}}
\def\ns{{\mathbb S}}
\def\nca{\tilde \na}
\def\np{{\mathbb P}}
% Covering-space marker; norms || || and triple norms ||| ||| with tightened bars.
\newcommand{\cov}[1]{\tilde{#1}}
\newcommand{\norm}[1]{|\!|{#1}|\!|}
\newcommand{\nnorm}[1]{|\!|\!|{#1}|\!|\!|}
\def\bx{\bar x}
\def\by{\bar y}
\def\dx{\dot x}
\def\dy{\dot y}
% NOTE(review): \th (and \P, \S, \L below) overwrite standard LaTeX text
% commands (thorn, pilcrow, section sign, slashed-L); harmless only if those
% are never needed in text.
\def\th{\theta}
\def\bth{{\bar\theta}}
\def\ep{\varepsilon}
% Raised small circle used for composition of maps.
\def\comp{{\raise 1pt\hbox{\tiny $\circ$}}}
\def\pf{\mbox{\rm pf}}
% NOTE(review): overwrites the standard \mod operator; always produces "(mod 1)".
\def\mod{\!\pmod{1}}
% Calligraphic letters for sets, spaces and operators.
\def\N{{\mathcal N}}
% NOTE(review): \M duplicates the definition above; \T below duplicates \Tau.
\def\M{{\mathcal M}}
\def\HH{{\mathcal H}}
\def\P{{\mathcal P}}
\def\U{{\mathcal U}}
\def\L{{\mathcal L}}
\def\D{{\mathcal D}}
\def\T{{\mathcal T}}
\def\S{{\mathcal S}}
\def\SSS{{\mathcal S}}
\def\B{{\mathcal B}}
\def\A{{\mathcal A}}
\def\C{{\mathcal C}}
\def\K{{\mathcal K}}
\def\W{{\mathcal W}}
\def\F{{\mathcal F}}
\def\V{{\mathcal V}}
% More upright operator names.
\def\Spec{\mbox{\rm Spec}}
\def\Res{\mbox{\rm Res}}
\def\sym{\mbox{\rm sym}}
\def\antisym{\mbox{\rm antisym}}
\def\Eig{\mbox{\rm Eig}}
% "such that" separator for set-builder notation.
\def\st{\ |\ }
\def\re{\mbox{\rm Re}}
\def\im{\mbox{\rm Im}}
\def\ii{{\mbox{\bf i}}}
\def\I{{\mbox{I}}}
\def\dist{\mbox{\rm dist}}
% \Sec{a}{b} -> Gamma^a_b (spaces of sections); \note{who}{text} margin note.
\newcommand{\Sec}[2]{\Gamma^{#1}_{#2}}
\newcommand{\note}[2]{\marginpar{\footnotesize{{#1}: {#2}}}}
\usepackage{rotating}
%\renewcommand\psfig[1]{{}}
\title[Computation of invariant tori and their whiskers (II)]
{A parameterization method for the computation of
invariant tori and their
whiskers in quasi-periodic maps: \\
numerical algorithms.}
\author{A. Haro}
\address{
Departament de Matem\`atica Aplicada i An\`alisi, Facultat de Matem\`atiques,
Universitat de Barcelona.
Gran Via de les Corts Catalanes 585, 08007 Barcelona (Spain).}
\email[A. Haro]{haro@cerber.mat.ub.es}
\author{R. de la Llave}
\address{
Dept. of Mathematics,
University of Texas at Austin.
Austin TX 78712 (USA)}
\email[R. de la Llave]{llave@math.utexas.edu}
\setcounter{tocdepth}{3}
\begin{document}
\maketitle
\contentsline {section}{\tocsection {}{1}{Introduction: quasi-periodically forced systems and invariant manifolds.}}{2}
\contentsline {subsection}{\tocsubsection {}{1.1}{The parameterization method}}{5}
\contentsline {subsubsection}{\tocsubsubsection {}{1.1.1}{Quasi-periodic maps}}{5}
\contentsline {subsubsection}{\tocsubsubsection {}{1.1.2}{Equations for invariant tori}}{5}
\contentsline {subsubsection}{\tocsubsubsection {}{1.1.3}{Equations for invariant whiskers}}{6}
\contentsline {subsubsection}{\tocsubsubsection {}{1.1.4}{Periodic tori and subharmonic tori}}{8}
\contentsline {subsubsection}{\tocsubsubsection {}{1.1.5}{The case of differential equations}}{9}
\contentsline {subsection}{\tocsubsection {}{1.2}{Some information on the literature on numerical computation of invariant manifolds}}{10}
\contentsline {section}{\tocsection {}{2}{Numerical algorithms: an overview}}{11}
\contentsline {subsection}{\tocsubsection {}{2.1}{Discretization in terms of Fourier-Taylor series}}{12}
\contentsline {subsection}{\tocsubsection {}{2.2}{Newton method for invariant tori}}{13}
\contentsline {subsection}{\tocsubsection {}{2.3}{Normal dynamics: linear cocycle and transfer operator}}{14}
\contentsline {subsection}{\tocsubsection {}{2.4}{Computation of the stable and unstable subbundles}}{16}
\contentsline {subsection}{\tocsubsection {}{2.5}{Computation of Whiskers}}{19}
\contentsline {section}{\tocsection {}{3}{Computation of invariant tori}}{19}
\contentsline {subsection}{\tocsubsection {}{3.1}{Validation of numerical results}}{19}
\contentsline {subsection}{\tocsubsection {}{3.2}{Projection method}}{28}
\contentsline {subsection}{\tocsubsection {}{3.3}{Reducibility method}}{31}
\contentsline {subsection}{\tocsubsection {}{3.4}{Almost reducibility}}{37}
\contentsline {subsection}{\tocsubsection {}{3.5}{Continuation of invariant tori}}{38}
\contentsline {section}{\tocsection {}{4}{Computation of invariant subbundles}}{39}
\contentsline {subsection}{\tocsubsection {}{4.1}{Computation of spectral subbundles}}{40}
\contentsline {subsection}{\tocsubsection {}{4.2}{Computation of Floquet transformations}}{41}
\contentsline {subsubsection}{\tocsubsubsection {}{4.2.1}{Discretization method}}{42}
\contentsline {subsubsection}{\tocsubsubsection {}{4.2.2}{Rational approximation of the frequencies}}{42}
\contentsline {subsubsection}{\tocsubsubsection {}{4.2.3}{Power method applied to rank-one bundles}}{44}
\contentsline {subsubsection}{\tocsubsubsection {}{4.2.4}{Reducibility method}}{46}
\contentsline {section}{\tocsection {}{5}{Geometric properties and computation}}{47}
\contentsline {section}{\tocsection {}{6}{Computation of the whiskers of an invariant torus}}{49}
\contentsline {subsection}{\tocsubsection {}{6.1}{Computation of stable and unstable and other invariant manifolds}}{49}
\contentsline {subsection}{\tocsubsection {}{6.2}{The case of stable and unstable manifolds}}{52}
\contentsline {subsection}{\tocsubsection {}{6.3}{Invariant manifolds of rank 1 attached to an invariant torus}}{53}
\contentsline {section}{\tocsection {}{}{Acknowledgments}}{55}
\contentsline {section}{\tocsection {}{}{References}}{55}
\begin{abstract}
We develop numerical algorithms
for the computation of invariant manifolds in quasi-periodically
forced systems.
We show how to compute invariant tori
and invariant manifolds associated to them. In particular,
the stable and unstable manifolds of invariant tori,
but also {\sl non-resonant} invariant manifolds
associated to spaces invariant under the linearization.
These non-resonant manifolds include the slow manifolds
which dominate the asymptotic behavior.
The algorithms are based on the parameterization method. Rigorous
results about this method are proved in
\cite{HLlth}. In this paper, we concentrate on
numerical issues of algorithm. Examples of implementations of the
algorithms appear in the companion paper \cite{HLlex}.
\end{abstract}
\section{Introduction: quasi-periodically forced systems
and invariant manifolds.}
\label{Introduction}
In this paper we develop numerical algorithms
for the computation of invariant manifolds
in quasi-periodically forced systems.
The algorithms we present compute
quasiperiodic
{\em invariant tori} as well as {\em whiskers}
around these tori. See respectively,
Section~\ref{Computation of invariant tori}
and Section~\ref{Computation of the whiskers}.
In this paper, invariant tori will always mean
a torus in which the motion is quasi-periodic.
Whiskers of an invariant torus are
invariant manifolds that contain the torus and
characterized by the fact that they are tangent to
a chosen invariant space of the linearization
around the torus.
It is obvious that the tangent spaces of invariant manifolds
are invariant under the linearization.
There is a kind of converse to this observation.
It has been shown that, provided that the rates of
expansion of the linearization on an invariant space
satisfy some non-resonance condition, there is a
smooth invariant manifold.
We note that the usual (un)stable and strong (un)stable manifolds
satisfy automatically these non-resonance conditions. Hence, in
particular, we develop algorithms to compute stable and unstable
manifolds. We can, however, compute other invariant manifolds which include
sometimes the {\sl slow} invariant manifolds which are associated
to the less contractive (or less expansive) space and, hence,
control the approach to this stationary state.
In applications, one often has to consider
a system subject to
quasi-periodic perturbations. In this case, hyperbolic periodic orbits
(in particular stable steady states)
of the original system, persist as quasi-periodic
orbits in the perturbed system.
Moreover, the stable and unstable
manifolds of the periodic orbits persist as stable and unstable
manifolds of the quasi-periodic orbit.
The persistence of these manifolds can be proved using the theory
of normally hyperbolic manifolds. Classic works in this area
are \cite{HirschP68,Fenichel71,Fenichel74,Fenichel77,HirschPS77}.
We remark, however, that taking into account that the dynamics
on the torus is a rotation, one can improve significantly the results
of the general theory. In particular, one can improve the regularity
and show that, when the map is analytic, the varieties remain
analytic. Normally hyperbolic rotational tori are shown
in \cite{HLlth} to
depend analytically on
parameters. As it is well known, these
results are false for more general normally hyperbolic
manifolds, for which the bundles are only
finitely differentiable and there is only
finite differentiability with respect to parameters.
Besides the usual stable and unstable manifolds, one is often interested
in other invariant manifolds. For example, when we consider an
stable steady state, the manifolds that govern the asymptotic
behavior of the convergence to the
stable steady state are the manifolds associated to the least stable
eigenvalue. Even if these manifolds
associated to an invariant space are not always smooth,
provided that the eigenvalues satisfy certain non-resonance
assumptions, it has been shown in
\cite{Llave97, ElBialy01, CabreFL03a, CabreFL03b, Llave03}
that one can associate smooth invariant slow manifolds to
this invariant spaces.
When a system is perturbed quasi-periodically, the stable steady
state persists and so do the non-resonant invariant manifolds.
The persistence of these manifolds under quasi-periodic perturbations
is studied rigorously in \cite{HLlth}.
The method we use is the parameterization method
-- we will describe it in more detail in Section~\ref{sec:parameterization} --
which seeks to study functional equations of a parameterization
of the invariant manifold. As we will see later,
the parameterization method has important numerical advantages since it allows
to work with functions of a small number of variables
(we only need to deal with functions of $d$ variables to compute
objects of dimension $d$). As it is remarked in
\cite{HLlth}, the functional equations to be solved have the advantage
that they are differentiable in the unknowns. Hence, one can use
methods based on the derivatives and use the differentiability in
estimates etc. This allows one to derive efficient numerical
algorithms and to assess their reliability.
A theory of these invariant manifolds
for quasi-periodic systems (in particular,
quasi-periodic perturbation) is developed in
\cite{HLlth}. The theory of \cite{HLlth}, takes advantage of
the fact that the system restricted to the torus is a rotation and
obtains regularity results that are false for general
normally hyperbolic manifolds. The theory in \cite{HLlth}
is developed very closely to the methods whose implementation
we describe here. We will refer to \cite{HLlth} for
mathematical details. We will not repeat the mathematical
details since the goal of
this paper is numerical algorithms.
On the other hand, we will briefly summarize in
the following Section~\ref{sec:parameterization}
the equations we will consider
and summarize their geometric meaning.
One of the consequences of the theory in \cite{HLlth} is that
the algorithms satisfy \emph{a posteriori bounds}. That is,
if we compute an object that satisfies a functional equation
up to a small accuracy and can show that it satisfies some
non-degeneracy assumptions, we can conclude that there exists
an exact solution and that the distance from the
exact solution to the computed one is bounded by the error in the
solution of the functional equation.
This paper is organized as follows:
In Section~\ref{sec:parameterization}
we will present some generalities on the
parameterization method. In particular, we present the
functional equations we will be studying.
In Section~\ref{numtori} we will present some generalities on
the numerical algorithms and some general purpose
numerical algorithms for nonlinear problems
(e.g. the Newton method) applied to our case. We will also
discuss
the choices of discretizations
we consider.
More sophisticated algorithms that take advantage of
the structure of the problem, its geometry and
special features such as reducibility
are discussed in Section~\ref{Computation of invariant tori}
and Section~\ref{Computation of the whiskers}. These sections
also include discussions of computation of other invariant
objects such as invariant subbundles.
The details of concrete implementations of these
algorithms as well as representative results
are discussed in more detail in \cite{HLlex}.
\begin{remark} \label{ellipticcomputation}
Even if the algorithms we present
are justified here by the theory of
normally hyperbolic manifolds, it is interesting to
note that the methods we present, notably, the
method of reducibility in Section~\ref{Reducibility_method}
work empirically when the
manifolds present some elliptic component.
The theory of persistence of manifolds with normally
elliptic components is less developed than the theory of
normally hyperbolic manifolds, since it is known since
\cite{Mane78} that the only manifolds which are
topologically robust under all $C^1$ perturbations
are normally hyperbolic. On the
other hand, there are many theories of persistence of
normally elliptic manifolds. For example, in the case
that the systems have some Hamiltonian structure and that there
are extra parameters, one can prove persistence for large sets
of parameters, \cite{Eliasson88a,Poschel89,JorbaS96}.
Particularly interesting for applications to numerical
computation is the theorem
in \cite{JorbaLZ99} which shows that if there is a family
which solves approximately the invariance and reducibility
equation and which is not degenerate, then, excluding a
set of small measure, we can find invariant tori
which are also reducible which are very close to the numerically
computed ones.
The estimates presented in \cite{JorbaLZ99} show
that the measure of parameters that needs to be excluded
is extremely small.
{From} the numerical point of view, this means that if
we run the algorithms presented here in a system
with normally elliptic
\note{A}{He cambiado hyperbolic por elliptic}
components, provided that the
non-degeneracy conditions in \cite{JorbaLZ99}
are satisfied, then there will be a true invariant
torus if we choose the parameter in a set of a very
large measure in a small ball.
In the paper \cite{HLlex} we will present some examples of
these calculations. In particular, we will present examples of
computations of bifurcations from a normally elliptic
torus to a normally hyperbolic one.
(The phenomenon of bifurcation of tori has
been studied rigorously in \cite{ChencinerI79a,ChencinerI79b,Sell79, BroerHTB90}).
\end{remark}
\subsection{The parameterization method}
\label{sec:parameterization}
In this section, we formulate the
functional equations that we will be considering
in the rest of the paper.
For simplicity, we will start discussing maps.
In Section \ref{paramflows}, we will discuss
the easy modifications needed to discuss
flows. We note that the mathematical results for
flows can be deduced from those for maps
(by taking the time one maps). See \cite{HLlth}.
On the other hand, in practical
applications, it may be advantageous to apply methods that are
specifically designed for flows. A similar situation happens
in the simpler case of periodic orbits \cite{Capella}.
\subsubsection{Quasi-periodic maps}
A {\em rotating map} with frequency vector $\omega\in\nr^d$ is a
diffeomorphism $\bar F:\nr^n\times\nt^d\to\nr^n\times\nt^d$
given by the skew product
\begin{equation}
\label{qpsystem}
\begin{array}{lcl}
\left(\begin{array}{c} x \\ \th \end{array}\right)
&\To&
\left(\begin{array}{c} F(x,\th) \\ \th + \omega
\end{array}\right).
\end{array}
\end{equation}
One should think of $\th$ as an internal ($d$-dimensional)
phase which keeps evolving at a linear speed. The internal
phase affects the motion of the variables $x$ but it is
not affected by them.
\begin{remark}
We will almost always assume that
$\omega\in\nr^d$ is non resonant
(that is, for all $k\in\nz^d\setminus\{0\}$
we have $ k\cdot \omega \notin \nz$), which means that the dynamics on the
base torus $\nt^d$ is ergodic.
If $\omega \cdot k \in \nz$ for some $k \in \nz^d - \{0\}$,
the torus $\nt^d$ is
foliated by a one-parameter family of $(d-1)$-dimensional tori.
The motion on each of these $(d-1)$-dimensional tori
has the same frequency $\hat \omega\in\nr^{d-1}$.
By repeating the process, if necessary, we
arrive to the study of $r$-parameter families of $d-r$-dimensional tori
on which the motion is non-resonant.
Hence, the results for resonant
$\omega$ reduce to results for families of non-resonant
tori.
\end{remark}
\subsubsection{Equations for invariant tori}
In this paper we will study invariant tori
by looking for parameterizations
in which the motion is given by
a rotation with the frequency of the external perturbation.
That is, we seek maps $K: \nt^d \to \nr^n $
in such a way that
\begin{equation}
\label{ieit}
F(K(\th),\th)= K(\th+\omega)\ .
\end{equation}
Consider the graph of $K$
\[
\K= \{ (K(\th),\th) \st \th\in \nt^d\} \ .
\]
We observe that \eqref{ieit}
is equivalent to saying that $\K$ is invariant
under the skew-product \eqref{qpsystem}. Conversely, in
\cite{HLlth} it is shown that
a wide class of tori can be written in this way.
Moreover, it is also shown in \cite{HLlth}
that, assuming that $\omega$ is non-resonant,
the set $\K$ -- a graph over $\th$,
determines the map $K$ uniquely.
\subsubsection{Equations for invariant whiskers}
To study whiskers of a torus, we seek
maps
$W: \nr^m \times \nt^d \rightarrow \nr^n$
and
$\Lambda: \nr^m \times \nt^d \rightarrow \nr^m$
in such a way that
\begin{eqnarray}
\label{whiskerparam}
F( W(\eta, \th), \th) &=& W( \Lambda(\eta,\th), \th + \omega) , \\
\label{Lambdaeq}
\Lambda(0, \th) &=& 0 \ .
\end{eqnarray}
We note that the equation \eqref{whiskerparam}
again implies that the graph of $W$
\[
\W = \{ (W(\eta, \th), \th) \st \eta\in \nr^m\ ,\th \in \nt^d\}
\]
is invariant under the
map \eqref{qpsystem}. The equation \eqref{Lambdaeq} implies that
\[
\K = \{ (W(0, \th), \th) \st \th \in \nt^d \}
\]
is invariant.
Indeed, using \eqref{Lambdaeq}, we
see that setting $K(\th) = W(0, \th) $ we obtain a $K$
which satisfies the invariance equation
\eqref{ieit}. Therefore, the manifold $\W$ contains the torus
$\K$.
\begin{remark}
In the following,
even if the torus and its whisker are the
ranges of $K$ and $W$ respectively, we will often abuse the
language and refer to
$K$ and $W$ as a torus and a whisker.
\end{remark}
\begin{remark}\label{nonuniqueness}
We note that, in contrast with \eqref{ieit}, which have a
unique solution, the equations \eqref{whiskerparam},
\eqref{Lambdaeq} have always several solutions.
If $W, \Lambda$ are a solution, for any $ \mu \in \nr - \{0\}$,
so are
$\tilde W, \tilde \Lambda$ defined
by
\begin{equation}
\label{scaling}
\begin{split}
\tilde W(\eta, \th) & = W(\mu\eta, \th) \\
\tilde \Lambda(\eta, \th) & = \mu^{-1} \Lambda(\mu \eta, \th)
\end{split}
\end{equation}
The change of variables in \eqref{scaling} corresponds
to changing the units in the variables
along the directions transversal to the torus.
This non-uniqueness, affects the algorithms we can use
to solve the equations (e.g. a Newton method or
a contraction method, which imply uniqueness cannot
be applied without modifications). Also,
once we decide on a numerical algorithm, a proper choice of
the scale helps with the numerical properties.
For the purposes of the algorithmic implementation, we
just note that the linearizations of the whisker
$W_1(\th)= \Dif_\eta W(0,\th)$ and $\tilde W_1(\th)= \Dif_\eta \tilde W(0,\th)$
satisfy $\tilde W_1(\th) = \mu W_1(\th)$. Hence
the choice of the different parameterizations is related to
the choice of solutions for $W_1$. As we will see,
$W_1$ will be required to satisfy an equation
\eqref{linearization} which determines it up to a multiple.
\end{remark}
In the case that $m$, the dimension of
the whiskers, is $1$, that $\omega$, the frequency of the external
perturbation, is Diophantine
and that there are appropriate non-resonance conditions
-- satisfied e.g. in the case that the one-dimensional manifold
is the strong stable manifold --,
the equation \eqref{whiskerparam}
can be taken just
\begin{equation} \label{whiskerparam1d}
F( W(s, \th), \th) = W(\lambda s, \th + \omega) \ ,
\end{equation}
where $s= \eta$ is a one-dimensional parameter.
See Section~\ref{Computation of the whiskers} for
details of the computation in this case.
In the case that $|\lambda| < 1$ in \eqref{whiskerparam1d},
we see that, since
\[
F^k( W(s, \th), \th) =
W(\lambda^k s, \th + k \omega)\ ,
\]
the iterates of a point in $\W$ converge to a point in $\K$.
That is, the manifold $\W$ is a submanifold of the
stable manifold.
In general, to obtain submanifolds of the stable manifold,
it suffices that
\begin{equation} \label{contraction}
|\Lambda(\eta,\th) | \leq \lambda |\eta|, \qquad
\lambda < 1
\end{equation}
which does not assume that the
contraction rate is uniform. Even if the theory allows
more general conditions,
for practical applications, \eqref{contraction} will be
enough. Note that if we consider iterates of the original
map, and \eqref{contraction} holds for them, then applying the
algorithms to the iterates, the theory in
\cite{HLlth} shows that we obtain manifolds invariant for
the original map.
We note that $\Lambda$ -- or $\lambda$ in the one-dimensional
case -- is part of the unknowns that need to be computed.
It corresponds to finding
\note{A}{Gramatica: es ``to finding'' o ``to find''}
a representation of
the dynamics on the stable manifold. For example, the
$\lambda$ is the Lyapunov multiplier on a small neighborhood of
the invariant manifold.
In fact, by taking derivatives in \eqref{whiskerparam} with respect to
$\eta$, and evaluating them in $\eta= 0$, we obtain the equation
for the linearization of the whisker:
\begin{equation}
\label{linearization}
\Dif_x F( W(0,\th), \th) \Dif_\eta W(0, \th) =
\Dif_\eta W(0, \th + \omega) \Dif_\eta\Lambda(0, \th) \ .
\end{equation}
Similarly, if we take derivatives with respect to $s$ on
\eqref{whiskerparam1d} and evaluate them at $s = 0$, we
obtain:
\begin{equation}
\label{linearization1d}
\Dif_x F( W(0, \th), \th) \frac{\partial W}{\partial s}(0, \th) =
\frac{\partial W}{\partial s}(0, \th + \omega) \lambda \ .
\end{equation}
Hence, we obtain that the vector space based at $K(\th)= W(0, \th)$
spanned by $W_1(\th)= \Dif_\eta W(0, \th)$
is mapped into the corresponding vector
space based at
$K(\th + \omega)= W(0, \th + \omega)$ spanned by
$W_1(\th+\omega)= \Dif_\eta W(0, \th+\omega)$
through the linear map
$\Lambda_1(\th)= \Dif_\eta\Lambda(0, \th)$. In the one-dimensional case,
we obtain also that the expansion rate is constant.
There may be, of course, several
bundles invariant under the linearization.
For each of these invariant bundles, we can
try to find an invariant manifold.
Once we choose the invariant bundle, we determine
$W_1(\th)$ and $\Lambda_1(\th)$.
As we will see in Section~\ref{Computation of the whiskers} (See
also Section 4 in \cite{HLlth}) under non-resonance
conditions in the spectrum, once we have made the choice
of the invariant bundle, we can determine the Taylor
expansion in $\eta$ of $W$ and of $\Lambda$.
In Section~\ref{Computation of the whiskers}, we
will discuss the numerical issues arising in these computations.
In particular, we will discuss algorithms to compute
invariant subbundles in Section~\ref{invbundles}.
We note that the invariant subbundles may
be topologically non-trivial. Examples of invariant
bundles which are not trivial happen naturally in the
study of resonant tori and their bifurcations.
\begin{remark}
The parameterization method is very well adapted to numerical
computations. Note that the functional equations that we
study involve only functions of the minimum number of
variables possible. That is, to study invariant objects
of dimension $d$ we only need to work with functions
of $d$ variables, irrespective of what is the dimension of
the ambient space or the dimension of the stable and unstable
manifolds.
Also, the parametric representation is a very efficient
way to describe
objects such as stable and unstable manifolds, which turn around
very much. Descriptions as graphs of functions run into trouble
when the functions turn around.
Given the functional analysis theory developed
in \cite{HLlth}, the method obtains directly estimates on the
derivatives of the manifold.
We also note that the functional analysis framework discussed in
\cite{HLlth} shows that the operators considered are
continuous or differentiable in the unknowns. This leads to
a systematic application of the implicit function theorem to
obtain regularity with respect to parameters. The good analytical
properties of the operators considered are presumably related
to the numerical stability properties of the algorithms
discussed more in detail in \cite{HLlex}.
We also note that we can compute not just the usual stable
and unstable manifolds of the invariant objects but also
some non-resonant invariant manifolds, which in some cases
correspond to slow invariant manifolds. The relevance of
these manifolds is that, since they correspond to the
stable eigenvalues closest to the unit circle, they
are the leading asymptotic term of the approximation of orbits
to the invariant torus.
\end{remark}
\subsubsection{Periodic tori and subharmonic tori}
\label{periodictori}
The previous theory adapts rather easily to
the discussion of periodic tori,
that is tori that get mapped to themselves after
$N$ applications of \eqref{qpsystem}.
It suffices to find $N$ functions
$K_i: \nt^d \to \nr^n $, $i = 1, \ldots, N$,
in such a way that
\begin{equation}
\label{ieitN}
F(K_i(\th),\th)= K_{(i+1) \bmod N}(\th+\omega)\ .
\end{equation}
Of course, the $K_i$ satisfy the invariance equation for
the $N$-th power of the map and, conversely, the fixed
points of the $N$-th power correspond to an
$N$-periodic orbit.
However, we note that sometimes it is numerically more
stable to introduce more variables and study
\eqref{ieitN} even if it requires more
storage. The discussion of trade offs in implementations
are discussed in \cite{HLlex}.
Similar considerations can be made for
the study of whiskers around periodic tori.
In the numerical applications in \cite{HLlex},
we will present calculations of tori and their whiskers
of period $2$, and higher.
The equations \eqref{ieit} and
\eqref{whiskerparam}
are easily adapted to find subharmonic tori
and their whiskers.
Subharmonic tori are invariant
tori whose basic frequencies are submultiples of those of the perturbation.
For the sake of simplicity, assume that the vector of frequencies of a subharmonic torus is $\tilde \omega= \omega/N$, where $N\in\nn$ (if the submultiples are different, say $N_1,\dots,N_d$, we can take $N$ as their minimum common multiple).
Let $\tilde K:\nt^d\to \nr^n$ be the parameterization of the torus, in such a way that
\[
\tilde \K= \{ (\tilde K(\varphi),N\varphi) \st \varphi\in \nt^d \}
\]
is invariant under \eqref{qpsystem}. Then, $\tilde K$ satisfies the invariance
equation
\begin{equation}
\label{liftieit}
F(\tilde K(\varphi),N\varphi) = \tilde K(\varphi + \omega/N)\ .
\end{equation}
From the geometrical point of view, this procedure is equivalent to taking
$N$-coverings of the torus. The map $N\cdot:\nt^d\to \nt^d$ given by
$\varphi\to \th= N\varphi$
defines obviously such an $N$-covering, and the lifting of \eqref{qpsystem} is
\begin{equation}
\label{liftqpsystem}
\begin{array}{lcl}
\left(\begin{array}{c} x \\ \varphi \end{array}\right)
&\To&
\left(\begin{array}{c} \tilde F(x,\varphi) \\ \varphi + \tilde \omega
\end{array}\right)
\end{array} \ ,
\end{equation}
where $\tilde F(x,\varphi)= F(x,N\varphi)$ and $\tilde \omega= \omega/N$.
Notice that \eqref{liftieit} is the equation \eqref{ieit} for the system
\eqref{liftqpsystem}. Similar considerations
apply for the whiskers
of these subharmonic tori.
Obviously, a torus (and a whisker) that is invariant for \eqref{qpsystem}
can be lifted as an invariant torus (and a whisker) of the lifting. This
observation
plays an important role in numerical computations (see \cite{HLlex}),
because, taking double coverings
(i.e., $N= 2$), we can make $1$ dimensional bundles orientable
and subharmonic whiskers appear naturally in resonances.
\subsubsection{The case of differential equations}
\label{paramflows}
A quasi-periodic differential equation is
an equation of the form
\begin{equation}\label{qpode}
\dot x = X(x, \omega t)
\end{equation}
where $X: \nr^n \times \nt^d \rightarrow \nr^n$.
The parameterization of an invariant circle
for a quasi-periodic vector field as in
\eqref{qpode} is given by a map $K: \nt^d \rightarrow \nr^n$
which satisfies the equation
\begin{equation}
\label{paramtorusode}
\Dif_\th K(\th) \omega= X( K(\th), \th) \ .
\end{equation}
The equation \eqref{paramtorusode} indicates that the
direction of the flow is tangent to the image and
that, moreover, in the parameterization of
the torus considered, the flow is
a rotation
of frequency $\omega$.
Similarly, to find a whisker for an invariant torus
we seek a map
$W:\nr^m\times \nt^d \rightarrow \nr^n$
and a quasi-periodic vector field given by
$\Lambda: \nr^m \times \nt^d
\rightarrow \nr^m$
in such a way that:
\begin{eqnarray}\label{paramwhiskerode}
\Dif_\eta W(\eta, \th)\Lambda(\eta, \th) + \Dif_\th W(\eta, \th) \omega &=&
X( W(\eta, \th), \th) \ ,
\\
\label{whiskerparamode}
\Lambda(0, \th) &=& 0 \ .
\end{eqnarray}
The map $W$ is the parameterization of
the torus and $\Lambda$ is a representation of
the dynamics along the stable direction.
The equation \eqref{paramwhiskerode}
implies that the graph of $W$ is invariant
(The vector field is in the tangent space of the graph of $W$).
The dynamics in the parameterization given by $W$ is
given by the rotation in the $\th$ variables and
by $\Lambda$ in the normal directions.
To make sure that the whiskers we consider are submanifolds of
the stable manifold (possibly, the whole stable manifold),
we impose $\Lambda(\eta, \th) \le - \lambda \eta$ for
some $\lambda > 0$.
The functional equations \eqref{qpode} and
\eqref{paramwhiskerode}, \eqref{whiskerparamode},
from the numerical point of view are very similar to
the corresponding equations for
quasi-periodic maps. The methods that we will develop
for maps have analogues for the case of differential
equations. One can study them either by a standard
Newton method or by numerical algorithms that take
advantage of the geometric features.
We will develop the easy changes in remarks.
Superficially, the equations \eqref{qpode},
\eqref{paramwhiskerode}, \eqref{whiskerparamode}
look harder to study rigorously since they involve
unbounded operators such as the derivative. This does not affect the
numerical implementation of the algorithms we describe since the
inverses of unbounded operators --- that we consider in the
Newton method --- are very reasonable. The theory of
the equations above is developed in \cite{HLlth}.
\subsection{Some information on the literature on
numerical computation of invariant manifolds}
Let us review some papers about computation of invariant objects, and see
where our methods fit in.
\cite{BroerOV97} uses the graph transform operator
\cite{HirschPS77}
to develop an algorithm to compute normally hyperbolic
invariant manifolds, and its stable and unstable manifolds.
The invariant manifold is
approximated by means of a finite simplicial complex.
The method produces linear approximations of the stable and
unstable manifolds, and their intersection provides an approximation
of the invariant manifold. So, the invariant manifold is not computed
directly but as a byproduct of
computing its stable and unstable manifolds, which are
higher dimensional objects.
Notice also that the globalization of the stable and unstable
manifolds is computationally hard, since only linear approximations are
provided. \cite{OsingaK98,OsingaF00} apply the method to compute invariant
tori in quasi-periodic systems, the ones considered in the present paper.
There are also algorithms based on set oriented numerical methods
(see \cite{DellnitzFG01} and the references therein).
These methods have been applied to the numerical approximation of invariant sets
(not only invariant manifolds, but also global attractors and chain recurrent
sets) as well
as other natural objects such as
invariant measures and almost invariant sets. The basic concept behind
these methods is a subdivision algorithm that covers the invariant object by
boxes and then the dynamical behavior on the set is approximated by a Markov
chain based on transition probabilities between elements of this covering.
Notice that the previous methods can be applied to rather general cases.
We will use a method specially fitted to the objects we are computing:
invariant tori and their whiskers in quasi-periodic systems. We parameterize
those objects with a set of coordinates that not only fits the shape of the
manifold but also its dynamics. There are two steps: computation of the
invariant torus and computation of their whiskers. The approach we
present is based on studying \eqref{ieit} and \eqref{whiskerparam}
respectively.
The only discretization we are going to
consider in the applications is to expand the
functions considered in
Fourier-Taylor series and to develop a toolkit to implement
functionals such as those entering in \eqref{ieit} and \eqref{whiskerparam}
and in the algorithms for their solution. See \cite{Simo89} for an
overview of methods based on the manipulation of Fourier-Taylor series.
Some of the considerations on the algorithms apply equally well to
other possible discretizations, but our discussions on
whether matrices are full or sparse, storage
requirements, running times etc. depend on the discretization
used.
The straightforward direct application of the Newton method
to the functional equations \eqref{ieit} involves the solution
of a linear equation whose dimension is the number of Fourier modes that
we are considering \cite{CastellaJ00}.
Since in applications this could be hundreds of thousands,
or even millions, full matrices of this
size are very large.
We will see that, by taking advantage of the
geometry of the problem, we can perform algorithms that do not require
to invert such a large matrix.
\note{A}{Este parrafo es nuevo}
The parameterization method for computation of invariant tori in differential
equations has precedents in \cite{DieciLR91}, where two approaches are
proposed:
\begin{itemize}
\item solution of a system of functional equations, that for
the systems considered here corresponds to solve \eqref{ieit}
once one uses the Poincar\'e return map;
\item solution of a system of first-order partial differential equations,
that correspond to solve \eqref{paramtorusode}.
\end{itemize}
The numerical solutions in \cite{DieciLR91} are constructed using
difference methods.
Fourier methods for computation of invariant tori for
differential equations based on both approaches have been introduced in
\cite{MingyouKM97} (see also \cite{Trummer00}), where the study of
spectral and pseudospectral methods is done in detail under dissipativity
conditions. Numerical approximations of invariant KAM tori in periodically
forced Hamiltonian systems based on Fourier methods
appear in \cite{Warnock91}.
The methods we develop for the computation of invariant tori are based on
the abstract constructs of the theory of normally hyperbolic invariant tori,
for the case of normally hyperbolic tori, and in the KAM theory, for
reducible and almost reducible tori. By following the
geometric constructions closely and taking advantage of
the structure of the problem, we will be able
to avoid having to deal with large matrices. Details of
the implementation are in \cite{HLlex}.
\section{Numerical algorithms: an overview}
\label{numtori}
The bulk of this paper is devoted to
numerical methods for solving the equations \eqref{ieit}
%\begin{equation}
%\label{ieit2}
%F(K(\th),\th)= K(\th+\omega)\ .
%\end{equation}
for invariant tori,
as well as \eqref{whiskerparam}, \eqref{Lambdaeq}
for their whiskers.
%\begin{eqnarray}
%\label{wiskerparam2}
%F( W(\eta, \th), \th) &= &W(\Lambda(\eta, \th), \th + \omega) \ ,\\
%\label{Lambdaeq2}
%\Lambda(0, \th) &=& 0 \ .
%\end{eqnarray}
We will also study their analogues for differential equations
\eqref{paramtorusode}, \eqref{paramwhiskerode}
for differential equations.
We will describe algorithms that allow us
to, given an $F$,
\note{A}{Gramatica: ``a $F$'' o ``an $F$''}
produce a $K$ or a $W$ that
satisfies \eqref{ieit} or \eqref{whiskerparam} up
to a small error measured in a $C^r$ norm.
More precisely, we will present algorithms
that, given a moderately approximate solution,
produce a more approximate one.
By iterating these steps, one can
obtain extremely accurate solutions.
These refinements can be the basis
of a continuation method \cite{Homotopyreference};
in the problem of quasi-periodic perturbations, it is
natural to start from the uncoupled system and use as
a parameter the strength of the perturbation.
Another practical application is to
start the refinement from an approximate solution obtained
from asymptotic expansions or other non-rigorous,
but reasonable method.
Moreover, we will also describe algorithms that allow
us to assert that the approximate solutions constructed
satisfy hyperbolicity properties. In such a case,
the theory developed in
\cite{HLlth} establishes that there is
a true solution which is close to the
computed one. The theory
of \cite{HLlth} shows, moreover, that the distance between the
computed solution and the true one,
is bounded by the error in solving the appropriate equations
(times a factor that depends on the hyperbolicity properties
and the map).
This error as well as the hyperbolicity properties,
are accessible for the computation.
We also note that, even if rigorous results
are not completely developed, it is empirically
found that the methods we present here work also
in the presence of some elliptic components. See
\cite{HLlex} and Remark~\ref{ellipticcomputation}.
We hope that this is a stimulus for the
development of rigorous results in this direction.
The numerical study of a functional equation often involves
two different steps. The first step is the choice of
a discretization -- a set of functions specified by
a finite number of parameters. The second step is
to choose algorithms to
solve the resulting finite dimensional problem obtained
when truncating the problem. Finally, if one
wants to validate the results as indicated above, one
has also to choose spaces of functions in which to
apply the rigorous results of validation.
\subsection{Discretization in terms of Fourier-Taylor series}
In this paper and its companion \cite{HLlex}, all the methods will be based on
discretizing the
functions in terms of Fourier series for the invariant tori
\eqref{ieit}. We will use the notation
\[
f(\th) = \sum_{k\in\nz^d} f_k e^{2\pi \ii k \th}\ ,
\]
where $f$ is any function on the torus (including vector valued functions,
matrix valued functions, etc.), keeping in mind that
only a finite number of modes $f_k$ can be stored in the computer.
In the implementations in \cite{HLlex}, we have used
real Fourier series (expansions in $\cos$ and $\sin$).
This choice is very well adapted to the problem at hand, since
the quasi-periodic dynamics makes the objects we consider
very homogeneous. Hence, there is little advantage in
choosing methods such as finite elements or splines that can
be refined to deal with local features.
Fourier series are also well adapted to deal with
smooth functions and to obtain information on derivatives of
high order, a property which is useful in studies of
bifurcation problems.
Another important advantage of the basis of
Fourier series is that translations by a rotation are
diagonal in the space of Fourier series. As we will see, this
allows us to implement algorithms
that take advantage of the special
structure of the equations, notably reducibility.
The resulting algorithms are not only faster, but
also much more accurate and more reliable.
A final advantage is that Fourier series can be manipulated very
systematically by means of a toolkit that implements elementary
operations among them. Such toolkits for the manipulation of
Fourier series have been in use for a long time among
astronomers
\cite{Rom61,RicklefsJB83,Simo96,Simo97,Laskar89,Schmidt89}
and in the {\sl automatic differentiation} community
\cite{AutomaticDifferentiation}. See also the discussion of
polynomial manipulation in \cite{Knuth97}. A survey
particularly close to our applications is
\cite{Haro}.
\note{R}{The XXX should be your manuscript. I do not have
the definitive title. The following paragraph is
new.}
\note{A}{OK. A ver que te parece el titulo. Se admiten sugerencias!}
As we will see, the algorithms that
appear in this paper do not require anything
beyond the arithmetic operations from the
toolkit. A delicate step in the algorithms is
the solution of cohomology equations, which will require
special methods. As we will see, it can be reduced
to elementary operations and iterative
algorithms in many cases taking advantage of
the geometry of the problem.
In the companion paper \cite{HLlex} we
will discuss in more detail some of the numerical
issues involved in how to implement such a toolkit. Notably,
what are efficient and stable algorithms to multiply these series
or to perform non-linear operations.
We also note that Fourier series are natural series for
spaces of different regularities. Hence, once we decide
to work with Fourier series, there is no need to change
the algorithms to work in spaces of more or less derivatives
(as it would be the case, with splines, which would require
more coefficients and different representations to deal
with spaces of smoother functions).
As we will see in our numerical experiments \cite{HLlex}, using the
specialized
algorithms with the problem at hand, it has been possible
to use $10^5$ coefficients
\note{A}{Es 100000 y no 1000000}
working in a slightly out of date
desktop computer. This provides excellent accuracy both in the
functions and in their derivatives.
In this paper, we will not consider any other choice for
discretization of periodic functions. With the same spirit,
for the discretization of the whiskers \eqref{whiskerparam}, \eqref{Lambdaeq}
we have considered series involving polynomials
whose coefficients are Fourier series. These are Fourier-Taylor
series. Again, we note that the manipulation of these series
has been standard in the numerical literature for
a long time.
\subsection{Newton method for invariant tori}
\label{sec:Newton}
It will be interesting to start
discussing the Newton method.
This is a general purpose algorithm for nonlinear equations
and it is a natural starting point.
The discussion of the Newton method will bring forth
important features such as the role of the
differential of the functional equations considered.
The numerical shortcomings of the Newton method
will be the stimulus for a geometric analysis of
some of the elements of the Newton method.
Taking advantage of this geometric
analysis will lead to more efficient
algorithms.
The Newton method applied to
\eqref{ieit} is based on a repeated application of the following
iterative step:
{From} a guess $K$, whose error
is
\begin{equation}
\label{error}
r(\th)= F(K(\th-\omega),\th-\omega) - K(\th)\ ,
\end{equation}
construct a new approximation $\tilde K= K + h$
by solving
\begin{equation}
\label{largelinear}
\Dif F(K(\th-\omega),\th-\omega) h(\th-\omega)-h(\th)
= -r(\th) \ .
\end{equation}
Here $\Dif F$ denotes the derivative of $F$ with respect to the first
argument.
The equation \eqref{largelinear} is obtained by
substituting $K + h $ in \eqref{ieit}, expanding to
first order in $h$ and using the error \eqref{error}.
(For mathematically rigorous precisions of the meanings in
which the formal first order expansions are indeed so, we
refer to \cite{HLlth}. In this paper, we will concentrate in
numerical analysis issues.)
As we will see
later, solubility of \eqref{largelinear} is closely related
to normal hyperbolicity.
In the Fourier representation, the linear equation
\eqref{largelinear} of each step of Newton method
corresponds to an infinite linear system whose unknowns are
the Fourier coefficients of $h$.
If implemented straightforwardly,
the matrix that implements the linear equation is full
and can be very large.
We will refer to this as the ``\emph{large matrix problem}''.
More precisely, the size of
the matrix is $O(N^2)$ where $N$ is the number of
coefficients considered. Of course, the number of
coefficients $N = O(n D^d)$ where $D$ is the degree of
the Fourier series considered and $d$ is the dimension of
the torus considered and $n$ is the dimension of
the ambient space. Note that the dependence of
the number of coefficients on the ambient dimension is
only linear, whereas the dependence on the dimension of
the torus is exponential. When the tori are not very
smooth, the degree needed to achieve a prescribed
accuracy grows fast.
The large matrix problem causes several computational difficulties:
it increases the computational time, and the storage requirements.
Related to that, it decreases the accuracy of the calculations.
One of the main goals of this paper is to produce algorithms to
avoid both shortcomings of the large matrix problem.
The basic tool to avoid the large matrix problem will
be to take advantage of the geometric features of the linearized equation.
\subsection{Normal dynamics: linear cocycle and transfer operator}
In this section, we study geometric and
analytic properties
of the operators obtained by linearizing the functional
equation \eqref{ieit}.
A much more mathematical study appears in
\cite{trilogy1,trilogy2} and in \cite{HLlth}. Here, we will
just set up the notation and quote the results which will
be useful for the implementation of numerical algorithms.
Given a torus $K$, the monodromy matrix $M(\th)= \Dif F(K(\th),\th)$
defines a rotating linear map (or cocycle) by
\begin{equation}
\label{cocycle}
\begin{array}{lcl}
\left(\begin{array}{c} v \\ \th \end{array}\right)
&\To&
\left(\begin{array}{c} M(\th) v\\ \th + \omega
\end{array}\right).
\end{array}
\end{equation}
If the torus $K$ is invariant, this linear skew product represents the
linearization of the dynamics around the torus. If we think
of $v$ as an infinitesimal perturbation of the initial condition
$K(\th)$, then \eqref{cocycle} describes how the disturbance propagates.
The transfer operator acts
on sections $v:\nt^d\to\nr^n$ of $E= \nr^n\times\nt^d$ by means of
\begin{equation}
\label{transfer}
(\M_\omega v)(\th) = M(\th - \omega) v(\th - \omega) \ .
\end{equation}
\begin{remark}
Since in this paper we are assuming that the bundle $E$ is trivial,
the space of continuous sections of $E$, $\Sec{}{C^0}(E)$, is
identified with the space
of continuous vector valued functions, $C^0(\nt^d,\nr^n)$.
\end{remark}
\begin{remark}
Since we are interested in the analysis of the transfer operator using
functional analysis and plan to use spectral theory, it is
necessary to consider
complex spaces. Hence, we use the complexification
trick and consider the real valued sections which
appear naturally in geometric considerations as particular cases
of complex sections. See \cite{trilogy1,trilogy2}.
\end{remark}
In the following, we summarize some of the properties of
the spectrum and their relation with geometric
features. We refer to \cite{trilogy1,trilogy2} and
\cite{HLlth} for proofs and references to the literature.
\begin{itemize}
\item
An annular gap in the spectrum in the space of continuous sections
of the transfer \eqref{transfer}
associated to the cocycle \eqref{cocycle}, that is assumed to be
$C^r$ smooth, produces an invariant $C^r$ splitting
\begin{equation}\label{splitting}
E_\th = \nr^n \times \{\th\} = E^{<\lambda}_\th \oplus E^{ > \mu}_\th\ .
\end{equation}
Here, $\lambda<\mu$ give the size of the gap in the spectrum, that is,
the set
$\A_{\lambda,\mu}= \{ z \in \nc | \lambda \leq |z| \leq \mu\} $ is contained
in the resolvent of the transfer operator.
That is, the spectral projections (constructed using functional
analysis) correspond to projections over two subbundles invariant
under the linear cocycle. This interplay between the functional
analysis and the geometry is crucial for many results
starting in \cite{Mather68}.
The geometric bundles in \eqref{splitting} are characterized
by
\begin{equation} \label{characterization}
\begin{split}
& v \in E^{<\lambda}_\th \iff
| \! M(\th + (m-1) \omega) \cdots M(\th) v |
\le C \lambda^m | v|\quad \forall m > 0 \\
& v \in E^{> \mu}_\th \iff
| \! M^{-1}(\th + m \omega) \cdots M^{-1}(\th - \omega) v |
\le C \mu^{m} | v| \quad \forall m < 0 \\
\end{split}
\end{equation}
\item
The transfer operators can be considered as operators on
different spaces, depending on the regularity.
A remarkable fact about the transfer operators
based on rotations is that the spectrum does not depend
on such spaces \cite{trilogy2}. (This result is false if
the motion on the base is not a rotation \cite{trilogy1}.)
\item
If $\omega$ is irrational, the spectrum is rotationally
invariant. That is
\begin{equation}
\Spec(\M_\omega) = \bigcup_{i= 1}^k \A_{\lambda_{i}^{-},\lambda_{i}^{+}}\ ,
\end{equation}
where
$\lambda_1^- \leq \lambda_1^+ < \lambda_2^- \leq \lambda_2^+ < \dots <
\lambda_k^- \leq \lambda_k^+$.
Each {\em spectral annulus} $\A_i= \A_{\lambda^-_i,\lambda^+_i}$
has associated an invariant
subbundle $E_i$, characterized by the rates of growth of its orbits:
$E_i = E^{>\lambda^-_i - \varepsilon} \cap E^{<\lambda^+_i + \varepsilon}$
, and
$E= \bigoplus_{i= 1}^k E_i$. Notice that $k\leq n$.
\end{itemize}
As a consequence of the previous observations,
an invariant torus with an irrational rotation
is normally hyperbolic
if and only if the whole unit circle
is in the resolvent set of $\M_\omega$.
Also, checking that an invariant
torus with an irrational rotation $\omega$
is normally hyperbolic
is the same as checking that
$1$ is in the resolvent set of $\M_\omega$.
This is, of course, the same as the
solubility of
\eqref{largelinear} which appears in the Newton method.
When the torus is not invariant but only approximately
invariant -- the situation that happens in
numerical analysis --
the solubility of \eqref{largelinear}, which allows us
to take a Newton step,
amounts to the
hyperbolicity of the cocycle associated to
the linearization on the torus.
In retrospect,
one of the main motivations of the authors
for developing a general
theory of cocycles
was precisely the
study of numerical algorithms in which one
has to linearize around approximately
invariant tori.
\subsection{Computation of the stable and unstable subbundles}
\label{invbundles}
Now, we describe the computation of the invariant subbundles.
Besides their intrinsic interest, the invariant subbundles
will be crucial in the design of efficient algorithms
for the calculation of invariant tori and will also be
the base of calculation of whiskers.
In this section, we will concentrate in the stable and unstable
subbundles. In Section~\ref{Computation of invariant subbundles},
we will describe algorithms for the computation of
more general subbundles, which will be the basis of the computation
of other manifolds.
For the computer representation of the invariant subbundles,
we will assume that they are trivial. That is
$E^s \simeq \hat E^s = \nr^{n_s}\times \nt^d$ and
$E^u \simeq \hat E^u = \nr^{n_u}\times \nt^d$, where $n_s$ and $n_u$ are
the dimensions of the stable and the unstable subbundles, respectively.
Of course, these are the subbundles that are the easiest to deal with, since
they can be constructed using global frames. An {\em adapted frame}
is a set of sections
$v_1^s,\dots v_{n_s}^s,
v_1^u,\dots v_{n_u}^u: \nt^d\to\nr^n$ such that
\[
E_\th^s=
\mbox{\rm span}\{v_1^s(\th),\dots v_{n_s}^s(\th)\} \ , \
E_\th^u=
\mbox{\rm span}\{v_1^u(\th),\dots v_{n_u}^u(\th)\} \ .
\]
By defining the matrices
\[
V^s(\th)=
\left(v_1^s(\th),\dots v_{n_s}^s(\th)\right) \ , \
V^u(\th)= \left(v_1^u(\th),\dots
v_{n_u}^u(\th)\right) \ .
\]
we produce the matrices
$\Lambda^s(\th)$ and $\Lambda^u(\th)$, of dimension $n_s\times n_s$
and $n_u \times n_u$, respectively. The condition that the
bundle is invariant amounts to:
\begin{equation}\label{invariantbundle}
M(\th) V^s(\th)=
V^s(\th+\omega) \Lambda^s(\th) \ , \
M(\th) V^u(\th)=
V^u(\th+\omega) \Lambda^u(\th) \ .
\end{equation}
The matrix $\Lambda^s(\th)$ represents the dynamics on the bundle
$E^s(\th)$ expressed in the
coordinates we have chosen.
The matrix $\Lambda^u(\th)$ expresses the dynamics
on $E^u(\th)$ in the coordinates chosen.
The transfer operators $\L^s_\omega$ and $\L^u_\omega$,
associated to the vector bundle maps
$\Lambda^s$ and $\Lambda^u$
over the rotation $\omega$, respectively, satisfy
\[
r_s(\L^s_\omega) < 1 \ , r_s((\L^u_\omega)^{-1}) < 1 \ ,
\]
where $r_s$ means the spectral radius of a linear operator.
The theory of transfer operators
shows that it is possible to find an (adapted) Finsler metric
on $\hat E= (\nr^{n_s} \times \nr^{n_u}) \times \nt^d$.
That is, a $\th$-dependent norm on each fiber of the form
$|\hat v|_\th = |\hat v^s|_\th + |\hat v^u|_\th$ for
$\hat v= (\hat v^s,0)+(0,\hat v^u)
\in (\nr^{n_s}\times\nr^{n_u})\times \{\th\}$.
The dependence on $\th$ of the adapted Finsler metric may be assumed to be
analytic.
As it is standard, a Finsler metric
induces a norm on the space of continuous sections
$\hat v:\nt^d\to\nr^{n_s}\times\nr^{n_u}$:
\begin{equation}
\label{norm}
\norm{\hat v} = \max_{\th\in\nt^d} |\hat v(\th)|_\th.
\end{equation}
The property of adapted Finsler metrics that
is more delicate and more important for us is
that, when we consider the norms of the operators
measured in \eqref{norm}, we have:
\begin{equation}\label{arecontractions}
\begin{split}
& \norm{\L^s_\omega} \leq \lambda \ ,\ \\
& \norm{(\L^u_\omega)^{-1}} \leq \lambda\ ,
\end{split}
\end{equation}
where $\lambda<1$.
Notice we consider $\hat v_\th$
as the representation of a vector $v_\th$ in the adapted frame.
The use of the adapted metric allows us to simplify
significantly the expression of the a-posteriori bounds.
For practical implementations, we note that it suffices to
check that, in the usual coordinates \eqref{arecontractions}
is satisfied. If they are not, it suffices in practice to
study a power of the operator -- which is very close to
the theoretical construction of the adapted metric --.
Therefore, the adapted metrics do not need to
be computed explicitly in many cases.
We also note that, when we can apply the
reducibility method, described in Section \ref{Reducibility_method},
one of the byproducts is the calculation of an adapted norm.
See, in particular the discussion in Remark~\ref{validationreducibility}.
In summary, the vector bundle map
$P:(\nr^{n_s}\times\nr^{n_u})\times\nt^d \to \nr^n\times\nt^d$
(over the identity)
defined by
\[
P(\th) = (V^s(\th) , V^u(\th))\,
\]
represents the change of variables giving the adapted frame,
and the vector bundle map
$\Lambda: (\nr^{n_s}\times\nr^{n_u})\times\nt^d \to
(\nr^{n_s}\times\nr^{n_u})\times\nt^d$
(over the rotation $\omega$), defined by
\[
\Lambda(\th)= \blockdiag(\Lambda^s(\th),\Lambda^u(\th))
\]
represents the dynamics on the bundles in the adapted coordinates.
The invariance of the splitting reads
\begin{equation}
\label{inv_frame}
P(\th + \omega)^{-1} \Dif F(K(\th),\th) P(\th) -
\Lambda(\th) = 0 \ .
\end{equation}
Note that the equation \eqref{inv_frame} allows us
to parameterize non-trivial bundles in the
manifold. One example that we will see in
\cite{HLlex} that appears in practice is
when $d=1$ and $n = 2$. The map $P$ can be
considered as a mapping from $\nt^1$ -- the
base space of the invariant torus -- to $\np^1$,
the space of one-dimensional directions. It is quite
possible that the map $P$ has an index, which makes
the bundle non-trivial. Nevertheless, the
fact that the bundle can be given by
a representation of the form \eqref{inv_frame}
is not as general as the theory in \cite{HLlth}
allows. On the other hand, in order to be able to use
numerical methods based on Fourier series, it seems
that some weak version of trivializability is needed.
\begin{remark}
The assumption that the bundles are given by
an embedding as in \eqref{inv_frame} is
a strong assumption in general.
However, we note that in the case that the bundles have
rank one (i.e. they have one-dimensional fibers), the bundles can
be made trivial by just taking covers.
In \cite{HLlex}
we show some explicit examples in which this situation happens.
Notice that taking double coverings on the torus is well suited for
Fourier series (we have to double the angles).
When the bundles are higher dimensional there are other
obstructions for trivializations, notably characteristic
classes \cite{MilnorStasheff}.
Since the characteristic classes agree
with integrals of the curvature
\note{A}{Ponia ``agree with as an integrals ...''}
of
a connection
over the torus, they do not vanish by taking covers. Taking covers
will only multiply the class by an integer.
We however note that in \cite{HirschP68} one can find
a construction that shows that all bundles can be considered
as subbundles of a trivial bundle. The dynamics in the
complementary variables introduced in the construction
can be made very contractive.
There are other possibilities such as using local patches.
In this paper, we have not studied the numerical representation of
non-trivial bundles using local patches.
This problem deserves further investigation
since non-trivial bundles
given by \eqref{inv_frame} appear naturally in applications.
\end{remark}
\begin{remark}\label{rem:reducibility}
Notice that, in the present formulation,
\eqref{invariantbundle} is not an equation,
but just an identity which follows from the choice of
an adapted frame.
In Section~\ref{Projection_method} we will discuss
the computation of adapted frames.
As we will see later, it is possible to choose the
adapted frames in such a way that the
$\Lambda^{s,u}$ are particularly simple.
In particular in Section \ref{Reducibility_method}
we will show that, under some additional
conditions -- which are, however
verified in practice several times --
it is possible to make the matrices $\Lambda^{s,u}$
diagonal and independent of $\th$.
\end{remark}
\subsection{Computation of Whiskers}
Once we have a $K$ satisfying \eqref{ieit} -- or
at least satisfying it very approximately --
and invariant bundles of the linearization around it,
it is natural to wonder about
the existence of invariant manifolds containing the torus
and that are tangent to the invariant bundles.
These manifolds, the whiskers,
are non-linear analogues of the invariant bundles
for the linearization.
Following the philosophy of the parameterization method,
we will study the whiskers by
finding parameterizations for them.
To find a whisker $\W$, we will seek a function
$W: \nr^m \times \nt^d \to \nr^n$
and a function $\Lambda: \nr^m \times \nt^d \to \nr^m$,
such that they satisfy the invariance equation
\begin{equation}
\label{hyp}
F(W(\eta, \th),\th) = W(\Lambda(\eta,\th),\th+\omega),
\end{equation}
as well as
$\Lambda(0,\th)= 0$ and $W(0, \th)= K(\th)$.
Note that \eqref{hyp} implies that $\W$, the range of $W$, is invariant under
the skew-product. The interpretation of $\Lambda:\nr^m\times \nt^d\to\nr^m$
is the dynamics on the manifold.
We emphasize that the unknowns in \eqref{hyp}
are both $W$ and $\Lambda$. That is, we look for a parameterization
of an invariant manifold $\W$
through $\K$ such that the dynamics is a skew product.
Note that in the representation in \eqref{hyp} we are
assuming that the invariant bundle we are considering
is of the form considered in \eqref{inv_frame}.
This assumption is not needed in general,
and the theory of \cite{HLlth} does not require it.
On the other hand, this assumption allows us to
work with globally defined functions parameterizing the bundles
and to use numerical methods based on Fourier series.
\section{Computation of invariant tori}
\label{Computation of invariant tori}
\subsection{Validation of numerical results}
In this section we will establish some rigorous bounds for the convergence of
the Newton method.
We reproduce below the standard Newton-Kantorovitch theorem \cite{Kantor2}.
\begin{Theorem}[Newton-Kantorovitch]
\label{NK}
Let $B$ be a Banach space. Let $\norm{\cdot}$ be the $B$-norm.
Let $\F:\Omega\subset B \to B$ be a $C^2$ operator, defined in an open
set $\Omega$.
Let $r>0$ be such that
$\bar B_r = \bar B(x_0,r) \subset \Omega$ and such that:
\begin{itemize}
\item $\Dif \F(x_0)$ has continuous inverse. Denote
$\Gamma_0= \left(\Dif \F(x_0)\right)^{-1}$;
\item $\norm{\Gamma_0 (\F(x_0))}\leq \ep$, where $\ep>0$;
\item $\norm{\Gamma_0 \Dif^2 \F(x)} \leq \beta$ for all $x\in \bar B_r$,
where $\beta>0$.
\end{itemize}
Suppose that
\[
h= \beta\ep \leq \frac{1}{2}\ ,
\]
and define the constants
\begin{equation}
r_0 = \frac{1-\sqrt{1-2h}}{h}\cdot \ep \ , \
\ r_1 = \frac{1+\sqrt{1-2h}}{h}\cdot \ep \ .
\end{equation}
Then, if
\begin{equation}
r_0 \leq r\ ,
\end{equation}
then, it is possible to define recursively
a sequence
\[
x_{n+1} = x_n - (\Dif \F(x_n))^{-1} \F(x_n)\ ,
\]
from an initial approximation $x_0\in B$.
The sequence $x_n$ converges to a point $x^* \in B_r$,
which satisfies
\begin{equation}
\label{equation}
\F(x^*) = 0\ .
\end{equation}
Moreover,
\[
\norm{x^* - x_0} \leq r_0\leq 2\ep\ ,
\]
and the convergence is quadratic:
\begin{eqnarray}
\norm{x^* - x_n} \leq \frac{1}{2^n} (2h)^{2^n} \beta \ ,
\end{eqnarray}
for $n= 0, 1, \dots$
Finally, if
\begin{eqnarray}
h<\frac12 \ , \ r < r_1 \ &\mbox{or}&\ h= \frac12 \ , \ r \leq r_1
\end{eqnarray}
then $x^*$ is the only solution of \eqref{equation} in $\bar B_r$.
\end{Theorem}
The following Theorem~\ref{validation} makes explicit the conditions for the
application of the
Newton method to the problem of computation
of normally
hyperbolic invariant tori and the
invariant bundles of its linearization.
It is important to remark that the
conditions of Theorem~\ref{validation}
can be computed once we have
some mild information about the system
considered (upper bounds of derivatives in a ball)
and bounds on the size of the right hand side of
the invariance equations evaluated in the approximate objects.
These bounds are easily available from a knowledge of
the approximate solutions. As we will see later, in
practice, we can often obtain solutions which satisfy
the equation with an error -- measured in an analytic norm or
a $C^r$ norm --
which is a few thousand times the round off error.
It is shown in \cite{HLlth} that the normally hyperbolic tori are
as smooth as the map. Hence, for smooth maps, it is easy to
choose enough Fourier coefficients so that the
truncation error is negligible and, indeed the
main part of the numerical error is just the round--off.
The conditions of differentiability on the map are not
very restrictive and in \cite{HLlex}
we present the results of numerical implementations
for maps which are just Lipschitz. We will also see in \cite{HLlex}
that, for large perturbations,
\note{A}{Habia palabras repetidas.
A ver que tal queda la frase ahora}
it could happen that
there may be tori with very weak
hyperbolicity properties in such a way that, even if
they become smooth, their derivatives are large. These
tori also require a large number of Fourier coefficients.
Hence, once we have a computed solution, we just need to verify
some conditions and, if they are verified, Theorem~\ref{validation}
implies the existence of a true solution which is close -- in an
analytic or a $C^r$ norm --
to the computed solution.
Note that, in particular, Theorem~\ref{validation}
is independent of the method that was used to compute the approximate
solution. The numerical method used may include uncontrolled approximations
which are justified a posteriori by the fact that the error
obtained is small. Of course, the approximate solutions may
be produced by other methods than numerical calculation, for
example, asymptotic expansions. To validate asymptotic
expansions, it is enough to verify that the
asymptotic expansion verifies the equation with
a very small remainder. (In \cite{HLlth}, this
technique is used to show that a formal Taylor expansion of
$K$ is a bona-fide Taylor expansion and that, therefore,
$K$ is differentiable.)
\note{A}{Repasar la referencia de Lanford. Creo que esta}
If one was interested in \emph{``computer assisted proof''}
it would be enough to establish -- by computer, using, e.g.
interval arithmetic and rigorous functional analysis
\cite{Lanford87,Rana87,LlaveR90,KochSW96} -- the hypothesis of
Theorem~\ref{validation}. Of course, in normal
practice, one can obtain reliable assessments of
the error by the standard methods of changing the level
of truncation or the precision of the calculations.
The important point is that Theorem~\ref{validation} reduces the estimates of
the error in the result to the error in the evaluation of the functional.
\begin{Theorem}
\label{validation}
Let $F:\nr^n\times\nt^d\to\nr^n$ be a continuous map, $C^2$ with respect to
the $x$ variables, defining a skew-product
\begin{equation}
\label{skewproduct}
\begin{array}{lcl}
\left(\begin{array}{c} x \\ \th \end{array}\right)
&\To&
\left(\begin{array}{c} F(x,\th) \\ \th + \omega
\end{array}\right),
\end{array}
\end{equation}
where $\omega\in\nr^d$.
Suppose that for a continuous torus $K_0:\nt^d\to\nr^n$ there exists
\begin{itemize}
\item[1)] Two continuous matrix-valued maps
$P_1,P_2:\nt^d\to M_n(\nr)$, where $P_1$ represents
a vector bundle map (over the identity) giving
the change of variables to an adapted frame,
and $P_2$ is its approximate inverse;
\item[2)] A continuous block diagonal matrix-valued map
\[
\Lambda_0(\th) = \blockdiag(\Lambda_0^s(\th),\Lambda_0^u(\th))\ ,
\]
where
$\Lambda_0^s:\nt^d\to M_{n_s}(\nr)$ and
$\Lambda_0^u:\nt^d\to M_{n_u}(\nr)$ ($n= n_s + n_u$).
We recall that, as we will see later, the
$\Lambda_0^{s,u}$ are approximations of the linear
contraction on the bundles.
\item[3)] An (adapted) Finsler metric $|\cdot|_\th$, of the form
$|\hat v|_\th= |\hat v^s|_\th + |\hat v^u|_\th$
for $\hat v= (\hat v^s,0) + (0,\hat v^u) \in
\nr^n = \nr^{n_s}\times\nr^{n_u}$, and the induced norm
on continuous sections and vector bundle maps is denoted by $\norm{\cdot}$;
\item[4)] Positive constants $\rho,\sigma,\tau,\lambda, r, b$ with
$\lambda+\sigma+\tau<1$;
\end{itemize}
such that
\begin{itemize}
\item[5.1)]
$R(\th) = P_2(\th) (F(K_0(\th\!-\!\omega),\th\!-\!\omega) -
K_0(\th))$
satisfies $\norm{R}_{C^0}\leq \rho$ (as a section);
\item[5.2)]
$S(\th) =
P_2(\th+\omega) \Dif F(K_0(\th),\th) P_1(\th) - \Lambda_0(\th)$
satisfies
\[
\norm{S}_{C^0}\leq \sigma
\] (as a vector bundle map over
the rotation $\omega$);
\item[5.3)]
$T(\th) = P_2(\th) P_1(\th) - \Id$
satisfies
\[
\norm{T}_{C^0}\leq \tau
\] (as a vector bundle map over the identity);
\item[5.4)]
Let ${\mathcal L}^s_\omega$, ${\mathcal L}^{u}_\omega$
be the transfer operators
over the rotation $\omega$ induced by the vector
bundle maps $\Lambda^s_0$ and $\Lambda^u_0$ in $\nr^{n_s}\times\nt^d$ and
$\nr^{n_u}\times\nt^d$.
We assume we have
\[
\begin{split}
& \norm{{\mathcal L}^s_\omega} \leq \lambda \\
& \norm{({{\mathcal L}^u_\omega})^{-1}} \leq \lambda
\end{split}
\]
\item[6)] For all points $(x,\th)$ in the strip
\[
\bar D(K_0,\bar r) =
\{ (x,\th) \in \nr^n\times\nt^d \st
|P_2(\th) (x-K_0(\th))|_\th \leq \bar r\}\ ,
\]
where $\bar r= r(1+\tau)$,
\[
\norm{P_2(\th+\omega) \Dif^2 F(x,\th)
[P_1(\th)\cdot,P_1(\th)\cdot]} \leq b \
\]
(as a bilinear vector bundle map over the rotation $\omega$).
\end{itemize}
Define the constants
\[
\ep= \frac{\rho}{1-(\lambda+\sigma+\tau)} \ ,\
\beta= \frac{b}{1-(\lambda+\sigma+\tau)}\ , \
h= \beta \ep\ .
\]
Assume that
\begin{itemize}
\item[7)] $h< \frac{1}{2}$
\end{itemize}
and define
\[
r_0= \frac{1-\sqrt{1-2h}}{h}\cdot \ep \ .
\]
Finally, assume that
\begin{itemize}
\item[8)] $r_0\leq r$.
\end{itemize}
Under the hypothesis 1)-8):
\begin{quote}
\begin{itemize}
\item[a)] There exists an invariant torus $K_*:\nt^d\to\nr^n$
to which Newton method converges quadratically from the initial approximation
$K_0$, and
\[
|P_1(\th)^{-1} (K_*(\th)- K_0(\th))|_\th \leq r_0 < 2\ep\ .
\]
\item[b)] The torus $K_*$ is normally hyperbolic.
\end{itemize}
\end{quote}
Let $\hat\lambda= \norm{\Lambda_0}$.
Define the constant
\[
\mu= \frac{\lambda}{1-\lambda^2} \frac{1}{1-\tau} \left( b r_0 + \sigma
+ \hat\lambda \tau \right)\ .
\]
and suppose that, moreover:
\begin{itemize}
\item[9)] $\mu<\frac{1}{4}$.
\end{itemize}
Then:
\begin{quote}
\begin{itemize}
\item[c)] The stable and unstable bundles differ from the initial
approximate invariant bundles in a distance smaller than
$\frac{\mu}{\sqrt{1-4\mu}}$, and can be computed using the contraction
principle.
\end{itemize}
\end{quote}
\end{Theorem}
We note that the proof of Theorem~\ref{validation} we present is
very constructive and leads naturally to algorithms. Basically,
the idea of the proof is to describe an iterative algorithm
for the objects considered and estimate its convergence properties.
Later, we will develop other algorithms that are more
efficient from the numerical point of view but harder to estimate.
\begin{remark}
Notice that to apply the Newton-Kantorovitch theorem presented in
this paper we need $\F$ to be
$C^2$ when acting on continuous tori. To conclude
that, it suffices that $F$ is $C^2$
with respect to the $x$ variables and that it is continuous
with respect to $\th$. Of course, there are
less demanding Newton-Kantorovitch theorems,
but they seem to be less quantitative. It is notable
that we need much less regularity in the $\th$ variables.
\end{remark}
\begin{proof}
a) By using $P_1$ as a change of coordinates on the bundle $\nr^n\times\nt^d$,
we write $\hat K= P_1^{-1} K$
and the functional on $C^0(\nt^d,\nr^n)$ we will consider is
\[
\hat\F (\hat K) (\th) =
P_2(\th) ( F(P_1(\th\!-\!\omega) \hat K(\th\!-\!\omega),\th\!-\!\omega) -
P_1(\th) \hat K (\th)) \ .
\]
The norm we consider in $C^0(\nt^d,\nr^n)$ is that induced by the Finsler:
\[
\norm{\hat \Delta} = \max_{\th\in\nt^d} |\hat \Delta(\th)|_\th\ .
\]
{From} the regularity properties of the composition operator,
discussed in \cite{HLlth} and \cite{LlaveO99}, we have that
$\hat\F$ is $C^2$ when acting on $C^0$ functions. This will allow us
to use the mean value theorem, etc.
First, we estimate the error in the approximate invariant torus, that in
the new coordinates is $\hat K_0= P_1^{-1} K_0$. Hence, from 5.1):
\begin{equation}
\label{bound_error}
\norm{\hat \F(\hat K_0)} = \norm{R}\leq \rho \ .
\end{equation}
Now, we compute $(\Dif \hat \F(\hat K_0))^{-1}$ and estimate its norm. The
differential of $\hat \F$ is defined by
\begin{eqnarray*}
(\Dif \hat \F(\hat K_0) \hat\Delta) (\th)
&=&
P_2(\th)
\Dif F(P_1(\th\!-\!\omega) \hat K_0(\th\!-\!\omega),\th\!-\!\omega)
P_1(\th\!-\!\omega) \hat\Delta(\th\!-\!\omega) - \\
& & P_2(\th) P_1(\th) \hat\Delta(\th) \\
&=&
(\Lambda_0(\th\!-\!\omega) + S(\th\!-\!\omega))
\hat\Delta(\th\!-\!\omega)
- \hat \Delta(\th) - T(\th) \hat\Delta(\th)\ .
\end{eqnarray*}
Denoting $\L_\omega$,$\S_\omega$ the transfer operators associated to
$\Lambda_0(\th)$ and $S(\th)$ (over
the rotation by $\omega$), and $\T$ the transfer operator associated to
$T(\th)$ (over the identity), i.e.
%\begin{eqnarray*}
%\L_\omega \hat\Delta (\th)&=&
%\Lambda_0(\th\!-\!\omega) \hat\Delta(\th\!-\!\omega) \ ,\\
%\S_\omega \hat \Delta (\th)&=&
%S(\th\!-\!\omega) \hat\Delta(\th\!-\!\omega) \ , \\
%\T \hat \Delta (\th)&=& T(\th) \hat\Delta(\th)\ ,
%\end{eqnarray*}
\[
\L_\omega \hat\Delta (\th)=
\Lambda_0(\th\!-\!\omega) \hat\Delta(\th\!-\!\omega) \ ,\
\S_\omega \hat \Delta (\th)=
S(\th\!-\!\omega) \hat\Delta(\th\!-\!\omega) \ , \
\]
\[
\T \hat \Delta (\th)= T(\th) \hat\Delta(\th)\ ,
\]
we write
\[
\Dif \hat \F(\hat K_0) = (\L_\omega -\Id) + \S_\omega - \T\ .
\]
Notice that, from 5.4), both $\L_\omega^s-\Id$
and $\L_\omega^u-\Id$ have continuous inverses, with
\[
\norm{(\L_\omega^s -\Id)^{-1}} \leq \frac{1}{1-\lambda}\ , \
\norm{(\L_\omega^u -\Id)^{-1}} \leq \frac{\lambda}{1-\lambda}\ ,
\]
and then $\L_\omega -\Id$ has continuous inverse with
\begin{equation}
\label{transfer_gap}
\norm{(\L_\omega -\Id)^{-1}} \leq \frac{1}{1-\lambda}\ .
\end{equation}
Hence, we write
\[
\Dif \hat \F(\hat K_0)^{-1} =
(\Id + (\L_\omega -\Id)^{-1} (\S_\omega - \T))^{-1} (\L_\omega -\Id)^{-1}
\]
and, from 5.2) and 5.3), we have the estimate
\begin{equation}
\label{bound_inverse}
\norm{ \Dif \hat \F(\hat K_0)^{-1} } \leq
\frac{1}{1-\frac{\sigma+\tau}{(1-\lambda)}}
\frac{1}{1-\lambda} = \frac{1}{1-(\lambda+\sigma+\tau)}\ .
\end{equation}
Notice that we use condition 4): $\lambda+\sigma+\tau<1$.
We estimate now the norm of $\Dif^2\hat\F$ on
functions in
\begin{eqnarray*}
\bar B(\hat K_0,r)
&=& \{ \hat K\in C^0(\nt^d,\nr^n) \st \norm{\hat K-\hat K_0}\leq r \} \ .
\end{eqnarray*}
Notice that, if $\hat K\in \bar B(\hat K_0,r)$, then
\[
|P_2(\th) (P_1(\th) \hat K(\th) -
P_1(\th) \hat K_0(\th))|_\th
\leq (1+\tau) r = \bar r\ ,
\]
where $\bar r$ is defined in 6), and we can apply condition 6) to obtain
\begin{equation*}
\begin{split}
\Dif^2\hat \F(\hat K)& [\hat \Delta_1,\hat \Delta_2] (\th+\omega) \\
&=
P_2(\th+\omega) \Dif^2 F(P_1(\th) \hat K(\th),\th)
[P_1(\th) \hat \Delta_1(\th),
P_1(\th) \hat \Delta_2(\th)] .
\end{split}
\end{equation*}
That is,
\begin{equation}
\label{bound_hessian}
\norm{\Dif^2\hat \F (\hat K)} \leq b\
\end{equation}
for any $\hat K\in \bar B(\hat K_0,r)$.
Once we have the estimates \eqref{bound_error},
\eqref{bound_inverse} and \eqref{bound_hessian},
the only argument we need to prove a) is to verify the conditions of
Newton-Kantorovitch theorem, that is, to compute the constants $\ep$ and
$\beta$ of Theorem~\ref{NK}.
From \eqref{bound_error} and \eqref{bound_inverse} we obtain the estimate
\begin{eqnarray*}
\norm{\Dif\hat \F(\hat K_0)^{-1} \hat \F(\hat K_0)}
&\leq& \frac{1}{1-(\lambda+\sigma+\tau)}\cdot \rho= \ep \ ,
\end{eqnarray*}
and using
\eqref{bound_hessian} and \eqref{bound_inverse} we obtain
\begin{eqnarray*}
\norm{\Dif\hat \F(\hat K_0)^{-1} \Dif^2\hat \F (\hat K)} &\leq&
\frac{b}{1-(\lambda+\sigma+\tau)} = \beta\ ,
\end{eqnarray*}
for any $\hat K\in \bar B(\hat K_0,r)$.
The rest of the proof of a) is a
straightforward application of Theorem~\ref{NK}. So, the Newton method converges
quadratically to an invariant torus $K_*(\th)= P_1(\th) \hat K_*(\th)$ from the initial
approximation $K_0(\th)= P_1(\th) \hat K_0(\th)$, and
$\norm{\hat K_* - \hat K_0} \leq r_0$, where $r_0$ is defined in 7).
b) We will prove that, for any
$\hat K(\th)\in \bar B(\hat K_0,r_0)$, the resolvent of the transfer
operator associated to $M(\th)= \Dif F(K(\th),\th)$, with
$K(\th) = P_1(\th) \hat K(\th)$, contains the whole
unit circle. (If $\omega$ is irrational, we just need to check that
$1$ is in the resolvent). In particular, this will prove that the
invariant torus $K_*$ is normally hyperbolic.
Hence, for any $z\in\nc$ with $|z|= 1$, and
given $\nabla\in C^0(\nt^d,\nr^n)$, we have to solve the equation
\[
\nabla(\th)=
\Dif F(K(\th\!-\!\omega),\th\!-\!\omega) \Delta(\th\!-\!\omega) -
z \Delta(\th)\ ,
\]
which is equivalent to
\begin{equation}
\label{tobesolved0}
\begin{split}
P_2(\th) P_1(\th) \hat \nabla(\th) = &
P_2(\th) \Dif F(K(\th\!-\!\omega),\th\!-\!\omega)
P_1(\th\!-\!\omega)
\hat\Delta(\th\!-\!\omega) - \\ &
z P_2(\th) P_1(\th) \hat \Delta(\th) \ ,
\end{split}
\end{equation}
once we perform the change of variables giving rise to
$\Delta= P_1\hat\Delta$ and $\nabla= P_1\hat\nabla$.
A first step is to compare the transfer operators associated to
$M(\th)= \Dif F(K(\th),\th)$ and
$M_0(\th) = \Dif F(K_0(\th),\th)$. To do so, we consider
the vector bundle map (over the rotation $\omega$) defined by
\begin{equation}
\begin{split}
\label{B_definition}
B(\th) =&
P_2(\th) (\Dif F(K(\th),\th) -
\Dif F(K_0(\th),\th)) P_1(\th\!-\!\omega) \\
=&
\displaystyle
\int_0^1
P_2(\th) \Dif^2 F( K_t(\th), \th )
[ P_1(\th) (\hat K(\th) - \hat K_0(\th)) ,
P_1(\th) \cdot ] \ dt \ .
\end{split}
\end{equation}
where $K_t(\th)= t K(\th) + (1\!-\!t) K_0(\th)$.
Notice that $\norm{\B_\omega} \leq b r_0$.
Then, the equation to be solved \eqref{tobesolved0} is:
\begin{equation}\label{tobesolved1}
\begin{split}
(\Id + T(\th)) \hat \nabla(\th) =
(\Lambda_0( & \th\!-\!\omega) + S(\th\!-\!\omega) + B(\th\!-\!\omega))
\hat \Delta(\th\!-\!\omega) \\
& - z (\Id + T(\th)) \hat \Delta(\th).
\end{split}
\end{equation}
Using transfer operator notation, \eqref{tobesolved1} becomes:
\begin{equation}\label{tobesolved2}
(\Id + \T) \hat \nabla =
((\L_\omega-z\Id) + \S_\omega + \B_\omega - z \T) \hat \Delta \ .
\end{equation}
The solution of \eqref{tobesolved2} is, using the contraction
mapping principle:
\[
\hat \Delta =
\left( \Id + (\L_\omega-z\Id)^{-1} (\B_\omega + \S_\omega - z \T) \right)^{-1}
(\L_\omega-z\Id)^{-1} (\Id + \T) \hat \nabla \ .
\]
Notice that
\[
\norm{\hat \Delta} \leq \frac{1+\tau}{1-(\lambda+\sigma+\tau+b r_0)}
\norm{\hat \nabla} \ .
\]
If $\lambda+\sigma+\tau+b r_0< 1$, we can ensure
that the operator is a contraction.
To prove this bound, use hypothesis 7) of Theorem~\ref{validation}
and $r_0 < 2\ep$ to obtain:
\[
\frac{b}{1-(\lambda+\sigma+\tau)} r_0 < 2 \beta \ep = 2h < 1 \ .
\]
c) Once we have computed the invariant torus $K_*$, we are ready to compute
its stable and unstable subbundles from the approximate invariant bundles.
The equation to be solved is
\begin{equation}
\label{inv_bundles}
P(\th+\omega)^{-1} \Dif F(K_*(\th),\th) P(\th) -
\Lambda(\th)
= 0\ .
\end{equation}
The unknowns in \eqref{inv_bundles} are
$P$ and $\Lambda= \blockdiag(\Lambda^s$, $\Lambda^u)$.
Instead of using these unknowns, we will introduce new
variables which take advantage of the fact that we
have an approximate solution.
We introduce the new variables $Q$, $\Delta^s$, $\Delta^u$
by:
\[
P(\th)= P_1(\th) (\Id + Q(\th)) \ ,\
\Lambda^s(\th)= \Lambda_0^s(\th) + \Delta^s(\th)\ ,\
\Lambda^u(\th)= \Lambda_0^u(\th) + \Delta^u(\th)\ ,
\]
with
\begin{equation}
\label{Q_definition}
Q(\th)= \left(\begin{array}{cc}
0 & Q^{su}(\th) \\
Q^{us}(\th) & 0 \end{array} \right) \ .
\end{equation}
{From} the geometrical point of view, this construction is just
writing the stable bundle as a graph of a vector bundle map
$Q^{us}(\th): \nr^{n_s} \to \nr^{n_u}$ and
the unstable bundle as a graph of a vector bundle map
$Q^{su}(\th): \nr^{n_u} \to \nr^{n_s}$.
This is a standard procedure in the study of stability of
invariant splittings. See, for example, \cite{HirschP68}.
In Section~\ref{Computation of invariant subbundles}
we will generalize the construction to more general splittings.
We will use the contraction principle to analyze \eqref{inv_bundles}.
First, notice that the initial error (we take $Q^{us}= 0$ and
$Q^{su}= 0$) is
\begin{eqnarray*}
\tilde S(\th)
&=& P_1(\th+\omega)^{-1} \Dif F(K_*(\th),\th) P_1(\th)
-\Lambda_0 (\th) \\
&=& (\Id + T(\th+\omega))^{-1}
(\Lambda_0(\th) + S(\th) + B(\th)) - \Lambda_0 (\th) \\
&=& ((\Id + T(\th+\omega))^{-1} - \Id) \Lambda_0 (\th) +
(\Id + T(\th+\omega))^{-1} (S(\th) + B(\th))\ ,
\end{eqnarray*}
where $B$ is defined in \eqref{B_definition} with $K= K_*$.
The norm is
\begin{equation}
\label{tildesigma}
\norm{\tilde S} \leq
\frac{\tau}{1-\tau} \hat\lambda +
\frac{1}{1-\tau} (\sigma + b r_0) = \tilde \sigma\ .
\end{equation}
The equation~\eqref{inv_bundles} reads
\begin{eqnarray*}
0 &=& P_1(\th+\omega)^{-1} \Dif F(K_*(\th),\th) P_1(\th)
(\Id + Q(\th)) - \\ & & (\Id + Q(\th+\omega))
(\Lambda_0(\th) + \Delta(\th)) \\
&=& (\tilde S(\th) + \Lambda_0(\th)) (\Id + Q(\th)) -
(\Id + Q(\th+\omega))
(\Lambda_0(\th) + \Delta(\th)) \\
&=& \Lambda_0(\th) Q(\th) - Q(\th+\omega) \Lambda_0(\th) -
\Delta(\th) + \tilde S(\th) (\Id + Q(\th)) -
Q(\th+\omega) \Delta(\th)\ .
\end{eqnarray*}
If we write
\[
\tilde S(\th)= \left(\begin{array}{cc}
\tilde S^{ss}(\th) & \tilde S^{su}(\th) \\
\tilde S^{us}(\th) & \tilde S^{uu}(\th)
\end{array}\right)\ ,
\]
then we see that we can set $\Delta^s, \Delta^u$ by
\begin{eqnarray}
\label{eqds}
\Delta^s(\th) &=&
\tilde S^{su}(\th) Q^{us}(\th) + \tilde S^{ss}(\th) \ , \\
\label{eqdu}
\Delta^u(\th) &=&
\tilde S^{us}(\th) Q^{su}(\th) + \tilde S^{uu}(\th) \ ,
\end{eqnarray}
where $Q^{us}$ and $Q^{su}$ solve the equations
\begin{equation}
\label{eqsu}
\begin{array}{lcl}
\Lambda_0^s(\th) Q^{su}(\th) -
Q^{su}(\th+\omega) \Lambda_0^u(\th)
&=&
- (\tilde S^{ss}(\th) Q^{su}(\th) -
Q^{su}(\th+\omega) \tilde S^{uu}(\th)) + \\
& & Q^{su}(\th+\omega) \tilde S^{us}(\th)
Q^{su}(\th) - \tilde S^{su}(\th)\\
\end{array}
\end{equation}
\begin{equation}
\label{equs}
\begin{array}{lcl}
\Lambda_0^u(\th) Q^{us}(\th) -
Q^{us}(\th+\omega) \Lambda_0^s(\th)
&=&
- (\tilde S^{uu}(\th) Q^{us}(\th) -
Q^{us}(\th+\omega) \tilde S^{ss}(\th)) + \\
& & Q^{us}(\th+\omega) \tilde S^{su}(\th) Q^{us}(\th) -
\tilde S^{us}(\th) \ .\\
\end{array}
\end{equation}
Hence, we have
just to solve \eqref{eqsu} and \eqref{equs}.
We will just present the details for \eqref{eqsu}, because the analysis
of \eqref{equs} follows along the same lines.
(Indeed, the results for \eqref{equs} can be obtained
by applying the results for \eqref{eqsu} to the
inverse mapping.)
By defining the linear operator $\L^{su}_\omega$
acting on continuous vector bundle maps (over the identity)
$Q^{su}(\th): \nr^{n_u} \to \nr^{n_s}$ by
\[
\L^{su}_\omega Q^{su} (\th) =
\Lambda_0^s(\th\!-\!\omega) Q^{su}(\th\!-\!\omega)
(\Lambda_0^u(\th\!-\!\omega))^{-1}
\]
the equation \eqref{eqsu} reads
\begin{equation}
\label{eqsufp}
\begin{array}{lcl}
Q^{su}
&=& (\L^{su}_\omega - \Id)^{-1} \comp
(-\tilde\S^{ss}_\omega Q^{su} + Q^{su}
(\tilde\S^{us}_\omega Q^{su} + \tilde\S^{uu}_\omega) -
\tilde\S^{su}_\omega)
\comp (\L_\omega^u)^{-1} \ ,
\end{array}
\end{equation}
where
\[
\norm{(\L^{su}_\omega - \Id)^{-1}} \leq \frac{1}{1-\lambda^2}\ .
\]
The right hand side is considered as an operator $Q^{su} \to \bar Q^{su}$.
An observation is that from the estimate \eqref{tildesigma} we obtain the
estimates
$\norm{\tilde S^{ss}} \leq \tilde\sigma$,
$\norm{\tilde S^{su}} \leq \tilde\sigma$,
$\norm{\tilde S^{us}} \leq \tilde\sigma$,
$\norm{\tilde S^{uu}} \leq \tilde\sigma$,
because the projections onto the approximate invariant subbundles have
(adapted) norms bounded by $1$.
We will find a closed ball of radius $\alpha$ in which \eqref{eqsufp}
satisfies the contraction principle.
First, if $\norm{Q^{su}}\leq \alpha$ then
\[
\norm{\bar Q^{su}} \leq
\frac{\lambda}{1-\lambda^2} \tilde\sigma (1+\alpha)^2 = \mu (1+\alpha)^2\ ,
\]
where $\mu$ is defined in 9) of Theorem~\ref{validation}.
So, the ball of radius $\alpha$ is mapped into itself if
\begin{equation}
\label{invariance_ball}
\mu (1+\alpha)^2 \leq \alpha\ .
\end{equation}
A Lipschitz constant of $\bar Q^{su}$ is
\begin{equation}
\label{Lip}
L = 2 \frac{\lambda}{1-\lambda^2} \tilde\sigma (1+\alpha) =
2\mu(1+\alpha)\ .
\end{equation}
The invariance condition is satisfied, for instance, if
\[
\alpha=
\frac{1}{2\mu} - 1 -
\frac{1}{2} \sqrt{\frac{1}{\mu} \left(\frac{1}{\mu} - 4\right)}\ ,
\]
where we assume that $\mu<\frac{1}{4}$ by the hypothesis 9).
In such a case, the Lipschitz constant is
\[
L= 2\mu (1+\alpha) = 1- \sqrt{1-4\mu} < 1\ ,
\]
and $\bar Q^{su}$ is a contraction in the ball of radius $\alpha$.
Finally, notice that for the solution $Q^{su}$ of \eqref{eqsu}:
\[
\norm{Q^{su}} \leq \frac{1}{1-L} \frac{\lambda}{1-\lambda^2} \tilde\sigma
= \frac{\mu}{\sqrt{1-4\mu}}\ .
\]
With these estimates we complete the proof of Theorem~\ref{validation}.
\end{proof}
\subsection{Projection method}
\label{Projection_method}
In this section, we describe an algorithm
which is more efficient than the direct application of Newton method
described before in Section~\ref{sec:Newton}
by using the geometry of the problem.
The basic idea of the method is that we
observe that, by taking projections over
the stable and unstable directions, the
infinitesimal equations can be solved just
by iterating (either in the future or in the past).
This is the mechanism of the proof of Theorem~\ref{validation}
and has the flavor of the techniques of the general theory of normally
hyperbolic invariant manifolds.
When we update the torus, the linearization changes,
hence, immediately after we update the invariant torus,
we update the invariant splitting using the perturbative
arguments developed in the previous section.
This avoids storing and inverting the discretized matrix in
\eqref{largelinear}. This gives a very important speed up
with respect to the algorithm based in a straight implementation
of the discretization if the number of coefficients is high and the
gap in the spectrum around the unit circle is not too small.
\note{A}{He anyadido esto a la frase, aunque queda un poco vaga.}
We now give some more details about the algorithm.
Suppose that we have an approximate normally hyperbolic invariant torus
given by $K$, and we have also produced an approximate adapted frame $P$,
in such a way that
\begin{equation}
\label{almost_inv_torus_pro}
P(\th)^{-1} (F(K(\th\!-\!\omega),\th\!-\!\omega)
- K(\th)) = R(\th) \ ,
\end{equation}
and
\begin{equation}
\label{almost_inv_frame}
P(\th + \omega)^{-1} \Dif F(K(\th),\th) P(\th) -
\Lambda(\th) = S(\th) \ ,
\end{equation}
where $R$ and $S$ are small.
In order to improve the estimates $K$, $P$, $\Lambda^s$, $\Lambda^u$,
we look for $\tilde K= K + P H$, $\tilde P= P + PQ$ ,
$\tilde \Lambda^s = \Lambda^s + \Delta^s$,
$\tilde \Lambda^u = \Lambda^u + \Delta^u$,
respectively.
Equation \eqref{ieit} reads
\begin{eqnarray*}
0 &=&
P(\th+\omega)^{-1}
(F(K(\th)+P(\th) H(\th),\th) - K(\th+\omega)) - H(\th+\omega) \\
&=& R(\th+\omega) +
P(\th+\omega)^{-1} \Dif F(K(\th),\th) P(\th) H(\th) + O(H^2)
- H(\th+\omega) \\
&=& R(\th+\omega) + (\Lambda(\th) + S(\th)) H(\th) - H(\th+\omega)
+ O(H^2)
\end{eqnarray*}
and gives rise to
\begin{equation}
\label{linear}
\Lambda(\th) H(\th)-H(\th+\omega) = -R(\th+\omega) \ ,
\end{equation}
where we are neglecting all the quadratically small terms.
If we split $H$ and $R$ in the stable and unstable components,
\eqref{linear} splits in the fixed point equations
\begin{eqnarray}
\label{linears}
H^s(\th) &=& \Lambda^s(\th-\omega) H^s(\th-\omega) + R^s(\th) , \\
\label{linearu}
H^u(\th) &=& (\Lambda^u(\th))^{-1} (H^u(\th+\omega) -
R^u(\th+\omega))\ .
\end{eqnarray}
Since $ \Lambda^s$ is contracting and $\Lambda^u$ is expanding,
both equations are solvable by using simple iteration.
Once we have improved $K$, we improve $P$ and $\Lambda$.
Since $S$, $H$, $Q$ and $\Delta$
are supposed to be small, we eliminate the quadratically small terms
in the following computations:
\begin{eqnarray*}
0 &=&
\tilde P(\th+\omega)^{-1}\cdot\Dif F(\tilde K(\th),\th)\cdot
\tilde P(\th) - \tilde \Lambda(\th) \\
&=&
(\Id-Q(\th+\omega)) P(\th+\omega)^{-1}
\cdot\Dif F(\tilde K(\th),\th)\cdot
P(\th) (\Id+Q(\th)) - \Lambda(\th) -\Delta(\th)\\
&=&
P(\th+\omega)^{-1} \Dif F(\tilde K(\th),\th) P(\th) -
Q(\th+\omega) \Lambda(\th) + \Lambda(\th) Q(\th) - \Lambda(\th) -\Delta(\th)
\end{eqnarray*}
Hence, we arrive at the equation
\begin{equation}
\label{sol_frame}
\begin{array}{lcl}
\Lambda(\th) Q(\th) - Q(\th+\omega) \Lambda(\th) - \Delta(\th) &=&
- \tilde S(\th)
\end{array}
\end{equation}
where
\begin{equation}
\begin{array}{lcl}
\tilde S(\th) &=&
P(\th+\omega)^{-1} \Dif F(\tilde K(\th),\th) P(\th) - \Lambda(\th) \\
% \mbox{ or }
%& & \\
%\tilde S(\th) &=&
% S(\th) + P(\th+\omega)^{-1}
% \Dif^2 F(K(\th),\th) [P(\th) H(\th), P(\th)]\ .
\end{array}
\end{equation}
Using the block matrix notation
\[
A= \left(\begin{array}{cc} A^{ss} & A^{su} \\
A^{us} & A^{uu} \end{array} \right) \ ,
\]
equation \eqref{sol_frame} reads
\begin{eqnarray}
\label{framess}
\Lambda^s(\th) Q^{ss}(\th) - Q^{ss}(\th+\omega)\Lambda^s(\th)
&=& -\tilde S^{ss}(\th) + \Delta^s(\th) \ ,\\
\label{framesu}
\Lambda^s(\th) Q^{su}(\th) - Q^{su}(\th+\omega)\Lambda^u(\th)
&=& -\tilde S^{su}(\th) \ ,\\
\label{frameus}
\Lambda^u(\th) Q^{us}(\th) - Q^{us}(\th+\omega)\Lambda^s(\th)
&=& -\tilde S^{us}(\th) \ ,\\
\label{frameuu}
\Lambda^u(\th) Q^{uu}(\th) - Q^{uu}(\th+\omega)\Lambda^u(\th)
&=& -\tilde S^{uu}(\th) + \Delta^u(\th) \ .
\end{eqnarray}
In order to solve \eqref{framess} and
\eqref{frameuu}, we choose $Q^{ss}(\th)= 0$, $Q^{uu}(\th)= 0$ and
$\Delta^s(\th) = \tilde S^{ss}(\th)$, $\Delta^u(\th) = \tilde S^{uu}(\th)$.
The geometrical meaning of such a choice of $Q$ is that we
modify the stable and unstable frames in the complementary directions.
This is the trick we have used in the proof of Theorem~\ref{validation},
but other choices could be possible (see Section~\ref{Reducibility_method}).
We compute $Q^{us}(\th)$ and $Q^{su}(\th)$ using the contraction principle,
by writing the equations as
\begin{eqnarray}
\label{linearsu}
Q^{su}(\th) &=&
(\Lambda^s(\th-\omega) Q^{su}(\th-\omega) +
\tilde S^{su}(\th-\omega)) (\Lambda^u(\th-\omega))^{-1} , \\
\label{linearus}
Q^{us}(\th) &=&
(\Lambda^u(\th))^{-1} (Q^{us}(\th+\omega)\Lambda^s(\th) -
\tilde S^{us}(\th)) \ .
\end{eqnarray}
We make minor modifications in the previous arguments to compute the inverse of the change of variables $P(\th)$.
We just define $P_2(\th)= P(\th)^{-1}$, and add
the equation $P_2(\th)P(\th) - \Id = 0$.
Hence, we arrive at:
\begin{Algorithm}[Projection Method]
Let $K(\th)$ be an approximate normally hyperbolic invariant torus,
$P_1(\th)$ the matrix of an approximate adapted frame and $P_2(\th)$
an approximate inverse of $P_1(\th)$, and $\Lambda(\th)=
\blockdiag(\Lambda^s(\th),\Lambda^u(\th))$
representing the dynamics on the approximate stable and unstable
invariant bundles. That is:
\begin{eqnarray*}
P_2(\th) (F(K(\th-\omega),\th-\omega) - K(\th))
&=& R(\th) \ ,\\
P_2(\th + \omega) \Dif F(K(\th),\th) P_1(\th) -
\Lambda(\th) &=& S(\th) \ ,\\
P_2(\th) P_1(\th) - \Id &=& T(\th) \ ,
\end{eqnarray*}
where $R,S,T$ are small.
Then, one step on the projection method is to compute
$\tilde K= K + P_1 H$, $\tilde P_1= P_1 + P_1 Q$ ,
$\tilde P_2= P_2 - (Q+T) P_2$,
$\tilde\Lambda^s = \Lambda^s + \Delta^s$,
$\tilde\Lambda^u = \Lambda^u + \Delta^u$,
following the next recipe:
\begin{itemize}
\item Compute the components of $H$ from
\begin{eqnarray*}
H^s(\th) &=& \Lambda^s(\th-\omega) H^s(\th-\omega) + R^s(\th) , \\
H^u(\th) &=& (\Lambda^u(\th))^{-1} (H^u(\th+\omega) -
R^u(\th+\omega))\ .
\end{eqnarray*}
using simple iteration.
\item Compute
\[
\tilde S(\th) =
P_2(\th+\omega) \Dif F(\tilde K(\th),\th) P_1(\th) -\Lambda(\th) -
T(\th+\omega) \Lambda(\th) \ ,
\]
\item Compute the components of the matrix
\begin{eqnarray*}
Q(\th)&=& \left(\begin{array}{cc}
0 & Q^{su}(\th) \\ Q^{us}(\th)
& 0 \end{array}\right)
\end{eqnarray*}
from the equations
\begin{eqnarray*}
Q^{su}(\th) &=&
(\Lambda^s(\th-\omega) Q^{su}(\th-\omega) +
\tilde S^{su}(\th-\omega)) (\Lambda^u(\th-\omega))^{-1} , \\
Q^{us}(\th) &=&
(\Lambda^u(\th))^{-1} (Q^{us}(\th+\omega)\Lambda^s(\th) -
\tilde S^{us}(\th)) \ ,
\end{eqnarray*}
that are solved again by simple iteration.
\item Set
$\Delta^s(\th) = \tilde S^{ss}(\th)$ and
$\Delta^u(\th) = \tilde S^{uu}(\th)$.
\end{itemize}
\end{Algorithm}
\begin{remark}
This method avoids the
storage and inversion of a large matrix from the discretization in terms of
Fourier series of the linear equation \eqref{largelinear}. The speed
of the procedure depends strongly on the rate of contraction and expansion
of $\Lambda^s$ and $\Lambda^u$, respectively.
\end{remark}
\begin{remark}
The computation of $P_1(\th)^{-1}$ can be done at each step, using Newton
method, and so eliminating $T(\th)$. We can do this for the computation of
$\Lambda^u(\th)^{-1}$.
\end{remark}
\begin{remark}
There are cases in which the inverse of the matrix $P_1 =P$
is easily computable,
such as the case when the system is conformal symplectic and so
$P$ is also symplectic. In Section~\ref{geometry} we develop an algorithm
adapted to the conformal symplectic case. Another interesting case is when
the system is reversible.
\end{remark}
\subsection{Reducibility method}
\label{Reducibility_method}
The previous method assumed that there was a frame in the bundle
and we just computed matrices.
The idea of the reducibility method is that one can also
optimize the choice of the frame in such a way that the
linearization becomes diagonal and constant.
As we will see, this reduction to constant coefficients is
not always possible in the quasi-periodic case -- in the periodic
case, this is possible because of Floquet theory --.
However, under some extra non-resonance conditions, which we will
detail, it is possible to make such reduction. The extra conditions
are non-trivial. As we will see, they fail in several cases.
On the other hand, they are verified in many cases and, when they
do hold, the method is extremely fast and accurate.
\begin{Definition}\label{def:reducible}
We say that the invariant torus $K$ is reducible
if the cocycle $M(\th)= \Dif F(K(\th),\th)$ over the rotation $\omega$
is reducible, that is, if there exist a
constant matrix $\Lambda$ and a change of variables $P(\th)$ such
that
\begin{equation}
\label{reduc}
P(\th+\omega)^{-1} M(\th) P(\th) - \Lambda = 0\ .
\end{equation}
The vector bundle map over the identity given by $P(\th)$ is known
as Floquet transformation.
\end{Definition}
One important case when reducibility holds is
when the invariant subbundles are one dimensional and
$\omega$ is Diophantine \cite{JohnsonS81}. In such a case we can obtain
$\Lambda= \diag (\lambda_1,\dots,\lambda_n)$, where the entries are real.
For more general cases, including complex entries
and dependence on parameters see
\cite{Eliasson88a,Poschel89,JorbaS92,JorbaS96,JorbaLZ99}.
The key idea of the {\em reducibility method}
is to consider both invariance equation \eqref{ieit}
and reducibility equation \eqref{reduc}, in such a way that at each
step of Newton method, the linear equation to be solved is, in some sense,
diagonalized. This is a technique widely used in KAM theory (see, for instance,
\cite{Llave01, JorbaLZ99} and the references therein).
Suppose that we have an approximate solution of
\eqref{reduc} that is,
we have $K(\th)$, $\Lambda$ and $P(\th)$ such that
\begin{eqnarray}
\label{almost_inv_torus_red}
P(\th)^{-1}(F(K(\th-\omega),\th-\omega) - K(\th)) &=& R(\th) \ , \\
\label{almost_inv_frame_red}
P(\th+\omega)^{-1} \Dif F(K(\th),\th) P(\th) - \Lambda &=& S(\th) \ ,
\end{eqnarray}
where $R(\th), S(\th)$ are supposed to be small.
Let
$\tilde K(\th)= K(\th)+P(\th) H(\th)$,
$\tilde P(\th)= P(\th) (\Id +Q(\th))$ and
$\tilde \Lambda= \Lambda + \Delta$ be the corresponding improvements, where
$\Delta= \diag(\delta_1,\dots,\delta_n)$.
Repeating the arguments of Section~\ref{Projection_method} we obtain
the equation
\begin{eqnarray}
\label{Hred}
\Lambda H(\th) - H(\th+\omega) &=& - R(\th+\omega)\ ,
\end{eqnarray}
whose components are, for $i= 1,\dots, n$:
\begin{eqnarray}
\label{Hredi}
\lambda_i H^i(\th) - H^i(\th+\omega) &=&
- R^i(\th+\omega)\ .
\end{eqnarray}
Writing the Fourier modes of \eqref{Hredi} we obtain
\begin{eqnarray}
\label{Hk}
H_k^i &=& \frac{1}{1-\lambda_i e^{-2\pi\ii k \omega}} R_k^i\ ,
\end{eqnarray}
for each $k\in\nz^d$. Small divisors do not appear, provided the eigenvalues
$\lambda_i$ are not in the unit circle. If there are some eigenvalues in the unit
circle, we need some Diophantine conditions involving those eigenvalues and
$\omega$.
The equation for $Q$ and $\Lambda$ is obtained again following
the methods explained in
Section~\ref{Projection_method}, and it is
\begin{eqnarray}
\label{Qred}
\Lambda Q(\th) - Q(\th+\omega) \Lambda - \Delta & = & -\tilde S(\th)
\end{eqnarray}
where $\tilde S(\th) =
P(\th+\omega)^{-1} \Dif F(\tilde K(\th),\th) P(\th) - \Lambda$.
Writing \eqref{Qred} componentwise we obtain, for any $i,j= 1,\dots,n$:
\begin{eqnarray}
\lambda_i Q^{ij}(\th) - Q^{ij}(\th+\omega)\lambda_j & = &
-\tilde S^{ij}(\th)
\ \ \mbox{, if $i\neq j$},
\label{no_diagonal_elements}
\\
\lambda_i Q^{ii}(\th) - Q^{ii}(\th+\omega) \lambda_i - \delta_i
& = &
- \tilde S^{ii}(\th)
\ \ \mbox{, if $i= j$}\ .
\label{diagonal_elements}
\end{eqnarray}
If the eigenvalues
are all of them different and the non-resonance condition
$\lambda_i/\lambda_j\neq e^{2\pi\ii k \omega}$ for all $k\in\nz^d$, $i\neq j$
is satisfied, then \eqref{no_diagonal_elements} is formally
solvable by
\begin{eqnarray}
Q^{ij}_k &=& \frac{-1}{ \lambda_i - \lambda_j e^{2\pi\ii k \omega} }
\tilde S^{ij}_k\ .
\end{eqnarray}
Notice that small divisors do not appear if the eigenvalues have
different modulus. If this is not the case, suppose that
$\lambda_i/\lambda_j= e^{2\pi\ii \alpha}$, $\alpha\neq 0$
(for instance, if they are complex conjugated), then
one needs some Diophantine conditions involving $\alpha$ and $\omega$ in
order to avoid resonances.
About \eqref{diagonal_elements}, we fix
\[
\delta_i= \tilde S^{ii}_0 \ , \ Q^{ii}_0= 0
\]
and then for $k\neq 0$
\[
Q^{ii}_k = \frac{-1}{\lambda_i (1 - e^{2\pi\ii k \omega})} \tilde S^{ii}_k\ .
\]
Small divisors appear again and we need Diophantine properties on $\omega$.
Notice that we have some
freedom to choose the averages of the solutions of $Q^{ii}(\th)$.
We could apply extra normalizing conditions to fix, for instance,
the averages of the diagonal elements $P^{ii}(\th)$.
\begin{Algorithm}[Reducibility Method]
\label{algorithm_reducibility}
Let $K(\th)$ be an approximate invariant torus.
Let $P_1(\th)$ be
the matrix of an approximate adapted frame and $P_2(\th)$ be
an approximate inverse of $P_1(\th)$, and $\Lambda=
\diag(\lambda_1,\dots,\lambda_n)$ be the constant matrix
representing the dynamics on the approximate complex 1-dimensional
invariant bundles (generated by the columns of $P_1(\th)$). That is:
\begin{eqnarray*}
P_2(\th) (F(K(\th\!-\!\omega),\th\!-\!\omega) -
K(\th)) &=& R(\th) \ ,\\
P_2(\th + \omega) \Dif F(K(\th),\th) P_1(\th) -
\Lambda &=& S(\th) \ ,\\
P_2(\th) P_1(\th) - \Id &=& T(\th) \ ,
\end{eqnarray*}
where $R,S,T$ are small.
Then, one step on the reducibility method is to compute
$\tilde K= K + P_1 H$, $\tilde P_1= P_1 + P_1 Q$ ,
$\tilde P_2= P_2 - (T + Q) P_2$,
$\tilde\Lambda = \Lambda + \Delta$,
following the next recipe:
\begin{itemize}
\item Compute the Fourier modes of $H= (H^1,\dots,H^n)^{\top}$ by
\begin{equation} \label{Hcompute}
H^i_k = \frac{1}{1-e^{-2\pi\ii k \omega}\lambda_i} R^i_k\ .
\end{equation}
\item Compute
\begin{equation}\label{Scompute}
\tilde S(\th) =
P_2(\th+\omega) \Dif F(\tilde K(\th),\th) P_1(\th) -\Lambda -
T(\th+\omega) \Lambda \ .
\end{equation}
\item For $i,j= 1,\dots,n$, $i\neq j$, and for all $k\in\nz^d$ set
\begin{equation}\label{Qcompute}
Q^{ij}_k =
\frac{-1}{ (\lambda_i - e^{2\pi\ii k \omega}\lambda_j)}\tilde S^{ij}_k\ .
\end{equation}
\item For $i= 1,\dots, n$, set
\[
\delta_i= \tilde S^{ii}_0\ , \ Q^{ii}_0= 0\ ,
\]
and for all $k\in\nz^d\setminus\{0\}$
\[
Q^{ii}_k =
\frac{-1}{\lambda_i (1 - e^{2\pi\ii k \omega})} \tilde S^{ii}_k\ .
\]
\end{itemize}
\end{Algorithm}
\begin{remark}
Note that the reducibility algorithm runs into problems if
the denominators in \eqref{Qcompute} become zero.
This can happen when the matrix $\Lambda$ has two eigenvalues of
the same modulus and whose phases differ by a rational angle.
Of course, if the eigenvalues are not of modulus $1$, the torus
is normally hyperbolic and the other algorithms (Newton method,
Projection method) described before work.
Hence, the reducibility method has more restrictions on the frequencies
than the projection or the Newton method.
Of course, the reducibility method
yields more information since reducibility
is a geometrically important property.
\end{remark}
\begin{remark}\label{validationreducibility}
If the torus is normally hyperbolic and the spectrum of the associated
transfer operator is decomposed in $n$ circles, then there exists a
splitting of
$\nr^n\times\nt^d$ in $n$ invariant subbundles of rank 1. We can trivialize
them
by a covering of a sufficiently high number of leaves of the torus $\nt^d$.
Notice that Theorem~\ref{validation} does not depend on the
torus being reducible. It only depends on the approximate tori, and its hyperbolicity properties.
Hence, in practice, we use the reducibility method
to compute the approximate torus and the invariant subbundles of
the linearization and, then, validate the computation
using Theorem~\ref{validation}.
We note that the application of
Theorem~\ref{validation} to the results of a reducibility calculation
is particularly easy. Issues such as checking the contractivity or
constructing the adapted metric become very easy.
If $\Lambda$ is diagonalized by
$\Lambda=
\diag(\lambda_1^s,\dots,\lambda_{n_s}^s,\lambda_1^u, \dots, \lambda_{n_u}^u)$,
with
\[
|\lambda_1^s| \leq\dots\leq|\lambda_{n_s}^s| < 1 <
|\lambda_{n_u}^u|\leq\dots \leq |\lambda_1^u|
\]
then we can take the Finsler norm as the sup-norm on the adapted coordinates
(constant on the fibers).
Notice that, if $\Lambda^s = \diag(\lambda_1^s,\dots,\lambda_{n_s}^s)$ and
$\Lambda^u= \diag(\lambda_1^u, \dots, \lambda_{n_u}^u)$, then
\[
\norm{\Lambda^s} = |\lambda_{n_s}^s|\ , \
\norm{(\Lambda^u)^{-1}} = \frac{1}{|\lambda_{n_u}^u|}
\]
and $\lambda= \max(|\lambda_{n_s}^s|,\frac{1}{|\lambda_{n_u}^u|})$.
\end{remark}
\begin{remark}\label{reducibility_elliptic}
For a reducible torus, we have that the torus is
normally hyperbolic if and only if the
eigenvalues of $\Lambda$ are outside of the unit circle.
Nevertheless, the numerical
method does not distinguish on whether the eigenvalues are
inside or outside of the unit circle. However, we note that
in case that there are eigenvalues of $\Lambda$ in the unit circle,
there is the possibility of small divisors in the computation of
$H$, described in \eqref{Hcompute},
but, in practice, very often, the numerical calculation proceeds
without interruption because the smallness of the
divisors is compensated by the smallness of the numerators.
\note{A}{``denominators'' por ``numerators''}
We note that the presence of elliptic eigenvalues is to be expected
in the case of symplectic systems, but there is some justification for the
use of the reducibility method based on KAM theory.
The justification is not for a single torus, but rather for
calculations in a family depending on an extra parameter
$\mu$. If the eigenvalues and their differences
depend non-trivially on $\mu$, it is
possible to exclude a set of parameters of
small measure so that the
small divisors in \eqref{Hcompute} and \eqref{Qcompute}
are under control and hence, for these values of
the parameter there are invariant tori which are reducible.
The stability theory of elliptic tori is
covered in several references \cite{Eliasson88a,Poschel89,JorbaS96}.
For a formulation of the results for families which
takes as a starting point the existence of an
approximate solution of the invariance and
reducibility equations, and which is, therefore,
very close to the point of view of Theorem~\ref{validation},
we refer to \cite{JorbaLZ99}.
We note that, in KAM theory, ensuring the reducibility
requires to have control on the small divisors that appear in
\eqref{Qcompute}, besides the small divisors that appear
in \eqref{Hcompute}. The small divisors corresponding to
\eqref{Hcompute} are called the \emph{First Melnikov condition}
and those corresponding to \eqref{Qcompute}, the
\emph{Second Melnikov condition}.
One question that received
attention in recent times -- it is crucial in application of
KAM theory in PDE's -- was the proof of persistence of
normally hyperbolic tori using only the first Melnikov condition.
There are many solutions in the literature. Of particular
interest for us is
the solution based on quasi-reducibility introduced in
\cite{Eliasson99} and which we will study in Section~\ref{almostreducible}.
\end{remark}
\begin{remark}
\label{notclosetoidentity}
In the presentation of the algorithm, we have only shown here
how to improve reducibility using near identity transformations.
In \cite{MoserP84} it is shown how in the case of resonances
one can improve reducibility using transformations which are
not near the identity.
Notice that repeating indefinitely these transformations that are not
near the identity is a non convergent procedure.
On the other hand, it looks plausible that one could use them
just a finite (and small) number of times in a continuation
algorithm to avoid parameter values where reducibility stops
due to resonances. See \cite{Eliasson97,Eliasson99} for
related results. In particular, \cite{Eliasson99} shows that one
can use this procedure to establish persistence of
lower dimensional tori of Hamiltonian systems
with only the first Melnikov condition.
We have implemented these transformations in \cite{HLlex}.
Our numerical experiments suggest that there
are cases in which the resonances of elliptic eigenvalues
are the place of birth of hyperbolic eigenvalues, and then reducibility
is much better in those cases.
Relatedly, the torus
gains regularity when the strong perturbations make it become hyperbolic!
\note{A}{``elliptic'' por ``hyperbolic''}
(These sort of phenomena have been analyzed in \cite{BroerHJVW03}
for quasiperiodically forced Hamiltonian systems with one degree of
freedom.)
\note{A}{He a\~nadido esta frase.}
In fact, the type of resonance has to do
with the topology of the invariant bundles associated with those hyperbolic
eigenvalues. As a result, the index
of the embeddings giving the bundles
is very related with the specific transformation not near the identity
that one uses to gain reducibility.
We can use this information to cross resonances \cite{HLlex}.
\end{remark}
\begin{remark}
\label{examplestandard}
In \cite{HLlex}, one can find numerical
results obtained studying a
quasi-periodic perturbation of the
standard map. We briefly summarize the observations of
\cite{HLlex} here since they are the motivation for
some of the algorithms that we will describe now.
In particular for the high precision calculation of
invariant bundles and the multiplier on them.
In \cite{HLlex}, one can find a numerical continuation
of an elliptic periodic orbit of period
3.
It persists
as an elliptic periodic torus for a (presumably)
Cantor set of parameters. In the gaps, the
periodic torus is normally hyperbolic (in one gap the stable and
unstable invariant manifolds correspond to a bundle of
index 2). At some value of the perturbation,
the torus is normally hyperbolic, but it
seems that eventually it is destroyed.
The destruction of the invariant torus seems to be
due to the fact that stable and unstable subbundles
become closer, even if the hyperbolicity multipliers
$\lambda$ remain constant.
\note{A}{Usamos multiplicadores y no exponentes. Es lo mismo, pero
para no liar con la notacion...}
{From} the point of view of functional analysis, the
spectral projections associated to the two circles of the spectrum
(one inside and the other outside of the unit circle) become
unbounded and at the critical value,
the spectrum is an annulus containing the unit circle. The
periodic curve does not exist beyond this point.
Notice that the set of parameters for which the torus is normally hyperbolic
(a saddle) is open. This is closely related to
the well-known phenomenon of semicontinuity of the
spectrum \cite{Kato76}.
That is, if there is a gap in the spectrum, the torus is saddle-type,
and the gap persists for a small enough perturbation.
On the other hand, it could well happen (and the experiments
in \cite{HLlex} suggest that this is the case) that the spectrum
of the linearization
contains a gap when the parameters lie in an open interval
$(0, \ep_0)$ but that nevertheless, at the boundaries of
the interval, the gap is closed. The size of the gap
can be uniform for $(0, \ep_0)$ but the norm of the spectral
projections grows.
\end{remark}
\begin{remark}
Notice that in practice, we have just a rational
approximation of $\omega$, but Fourier expansions are truncated, and
``zero'' divisors do not appear if we are far from resonances.
The numerical methods produce ``almost'' invariant objects.
For results on effective reducibility, see \cite{JorbaRV97}.
\end{remark}
\begin{remark}
When it is applicable, the reducibility method is the
fastest of the methods we have discussed.
It is specially appropriate for computation of high dimensional tori,
because it not only avoids the large matrix problem,
but also diagonalizes the linear equations at each step
of Newton method.
Of course, when applying the reducibility method, we should be prepared to
study the cases where reducibility fails but the torus persists.
Note that we expect problems when the eigenvalues have the same
modulus. This -- heuristically -- means that, when continuing
families, we should expect problems in sets of codimension
one. When studying families (this appears naturally in
homotopy methods, see Section~\ref{sec:continuation}),
it is advantageous to use the reducibility
method, but pay attention to the cases where it fails.
In the cases that reducibility fails, but that normal
hyperbolicity persists, we have to change to another of the
methods. We will present a practical implementation in an
example in \cite{HLlex}.
\end{remark}
\begin{remark}
If we do not diagonalize the matrix $\Lambda$ during the Newton
process, then \eqref{Hred} reads in Fourier modes
\begin{eqnarray}
(\Lambda - e^{2\pi\ii k \omega}\Id) H_k &=& -e^{2\pi\ii k \omega} R_k
\end{eqnarray}
that is solved by
\begin{eqnarray}
H_k &=& (\Id - e^{-2\pi\ii k \omega} \Lambda)^{-1} R_k\ ,
\end{eqnarray}
provided that the eigenvalues of $\Lambda$ are different from
$e^{-2\pi\ii k \omega}$.
Moreover, \eqref{Qred} reads
\begin{eqnarray}
\label{Qred0}
\Lambda Q_0 - Q_0 \Lambda - \Delta &=& - \tilde S_0\ , \\
\label{Qredk}
\Lambda Q_k - e^{2\pi\ii k \omega} Q_k \Lambda &=& -\tilde S_k\
\mbox{ for all $k\neq 0$}\ .
\end{eqnarray}
A solution of \eqref{Qred0} is
\[
Q_0 = 0 \ , \ \Delta = \tilde S_0\ .
\]
The equations \eqref{Qredk} are linear in the coefficients of the
matrices $Q_k$.
A compact way of writing \eqref{Qredk} is using Kronecker
product.
Recall that the Kronecker product of two matrices $A\in M_{m,n}$
and $B\in M_{p,q}$ is the $(mp)\times(nq)$-matrix
\begin{eqnarray*}
A\otimes B &= &
\left(\begin{array}{cccc}
a_{1,1}B & a_{1,2}B & \dots & a_{1,n} B \\
a_{2,1}B & a_{2,2}B & \dots & a_{2,n} B \\
\vdots & \vdots & & \vdots \\
a_{m,1}B & a_{m,2}B & \dots & a_{m,n} B
\end{array}\right) \ .
\end{eqnarray*}
We also use the notation
\begin{eqnarray*}
\mbox{\rm vec}(A) &=& (a_{1,1},a_{2,1},\dots, a_{m,1}, \dots ,
a_{1,n},a_{2,n},\dots, a_{m,n})^\top\ ,
\end{eqnarray*}
that is, we stack the columns of $A$.
Then, \eqref{Qredk} is written as a linear system in
$\mbox{\rm vec}(Q_k)$:
\begin{equation}
(\Id_n\otimes \Lambda - e^{2\pi\ii k \omega} \Lambda^\top \otimes \Id_n )
\mbox{\rm vec}(Q_k) = -\mbox{\rm vec}(\tilde S_k)\ .
\end{equation}
This equation has a unique solution if
$\lambda_i \neq e^{2\pi\ii k \omega} \lambda_j$ for all
$\lambda_i,\lambda_j$ eigenvalues of $\Lambda$.
This method is useful since we do not need to deal with complex eigenvalues
and all the procedures can be set in the realm of real Fourier series.
Once we have computed the torus, we can diagonalize the matrix $\Lambda$ to
know the eigenvalues (real or complex).
\end{remark}
\subsection{Almost reducibility}
\label{almostreducible}
Note that in the normally hyperbolic case, all the algorithms
we have designed are contractions. Hence, there is
room to maneuver and change the algorithms without altering their
contraction properties.
In the reducibility case, the basic observation is that, if
the only thing that we need is a computational step,
we do not need that the torus is exactly reducible.
If we detect that improving the reducibility
becomes computationally hard, we just use the approximate
solution of the reducibility equation to obtain
an improved solution of the Newton step.
Even if we do not achieve complete reducibility
this is still more efficient than the projection method.
We give some more algorithm details.
Let $K_0(\th)$ be the approximate invariant torus, whose monodromy matrix
$M(\th)= \Dif F(K_0(\th),\th)$ is close to be reducible.
That is:
\begin{eqnarray}
P(\th)^{-1}(F(K_0(\th-\omega),\th-\omega) - K_0(\th)) &=& R(\th) \ , \\
P(\th+\omega)^{-1} M(\th) P(\th) - \Lambda &=& S(\th) \ ,
\end{eqnarray}
where $P(\th)$ and the constant matrix $\Lambda$ are given,
and $R(\th), S(\th)$ are assumed to be small.
We will look for the invariant torus as
$K(\th)= K_0(\th) + P(\th) H(\th)$. We obtain that $H$ has to
satisfy the equation:
\begin{equation*}
\begin{split}
\Lambda H(\th) - H(\th+\omega) =&
-S(\th) H(\th) \\
&
- P(\th+\omega)^{-1}
\left[ F(K_0(\th) + P(\th) H(\th),\th) - K_0(\th+\omega)\right.\\
&
\phantom{P(\th+\omega)^{-1} [ } -
\left. \Dif F(K_0(\th),\th) P(\th) H(\th)\right]\ .
\end{split}
\end{equation*}
We can compute $H$ using simple iteration: given $H$, we compute the
right hand side $\hat R(\th+\omega)$,
and compute the new $H$ solving
$\Lambda H(\th) - H(\th+\omega) = \hat R(\th+\omega)$.
\subsection{Continuation of invariant tori}
\label{sec:continuation}
All the methods that we have discussed so far are methods
that improve a sufficiently approximate solution. So far,
we have not discussed how to obtain a solution approximate
enough so that the improving algorithms start working.
A natural technique in this situation is to use
a continuation method that follows a family connecting
the problem we are interested in to another problem that
can be solved explicitly. In many situations, the parameters
connecting to a trivial problem have a physical interpretation
(e.g. a coupling constant) but it is also quite common that
the parameters are just introduced for numerical analysis purposes.
These methods are often called homotopy methods.
For a reference about general properties of homotopy methods,
we refer to
\cite{Homotopyreference}. A general implementation of
homotopy methods is {\tt Hompack}.
In our case, however, we can again take advantage of
the geometric properties of the method and obtain
homotopy methods that are more efficient than the
general ones.
Let $F_\ep$ be a family of quasi-periodic maps,
all of them with the same frequency vector $\omega$.
The continuation curve is the set of
\[
(\ep, K_\ep)\ ,
\]
where $K_\ep$ is an invariant torus for $F_\ep$.
Suppose that we have computed an invariant torus
$K_{\ep_0}= K^0(\th)$, and we want to estimate the invariant torus
$K_\ep(\th)$ for $F_\ep$, with $\ep-\ep_0$ small.
We could just take $K^0$ as an initial
guess for the Newton method for $F_\ep$.
One can, however, obtain better initial guesses.
One general procedure, commonly used in homotopy methods is to
extrapolate from previously computed points in the curve.
In our case, however, we can compute rather efficiently the
derivatives with respect to $\ep$ of $K_\ep$. This can lead
to more efficient methods.
Notice that in \cite{HLlth} it is shown that when the tori are
normally hyperbolic, the dependence on the parameters is as smooth as
the family of maps. This serves as a justification of the
homotopy method when the tori remain normally hyperbolic.
Since
\[
F_\ep(K_\ep(\th),\th) = K_\ep(\th+\omega),
\]
by differentiating with respect to $\ep$ and evaluating at $\ep_0$,
we obtain that the first
variation of the family $K_\ep$ in $\ep= \ep_0$,
\[
K^1(\th)= \frac{\partial K_\ep}{\partial \ep}_{\mid \ep= \ep_0}(\th)\ ,
\]
satisfies the linear equation
\begin{equation}
\label{continuation}
\Dif F_0 (K^0(\th),\th) K^1(\th) - K^1(\th+\omega)= -F^1(K^0(\th),\th)\ ,
\end{equation}
where
\[
F^1(x,\th) = \frac{\partial F}{\partial \ep}_{\mid \ep= \ep_0}
(x,\th) \ .
\]
Hence, we can compute the derivative with respect to
parameters by solving the linear equation \eqref{continuation}, which is
identical to the equations that we have
studied in
Section~\ref{Projection_method} or
Section~\ref{Reducibility_method}.
Notice also that an adapted frame for $K_{\ep_0}$ is an approximate
adapted frame for $K_\ep$. This allows us to use the
computational effort invested in computing the guess into
the refinement that we need to perform for the new value of
$\ep$.
It is also possible to compute other derivatives
to obtain better guesses in the continuation method.
The basic idea is to take more derivatives with respect to
$\ep$ and match the terms. Each of the steps involves solving
an equation of the same type that we have solved already
for the first derivative. We will, however, not pursue this matter here.
\section{Computation of invariant subbundles}
\label{Computation of invariant subbundles}
In this section, we describe algorithms to compute invariant
subbundles of the linearization once we have computed an
invariant torus $K$.
To do so, we study the spectral properties of the transfer operator
associated to $M(\th)= \Dif F(K(\th),\th)$, that are translated
geometrically in the existence of invariant subbundles for the linear
skew product
\begin{equation}
\label{lsp}
\begin{array}{l}
\bar v = M(\th) v\ , \\
\bar\th= \th + \omega\ .
\end{array}
\end{equation}
We will develop algorithms for the study of the properties of
the cocycle \eqref{lsp}. The goal is to develop algorithms to compute
invariant subbundles that correspond to different spectral annuli.
For more information on properties of the spectrum
of transfer operators and
their relation to
geometric properties of the cocycles, we refer to \cite{trilogy1,trilogy2}
and references there.
Notice that both Projection method and Reducibility method give information
about the dynamics of the cocycle \eqref{lsp}. We will isolate here the study
of the cocycle from the computation of the invariant torus.
\begin{remark}
Note that, since we have results that tell us that the approximate
solutions of the invariant circle are close to true solutions, we
can easily estimate the difference between the true cocycle and
the cocycle \eqref{lsp} evaluated on the approximate solution.
The approximate solutions we will obtain for the invariance
equations with the numerical approximation of the cocycle are
also approximate solutions for the invariance equation of
the true invariant torus.
Since we also have results that tell us that approximate solutions of
the invariance equations are close to true solutions, we have that
the approximate solutions of the invariance equations
for an approximately invariant torus correspond to
true solutions of the invariant torus.
\end{remark}
\subsection{Computation of spectral subbundles}
\label{spectral_subbundles}
Our first task is to construct frames for the
different invariant subbundles.
This is an extension of the algorithm that we used in
Section~\ref{Projection_method}. In that case, we
used only two bundles, one stable and one unstable.
In this case, we will try to study bundles separated by different
expansion rates.
We will present an iterative algorithm that, given an
approximate invariant frame produces a more approximate one.
In practice, the following algorithm is used as part of a
continuation method.
\begin{Algorithm}
Let $M(\th)\in M_{n}(\nr)$
be a vector bundle map over the rotation $\omega$.
Let $P(\th)\in M_{n}(\nr)$ be
an approximate adapted frame, such that
\[
P(\th+\omega)^{-1} M(\th) P(\th) - \Lambda(\th) = S(\th)
\]
is small enough, where $\Lambda$ is the block-diagonal matrix
\[
\Lambda(\th) = \blockdiag(\Lambda_1(\th), \dots, \Lambda_k(\th))\ ,
\]
with $\Lambda_i(\th)\in M_{n_i}(\nr)$ ($n_1+\dots + n_k= n$). For a
suitable Finsler metric $\norm{\cdot}$, assume that
\begin{equation}\label{gapcondition}
\norm{\Lambda_i} \norm{\Lambda_{j}^{-1}} < 1\ ,
\end{equation}
for $i,j= 1,\dots,k$ with $i<j$. Then the equation for the block
$Q^{i,j}$ with $i>j$ can be written as
\begin{equation} \label{Qequationigj}
\begin{split}
Q^{i,j}(\th) =&
\Lambda_i(\th)^{-1} \left[ Q^{i,j}(\th+\omega) \Lambda_j(\th)
-\left(S^{i,j}(\th) + \sum_{l= 1}^k S^{i,l}(\th) Q^{l,j}(\th)\right) \right.
\\
& \left. + Q^{i,j}(\th+\omega)
\left( S^{j,j}(\th) + \sum_{l= 1}^k S^{j,l}(\th) Q^{l,j}(\th) \right)
\right] \ .
\end{split}
\end{equation}
Because \eqref{gapcondition} we have that, for
small $S, Q$ the RHS of \eqref{Qequationigj},\eqref{Qequationisj}
are contractions, when considered as functionals depending on
$Q$. Hence, by iterating the RHS of \eqref{Qequationisj},\eqref{Qequationigj},
we solve \eqref{Qequation}.
\begin{remark}
The theory of invariant manifolds in \cite{HLlth}
and the algorithms for invariant manifolds that we
will discuss later in Section~\ref{computation_stable_manifold}
do not require that the invariant
subbundles out of which we construct the manifold are
spectral subbundles. Indeed they do not require that they
have invariant complements (an example of this situation
is the eigenvector in a non-trivial Jordan block).
Invariant bundles without invariant complements
appear in practice, for example in
identical systems with master-slave coupling. Nevertheless, these
situations are unstable under general perturbations. Hence,
the algorithms to compute them cannot be general purpose
algorithms such as the ones described here and have
to rely on the specific properties of the system
that we are considering.
\end{remark}
\subsection{Computation of Floquet transformations}
\label{Floquet}
The most precise information about normal behavior that
one can obtain is when the cocycle is reducible
that is, we can find a frame in which the
cocycle \eqref{lsp} becomes a matrix independent of $\th$
(see Definition~\ref{def:reducible}). We will assume along this section that
the frequency vector $\omega$ is Diophantine.
In this section, we discuss different algorithms to
compute numerically the reducing change of variables, also
known as Floquet transformation.
Some of them are not perturbative.
A first observation is that the reducibility equation \eqref{reduc},
\[
M(\th) P(\th) = P(\th+\omega) \Lambda\ ,
\] is not uniquely solvable for
$P(\th)$ and $\Lambda$ (besides linear and constant
changes of variables).
If the cocycle is reducible to a matrix
\[
\Lambda= \diag(\lambda_1,\dots,\lambda_n)
\]
through a change of variables $P(\th)$,
then it is also reducible to a matrix
\[
\Lambda_k=
\diag(e^{-2\pi\ii k_1\cdot\omega} \lambda_1,\dots,
e^{-2\pi\ii k_n\cdot\omega} \lambda_n)\ ,
\] where
$k= (k_1,\dots,k_n) \in(\nz^d)^n$, through a change of variables
$P_k(\th)= P(\th) E_k(\th)$, where
\[
E_k(\th)= \diag(e^{2\pi\ii k_1\cdot\th} ,\dots,
e^{2\pi\ii k_n\cdot\th})\ .
\]
That is to say, $M(\th) P_k(\th)= P_k(\th+\omega) \Lambda_k$.
Notice that the transformations $E_k$ for $k\neq 0$ are not near the
identity \cite{MoserP84}.
This observation plays an important role in the theory of reducibility.
We have used them in \cite{HLlex} to cross resonances
(see remarks~\ref{notclosetoidentity},\ref{examplestandard}).
Henceforth, when we refer to the eigenvalues of the cocycle,
we are referring to the eigenvalues of one of the reduced matrices.
The choice of one particular reduced matrix can follow from numerical
computations, or from topological arguments, as we will see in the examples
in \cite{HLlex}.
There are several algorithms to compute
$P(\th)$ and $\Lambda$ which we have considered.
\subsubsection{Discretization method}
In \cite{Jorba01} the following
heuristic method of solving \eqref{reduc} is proposed:
discretize the infinite dimensional problem into a standard matrix eigenvalue
problem, cutting off the tails of the Fourier series,
and use a package of linear algebra to solve it.
If one computes all the eigenvalues of the discretized problem,
one has to choose the suitable ones. The method produces spurious eigenvalues,
that have to be discarded (in fact, the discretized eigenvalue problem could
even produce zero eigenvalues, that are not present in the spectral problem).
Choosing the real eigenvalues is an easier task than choosing the pairs
of complex conjugated eigenvalues.
This method is slow, because we have to compute all the eigenvalues of a
large matrix, and then we have to select the appropriate ones. See
\cite{Jorba01}
for details.
\subsubsection {Rational approximation of the frequencies}
In this method we start by choosing a sequence of rational vectors
$p_i/n_i \in \nq^d$ ($p_i\in \nz^d$ and $n_i \in \nn$) converging to the
irrational vector $\omega$. If $d=1$ it is natural to use the
continued fraction algorithm.
If $d > 1$ there are several algorithms to generate rational approximations,
one of the best known ones is the Jacobi-Perron algorithm. (See
\cite{LagariasH86, HardcastleK02} for a survey of rational
approximation algorithms in higher dimensional spaces.)
For each $i$ we have,
\[
P(\th+n_i\omega)^{-1}
M(\th+(n_i-1)\omega) \dots M(\th+\omega) M(\th) P(\th)=
\Lambda^{n_i}\ .
\]
Since $\th+n_i\omega$ is close to an integer vector and $P(\th)$ is
1-periodic in all its variables, then $P(\th+n_i\omega) \simeq P(\th)$,
and the eigenvalues of
\begin{equation}
\label{Mn}
M(\th,n_i)= M(\th+(n_i-1)\omega) \dots M(\th+\omega) M(\th)
\end{equation}
will be close to the eigenvalues of $\Lambda^{n_i}$.
Notice that the procedure is independent of $\th$ (since the rotation by
$\omega$ is ergodic),
and we can take $\th= 0$.
Once we have estimated the eigenvalues of $\Lambda^{n_i}$, we have to take
the $n_i$-th root of them to estimate the eigenvalues of $\Lambda$.
Since there are $n_i$ complex $n_i$-th roots, we also have
to decide which one to choose.
This method improves the estimates of the eigenvalues at each step.
Since the eigenvalues of $\Lambda^{n_i}$ can grow or decrease
very fast, we have to make scalings when iterating. Notice that this idea
is closely related to some methods of computation of Lyapunov multipliers.
\note{A}{Ya he puesto la nota de Simo.}
\begin{remark}
{From} several estimates of an eigenvalue corresponding to several rational
approximations, we can produce a new estimate by interpolation. A similar
trick is proposed in \cite{Simo98} to compute invariant tori.
\end{remark}
\begin{remark}
One of the most efficient methods to compute non-dominant
Lyapunov multipliers is the reduction to
upper triangular matrices.
See \cite{BenettinGGS78,EckmannR85,NusseY98,DieciV02}.
We recursively define
\[
Q_{n+1}(\th) R_{n+1}(\th) = M(\th + n \omega) Q_n(\th)
\]
where the $Q$ are unitary and the $R$ are upper triangular.
Then, one can read off the Lyapunov exponents
by just accumulating the diagonal elements of the $R$.
The only steps of the method are multiplications by
unitary transformations and the QR algorithm, which are
rather robust to round off error.
The algorithm above is quite close to the proof
in \cite{Oseledec68} of the multiplicative ergodic theorem
and it is useful in great generality, including non-uniformly
hyperbolic systems with a singular invariant measure.
The cases considered here are notably simpler than the full
generality of the theorem. We are assuming uniform hyperbolicity
and the rotations are uniquely ergodic. In the case that the bundles
are one-dimensional, the existence of negative (resp. positive) Lyapunov
exponents is equivalent to uniform contractivity (resp.\ expansivity).
See Remark~\ref{uniform1d}.
A bundle version of the above method has been developed in
\cite{DieciV02}.
\end{remark}
\begin{remark}
Recall that Lyapunov multipliers are in the spectrum
\cite{SackerS78b,trilogy1},
and in the reducible case the spectrum is a set of circles, so they can be
computed easily.
In fact, the degree of precision of the computation of the Lyapunov
multipliers
is an indicator of the reducibility of the cocycle. (See \cite{HLlex} for
numerical examples).
\end{remark}
The computation of the eigenvalues of the matrices \eqref{Mn} is equivalent
to the computation of the eigenvalues of the $\hat M$ matrices defined by:
\begin{equation}
\label{hat_matrix}
\hat M_{n_i} =
\left(\begin{array}{ccccc}
0 & & \dots & & M_{n_i-1} \\
M_0 & 0 & \dots & & 0 \\
& M_1 & \dots & & 0 \\
& & \ddots& & \vdots \\
& & & M_{n_i-2} & 0
\end{array}\right)\
\end{equation}
where $M_k= M(\th+k\omega)$ \cite{trilogy2}. Although these matrices
are very large, we can take advantage of the symmetry of their
eigenvalues in order to perform the computations:
if $z$ is an eigenvalue of $\hat M_{n_i}$, then $z e^{2\pi\ii k/n_i}$
is also an eigenvalue, where $k= 0,\dots, n_i-1$.
\subsubsection{Power method applied to rank-one bundles}\label{powermethod}
One of the simplest methods to compute a
dominant eigenvalue (and the corresponding eigenvector)
is just to iterate the application of the operator
to a vector.
\note{A}{``matrix'' por ``vector''.}
Applying the same idea,
we can iterate the transfer on a section. Since there is a circle
of dominant eigenvalues, applying the algorithm to real sections we will
obtain the real dominant eigenvalue (if it exists). If the computation
suggests that there are two dominant eigenvalues (for instance,
they are complex conjugated),
then we have to iterate a pair of sections, and so on. As is standard in the
power
method, we find it convenient to
perform scalings at each step.
Assume we are in the simplest case that there is one dominating eigenvalue
which is simple.
Then, the power method produces a section $v$ and a real function $m(\th)$
such
that
\[
M(\th) v(\th)= m(\th) v(\th+\omega) \ .
\]
The function $m(\th)$ represents the dynamics on the invariant subbundle of
rank 1 generated by $v(\th)$.
Since the rotation $\omega$ is Diophantine, we can reduce the 1-dimensional
cocycle associated to $m$ and $\omega$, that is, we look for a positive
function
$p(\th)$ and a constant $\lambda$ such that
\begin{equation}
\label{1reducibility}
m(\th) p(\th) = \lambda p(\th+\omega)\ .
\end{equation}
To do so, if $m(\th) > 0$, we look for a positive $\lambda$. Then, by
taking logarithms in both sides in \eqref{1reducibility}, we obtain the
equation
\begin{equation}
\label{m+}
\log m(\th) + \log p(\th) = \log\lambda + \log p(\th+\omega) \ ,
\end{equation}
that is a typical KAM equation for $l(\th)= \log p(\th)$. We fix, then,
\begin{equation}
\label{lambda+}
\lambda= \exp \left(\int_{\th\in\nt^d} \log m(\th)\ d\th\right)\ ,
\end{equation}
and solve the equation for $l(\th)$. Finally, $p(\th)= \exp l(\th)$.
\note{A}{A\~nadida la ultima frase}
If $m(\th) < 0$, we look for a negative $\lambda$, and the equation to
be solved is
\begin{equation}
\label{m-}
\log \left(-m(\th)\right) + \log p(\th) = \log\left(-\lambda\right) +
\log p(\th+\omega) \ .
\end{equation}
In this case,
\begin{equation}
\label{lambda-}
\lambda= -\exp\left(\int_{\th\in\nt^d} \log\left(-m(\th)\right)\ d\th\right)\ .
\end{equation}
\begin{remark}
It is very easy to see that (we refer to \cite{trilogy2}
for details) for $\omega$ ergodic, the spectrum of the
transfer operator associated to the cocycle $m$ is a circle of radius
\[
\rho= \exp \left(\int_{\th\in\nt^d} \log \left|m(\th)\right|\ d\th\right)\ .
\]
\end{remark}
\begin{remark} \label{uniform1d}
It is worth remarking that in the one-dimensional case,
uniform contractivity is equivalent to the -- in principle
much weaker -- notion of negative Lyapunov exponent.
The following argument comes from \cite{FalcoliniL92}.
We note that, for one dimensional bundles,
negative Lyapunov exponents means
that
\[
\log \lambda = \lim_n \frac{1}{n} \sum_{i= 0}^{n-1} \log |m(\theta+i\omega)|
< 0 \ .
\]
\note{A}{Faltaba la formula. He puesto $\log \lambda$ porque usamos la
notacion de ``multiplicador'' mas que la de ``exponente''. La $\lambda$
suele significar tambien contraccion o expansion uniforme.}
\note{A}{He puesto valores absolutos en las ``ms''}
In the one-dimensional case, however, the RHS of the equation above is
a Birkhoff sum. If $\log m$ is continuous, using that an irrational
rotation is uniquely ergodic we obtain that the Birkhoff
sums converge uniformly in $\th$. Therefore,
there is an $N$ such that
\[
\log |m(\th + (N -1) \omega) \cdots
m(\th )| < N ( \log \lambda + \delta) < 0.
\]
This implies that the contraction rate of a power is uniform.
\end{remark}
The power method is easily implementable as a numerical algorithm, but
there is a case that we should be aware of. In some cases, we
have found that
$m(\th)$ vanishes at some points. This is perfectly
consistent if $v(\th)$ also vanishes at the zeros of $m$.
Hence, $v$ is not a good global frame for a bundle of rank one.
This is unavoidable if
we take as starting point of the iteration an embedding that
has a different index than the index of the invariant bundle.
(This complication happens in numerical implementations
near resonances, when the
invariant bundle changes index). If we apply the iterations of
a bundle of the {\sl ``wrong''} topology, we find empirically
that the iteration develops zeros.
See the examples in \cite{HLlex}.
\note{A}{He reescrito el remark. A ver que tal.}
\begin{remark}
Related to the importance of the topology of the objects,
notice that if a cocycle,
considered as a map $M:\nt^d \rightarrow GL(n, \nr)$
and a frequency $\omega$, is not homotopic to the identity, then
it is not reducible. Moreover, in such a case, the corresponding transfer
operator changes the homotopic invariants of continuous sections,
as the index.
\end{remark}
\begin{remark}
\label{proyectivization}
\def\np{{\mathbb P}}
The power method to compute the dominant eigenvalue of an $n$-dimensional
matrix $A$ is equivalent to finding the attracting fixed point of the dynamical
system induced in the $(n-1)$-dimensional projective space by iteration.
Recall
that the $(n-1)$-dimensional projective space $\np^{n-1}$ is the set of
straight lines of $\nr^n$ passing through the origin. Recall also that
$\np^{n-1} = \ns^{n-1}/{a}$, where $a:\ns^{n-1} \to \ns^{n-1}$ is the
antipodal map $a(u)= -u$ (so we identify the points $u$ and $-u$ of the unit
sphere).
With this idea in mind, we can also projectivize the cocycle \eqref{lsp}
and look for the attractors and repellors (or in general, invariant sets) in
the projective bundle.
First, we can work in the unit sphere
bundle, so for $u\in \nr^n$ with $\norm{u}= 1$ and $\th\in \nt^d$, we define
\begin{equation}
\label{ulsp}
\begin{array}{l}
\bar u = M(\th) u/\norm{M(\th) u}\ , \\
\bar\th= \th + \omega\ .
\end{array}
\end{equation}
Then, we identify $u$ with $-u$.
Some numerical experiments appear
in \cite{HLlth}, where $n= 2$ and the $1$-dimensional projective space is
identified with $[0,\pi]/\{0= \pi\}$ (it is parameterized by an angle $\alpha$
from $0$ to $\pi$).
\end{remark}
\begin{remark}
In its most straightforward implementation, the power method
computes just the dominant bundle.
It is possible to use the very well
known trick of {\sl deflation} to compute the rest of the eigendirections.
In our setting,
deflation corresponds to the following general construction:
if we know an invariant
subbundle $E_1$, we construct a complementary subbundle $E_2$, and then
the cocycle expressed using adapted frames to $E= E_1 \oplus E_2$ has
triangular
form:
\[
M(\th) =
\left(\begin{array}{cc}
M^1(\th) & N(\th) \\
0 & M^2(\th)
\end{array}\right)\ .
\]
The spectrum of the transfer $\M_\omega$ is the union of the spectra of
$\M_\omega^1$ and $\M_\omega^2$. See \cite{trilogy1}.
\end{remark}
\begin{remark}
There are also other well known numerical methods
to compute intermediate eigenvalues. General references
are \cite{Demmel97,Trefethen97}.
We mention the algorithms of Wielandt.
If $\lambda$ is an approximate eigenvalue
of $\M_\omega$, then $\M_\omega-\lambda\Id$ has an eigenvalue close to zero,
whose inverse will be the dominant eigenvalue of $(\M_\omega-\lambda\Id)^{-1}$.
So, at each step we have to solve $(\M_\omega-\lambda\Id)^{-1} v = w$,
where $v$ is the previous estimate of the eigensection, and $w$ is
the new one. This is equivalent to solving $(\M_\omega-\lambda\Id) w= v$,
that can be done by discretization in Fourier series.
Since the matrix is close to singular,
it could be useful to employ singular value
decomposition.
\note{A}{La ultima frase la he cambiado}
Another standard method -- which, however we have not implemented --
is Arnoldi's method.
\end{remark}
In the numerical examples in \cite{HLlex}, all the cocycles are two
dimensional,
so we can use the power method to compute the two eigenvalues
(if they are different). Even if this is the simplest case,
our numerical experiments suggest new mathematical phenomena.
\subsubsection{Reducibility method}
In the core of the reducibility method to
compute invariant tori, we use Newton method to reduce the linear skew product
given by the monodromy matrix. We can use this ``black box'' to reduce
the linear skew product, avoiding large matrices. This corresponds to applying
Algorithm~\ref{algorithm_reducibility} to the linear quasi-periodic
map given by
\[
F(x,\th)= M(\th) x\
\]
and $\omega$. Obviously, the torus $K(\th)= 0$ is invariant, but the algorithm
also looks for the Floquet transformation.
Of course, Newton method needs good first approximations to converge.
This can be obtained by continuation with respect to parameters.
We can also apply first the
other methods to obtain good starting guesses.
\section{Geometric properties and computation}
\label{geometry}
In case the system has geometrical properties (e.g. preserving
a symplectic matrix), they are recovered by
the invariant objects, and sometimes it is possible to adapt the algorithms
so that they take advantage of these geometric properties.
For instance, when $n= 2m$ and $F$ is symplectic,
and the torus is normally hyperbolic,
both stable and unstable subbundles are Lagrangian
\cite{trilogy1} (for a normally hyperbolic
torus in a general map, they are isotropic).
Therefore, when one of them is known, it is
straightforward to compute the other.
We give here some ideas of algorithms that take advantage of
geometric structures.
For the purposes of this paper, a (trivial) symplectic bundle on the torus is
a vector bundle $E= \nr^{2m}\times\nt^d$ equipped with a linear symplectic
structure on each fiber $E_\th= \nr^{2m}\times\{\th\}$. That is to say,
we have a non-degenerate skew-symmetric matrix $J(\th)$,
depending smoothly on $\th$, defining a skew-symmetric product
$\Omega_\th$ on each fiber:
\[
\Omega_\th (v_\th,w_\th) = v_\th^\top J(\th) w_\th\ ,
\]
for all $v_\th,w_\th\in E_\th$. That is:
\begin{equation}
\label{symplectic_form}
J(\th)^\top= -J(\th)\ .
\end{equation}
Among the subbundles of $E$, the Lagrangian
subbundles are particularly important. A Lagrangian subbundle $L$ has rank $m$ and satisfies
\begin{equation}
\label{lagrangian}
L(\th)^\top J(\th) L(\th) = 0\ ,
\end{equation}
where $L(\th)$ is a (local) frame of $L$.
It is well known that there exists a complex vector bundle structure
on the symplectic bundle \cite{Weinstein77}, that is to say, there exists
a symplectic anti-involution on each fiber,
$I(\th)$, depending smoothly on $\th$:
\begin{equation}
\label{complex_operator}
I(\th)^\top J(\th) I(\th)= J(\th)\ , \
I(\th)^2= -\Id\ .
\end{equation}
As a consequence, if the symplectic bundle $E$ admits a Lagrangian subbundle
$L$, there exist Lagrangian subbundles which are complementary to $L$:
the subbundle $IL$ is Lagrangian and satisfies $E= L \oplus IL$.
The vectors in $IL$ are symplectic conjugated to those in $L$. A suitable
frame for $IL$ is
\[
L(\th)^*= I(\th) L(\th) b(\th)\ ,
\]
where
\begin{equation}
\label{bmatrix}
b(\th)= (L(\th)^\top J(\th) I(\th) L(\th))^{-1}
\end{equation}
is an $m\times m$ symmetric matrix. This choice of the frame makes the matrix
\[
P(\th) = (L(\th) , L(\th)^*)
\]
to satisfy
\begin{equation}
\label{to_standard}
P(\th)^\top J(\th) P(\th) = J_0\ ,
\end{equation}
where
\[
J_0= \left(\begin{array}{cc} 0 & \Id_m \\ -\Id_m & 0 \end{array}\right)\
\]
is the standard symplectic matrix in $\nr^{2m}$.
That is to say, $P(\th)$ translates the symplectic form
$J(\th)$ to the standard $J_0$.
A conformal symplectic vector bundle map over a rotation $\omega$ is
a linear skew-product
\begin{equation}
\label{lssp}
\begin{array}{lcl}
\bar x= M(\th) x \ , \\
\bar \th= \th + \omega\ ,
\end{array}
\end{equation}
such that
\[
M(\th)^\top J(\th+\omega) M(\th) = \sigma(\th) J(\th)\ ,
\]
where $\sigma: \nt^d\to\nr$ is a non vanishing function.
Notice that for any $P(\th)$ satisfying \eqref{to_standard}
\[
(P(\th+\omega)^{-1} M(\th) P(\th))^\top J_0
(P(\th+\omega)^{-1} M(\th) P(\th)) = \sigma(\th) J_0\ ,
\]
that is, $P(\th+\omega)^{-1} M(\th) P(\th)$ is conformal symplectic
with respect to the standard form $J_0$.
Let $L$ be a Lagrangian subbundle, invariant for \eqref{lssp}. Let
$\lambda(\th)$ be the vector bundle map on $\nr^m\times\nt^d$ over the
rotation $\omega$ representing the dynamics on the subbundle:
\begin{equation}
\label{lagrangian_dynamics}
M(\th) L(\th)= L(\th+\omega) \lambda(\th)\ .
\end{equation}
We want to construct a complementary invariant Lagrangian subbundle.
First, notice that the frame
\[
P(\th) = (L(\th) , L(\th)^*) \ ,
\]
where $L(\th)^*= L(\th) a(\th) +
I(\th) L(\th) b(\th)$,
with $a(\th)$ an arbitrary $m\times m$ matrix,
satisfies \eqref{to_standard}. So then,
$P(\th+\omega)^{-1} M(\th) P(\th)$ is conformal symplectic
with respect to the standard form $J_0$. Since
$M(\th) L(\th) = L(\th+\omega) \lambda(\th)$, then
\[
P(\th+\omega)^{-1} M(\th) P(\th) =
\left(\begin{array}{cc}
\lambda(\th) & c(\th) \\
0 & \sigma(\th) \lambda(\th)^{-\top}
\end{array}\right)\ ,
\]
where
\begin{eqnarray*}
c(\th)
&=& - {L^*(\th+\omega)}^\top J(\th+\omega) M(\th)
L^*(\th) \\
&=&
\lambda(\th) a(\th) -
\sigma(\th) a(\th+\omega)^\top \lambda(\th)^{-\top} - d(\th)\ ,
\\ \\
d(\th)&=&
b(\th+\omega)^\top L(\th+\omega)^\top I(\th+\omega)^\top
J(\th+\omega) M(\th) I(\th) L(\th) b(\th)\ .
\end{eqnarray*}
Hence, we produce an invariant Lagrangian splitting $E= L \oplus L^*$
by
solving the equation
\begin{equation}
\label{lagrangian_splitting}
\lambda(\th) a(\th) -
\sigma(\th) a(\th+\omega)^\top \lambda(\th)^{-\top} = d(\th)\ .
\end{equation}
In the projection method and the reducibility method, we can take advantage
of the previous construction, because from an approximate stable invariant
subbundle we can compute an approximate unstable invariant subbundle.
Similar constructions also play an important role in KAM theory
\cite{Llave01,GonzalezJLlV00}. Other properties, such as reversibility,
are useful to simplify the computations \cite{trilogy1}.
\note{A}{Ultima frase nueva}
\section{Computation of the whiskers of an invariant torus}
\label{Computation of the whiskers}
In this section we develop algorithms to compute
invariant manifolds which are tangent to
an invariant bundle of the linearization
around an invariant torus.
Algorithms for the computation of invariant tori and
for invariant bundles of the linearization
are described in Sections \ref{Computation of invariant tori},
\ref{Computation of invariant subbundles}.
The general theory works for manifolds associated to
spectral subbundles that satisfy non-resonance conditions \cite{HLlth}.
In this paper, we will develop only algorithms in two
special cases: stable/unstable manifolds and
non-resonant manifolds associated to rank-1 bundles.
The algorithms for these two special cases present significant
simplifications with respect to the general case and they seem
to be important in practice. These are the algorithms we have implemented
so far. We will present some possible modifications to
cover the general case in remarks.
\subsection{Computation of stable and unstable and other
invariant manifolds}
\label{computation_stable_manifold}
We will assume that we have a
very accurate numerical solution $K$ of
the invariance equation \eqref{ieit}
and a frame $P(\th)$ in which the linear normal dynamics
given by the cocycle \eqref{lsp}
is given by $\Lambda(\th)$. That is:
\[
P(\th+\omega)^{-1} \Dif F(K(\th),\th) P(\th) =
\Lambda(\th)\ .
\]
We will denote $M(\th)= \Dif F(K(\th),\th)$.
We will assume that $\Lambda(\th)$ leaves invariant
a subbundle, which we will call $P(\th)E^s(\th)$. We will denote
by $P(\th)E^u(\th)$ a complementary subbundle.
Hence, $\Lambda(\th)$ expressed in these
coordinates has the form
\begin{equation}\label{Lambdaform}
\Lambda(\th) =
\left(\begin{array}{cc}
\Lambda^s(\th) & N(\th) \\
0 & \Lambda^u(\th)
\end{array}\right)\ .
\end{equation}
The notation $E^s, E^u$ is meant to suggest that
a very important special case is when the splitting is
between stable and unstable bundles. Nevertheless,
this is not necessary. The splitting need not
correspond even to spectral subbundles. For
example, if $N$ is not zero, the bundle $E^u$ is
not invariant. The phenomenon of invariant bundles
without invariant complements arises when we have identical
oscillators in which one of them forces the other but there is
no reverse effect. When the splitting indeed corresponds
to spectral subbundles there are systematic ways
of computing the $P$ and the $\Lambda$ described in
Section~\ref{spectral_subbundles}.
We recall that, by changing units in the stable direction, it is
possible to ensure that $\norm{N}$ is sufficiently small.
(See \cite{HLlth}.)
The goal of this section is to compute invariant manifolds
corresponding to invariant subspaces which
satisfy appropriate non-resonance conditions.
This includes, as particular cases, the computation of
(strong) stable and unstable manifolds, which we will treat in more detail.
Following the parameterization method, we
will compute an invariant manifold of $K$
by computing $W: \nr^{n_s}\times \nt^d \to \nr^n$
and $\Lambda^s: \nr^{n_s}\times \nt^d \to \nr^{n_s}$
in such a way that:
\begin{equation}
\label{stable_manifold}
F(W(\eta,\th),\th) =
W(\Lambda^s(\eta,\th),\th+\omega) \ .
\end{equation}
We will discretize $W$ as a Fourier-Taylor expansion
\begin{equation}
\label{Wexp}
W(\eta,\th)= \sum_{k= 0}^\infty W_k(\eta,\th) \ ,
\end{equation}
where $W_k(\eta,\th)$ are homogeneous polynomials of order $k$
in the $\eta$ variables. So, we can write
$W_k(\eta,\th) = W_k(\th) \eta^{\otimes k}$, where $W_k(\th)$
is a $k$-multilinear symmetric form from $\nr^{n_s}$ to $\nr^n$.
$\Lambda^s(\eta,\th)$ represents the dynamics on
the invariant manifold, and it will be given by a polynomial of degree
$L$ in the $\eta$ variables:
\begin{equation}
\label{Lexp}
\Lambda^s(\eta,\th)=
\sum_{k= 1}^L \Lambda_k^s(\eta,\th) \ .
\end{equation}
The degree $L$ is a positive integer such that
\begin{equation}
\label{condition}
\norm{\Lambda^s_1}_{C^0}^{L+ 1} \norm{\Lambda^{-1}}_{C^0} < 1 \, .
\end{equation}
(One can choose $L$, but in practical computations, taking
$L$ as small as possible reduces the computations needed. On the
other hand, near resonances, choosing $L$ larger may
give more numerical stability.)
The computation is based on substituting the expansions \eqref{Wexp} and
\eqref{Lexp} in \eqref{stable_manifold} to obtain a hierarchy of
equations for the homogeneous terms of $W(\eta,\th)$ and
$\Lambda^s(\eta,\th)$.
Notice that $W_0(\th)= K(\th)$,
and so the $0$-order equation
\[
F(W_0(\th),\th) = W_0(\th+\omega)
\]
is verified.
Using the frame $P(\th)$ we write
\begin{equation}\label{ordersW}
W(\eta,\th)=
K(\th) + P(\th) \sum_{k= 1}^\infty \tilde W_k (\eta,\th)\ .
\end{equation}
Each term $\tilde W_k(\eta,\th)$ is written as
\[
\tilde W_k(\eta,\th)= \left(\begin{array}{c} W_k^s(\eta,\th) \\
W_k^u(\eta,\th) \end{array}\right)\ .
\]
The $1$-order equation is
\begin{equation}\label{firstorder}
\Dif F(W(0,\th),\th) W_1(\th) =
W_1(\th+\omega) \Lambda^s_1(\th)\ .
\end{equation}
One possible solution of \eqref{firstorder} is
$W_1(\eta,\th) = V^s(\th)\eta$ and
$\Lambda^s_1(\eta,\th) = \Lambda^s_1(\th)\eta$.
That is to say,
\[
\tilde W_1(\eta,\th)=
\left(\begin{array}{c} \eta\\ 0 \end{array}\right)\ .
\]
\begin{remark}\label{scalechoice}
Note, however, that $W_1(\eta,\th) = \mu V^s(\th) \eta$ for each
$\mu \in \nr - \{0\}$ is also a solution of \eqref{firstorder}.
This is closely related to the multiplicity of the solutions
of \eqref{whiskerparam} pointed out in
Remark~\ref{nonuniqueness}. Choosing $W_1$ amounts to
choosing the scale in the coordinate $\eta$.
As we will see, once we make a choice for $W_1$, all the other
terms in the expansion are determined.
In the theory developed in \cite{HLlth}, it is shown that,
provided that one chooses the domain of $W_1$ small enough, one can
reduce the problem to a contraction mapping.
However, we observe that if some of the computed coefficients are
very small, this leads to numerical error since we lose information
when adding the small coefficient to a larger one. In numerical
implementations, it is advantageous to choose $W_1$
so that the coefficients for $W$ decrease but as slowly
as possible. In some cases, one can do a preliminary run,
adjusting the choice of $W_1$ to obtain some information on
the rate of decay of the coefficients. Then, one can
restart the calculation with the appropriate choice of
$W_1$.
\end{remark}
Suppose we have computed all the terms of order less than $k$.
% NOTE(review): a sentence introducing the subscript notation was garbled in
% the source at this point ("In the following, a subscript ${}{...") and could
% not be recovered; restore it from the original manuscript.
{From} the spectral properties of the transfer operators associated to
$\Lambda^{s}_1,\Lambda^{u}_1$,
\eqref{unstable_component} has a unique
solution $\tilde W^u_k(\th,\eta)$ provided that the
spectrum of the linearized operator restricted to
the bundle satisfies some non-resonance conditions,
namely that the product of $k$ points in the spectrum
of the restriction is not a point in the spectrum.
Once that we know $\tilde W^u_k$, we substitute it
in \eqref{stable_component}. Again, under
appropriate assumptions on the spectrum of $\Lambda^s_1$,
we can solve the equation \eqref{stable_component}.
See \cite{HLlth}. The theory of \eqref{stable_component} is
much richer than the theory for \eqref{unstable_component}.
Note that in \eqref{stable_component} we want to
find two unknowns $\tilde W^s_k$ and $\Lambda^s_k$.
The choice that requires less computation is to set
$\Lambda^s_k$ to zero whenever possible. On the other hand,
it can well be numerically advantageous to choose $\Lambda$
so that we improve the condition of the resulting equation.
Note that, once we choose $\Lambda^s_k$, the problem amounts to solving
a linear equation for $\tilde W^s_k$.
It is possible to choose $\Lambda^s_k$ in such a way that the
RHS of \eqref{stable_component} does not include
directions along which the operator in the LHS is hard to invert.
This happens for example, if the product of $k$ numbers in the
spectrum of the transfer operator along $E^s$ is close
to points in the spectrum of the operator along $E^u$.
\subsection{The case of stable and unstable manifolds}
In the particular case that the splitting $E^s, E^u$ is between
stable and unstable components of the spectrum, the theory is
much simpler. Note that in that case $N \equiv 0$.
A solution of \eqref{unstable_component},
can be obtained
by iterating the following equation, which is
equivalent to it:
\[
\tilde W^u_k(\eta,\th) =
(\Lambda^u_1(\th))^{-1}
\left[ \tilde W^u_k(\Lambda^s_1(\th) \eta,\th+\omega) +
\tilde R^u_k(\eta,\th) \right]\ .
\]
If $k\leq L$, we take $\tilde W_k^s(\eta,\th)= 0$ and
$\Lambda^s_k(\eta,\th)= -\tilde R^s_k(\eta,\th)$.
On the contrary, if $k>L$, then we take $\Lambda^s_k(\eta,\th)= 0$ and
$\tilde W^s_k(\eta,\th)$ solves the equation
\begin{equation}
\Lambda^s_1(\th) \tilde W^s_k(\eta,\th) -
\tilde W^s_k(\Lambda^s_1(\th) \eta,\th+\omega) =
\tilde R^s_k(\eta,\th) \ .
\end{equation}
Notice that because $\tilde W^s_k$ is homogeneous of
degree $k$ we have
\[
| \tilde W^s_k(\Lambda^s_1(\th) \eta , \th+\omega) |
\le
\norm{\tilde W^s_k}_{C^0} \cdot \norm{\Lambda^s_1}_{C^0}^k
\]
Since \eqref{stable_component} is equivalent to
\[
\tilde W^s_k(\eta,\th) =
\Lambda^s(\th) ^{-1}
\tilde W^s_k(\Lambda^s(\th) \eta, \th+\omega) -
\Lambda^s_k(\eta,\th) +
\Lambda^s(\th) ^{-1}
\tilde R^s_k(\eta,\th)
\]
we see that by \eqref{condition}, the RHS of the
above equation is a contraction and we can solve
\eqref{stable_component} just iterating.
\begin{remark}
Note that the solution of \eqref{stable_component},
in general is not unique. We can make other choices for $\Lambda_k^s$
than the ones we indicated.
Notably, in \cite{HLlth} it is shown that one can
choose $\Lambda^s_k = 0$ whenever the operator
that to $W^s_k$ associates the LHS of \eqref{stable_component} is
invertible. This is ensured by some non-resonance conditions on
the spectrum of the transfer operator associated to $\Lambda^s_1$.
\end{remark}
\begin{remark}
We can also represent the invariant manifold as a graph by taking
$\tilde W_k^s(\eta,\th)= 0$ and
$\Lambda^s_k(\eta,\th)= -\tilde R^s_k(\eta,\th)$ for all $k\geq 2$.
\end{remark}
\begin{remark}
At the moment, we have not implemented algorithms for
non-resonant manifolds except in the
rank-1 case, which will be discussed in the next Section.
It looks possible that, in the case that the torus is reducible,
there could be practical algorithms for these
non-resonant manifolds even in the case that
the rank is larger than $1$.
\end{remark}
\subsection{Invariant manifolds of rank 1 attached to an invariant torus}
\label{computation_rank1_manifold}
In this section we will look for a manifold
$W: \nr\times \nt^d\to \nr^n$
and a real number $\lambda$ with $|\lambda|\neq 1$
such that the invariance equation
\begin{equation}
\label{rank_1}
F(W(s,\th),\th) = W(\lambda s,\th+\omega),
\end{equation}
is satisfied, with $W(0,\th)= K(\th)$.
That is, we look for a parameterization of an invariant manifold $W$
through $K$ such that the normal dynamics is given by multiplication
with $\lambda$. We will assume that $\omega$ is Diophantine.
By differentiating \eqref{rank_1} with respect to $s$
and taking $s= 0$, we have
\begin{equation}
\label{eigensection}
\Dif F(K(\th),\th)
\frac{\partial W}{\partial s}(0,\th)=
\lambda \frac{\partial W}{\partial s}(0,\th+\omega).
\end{equation}
Let $M(\th)= \Dif F(K(\th),\th)$ be the differential of $F$ along the
invariant torus $K$ (the monodromy matrix),
that gives the normal behavior around the torus.
Let $W_1$ be the first variation of $W$ with respect to $s$ in $s= 0$, that is,
\[
W_1(\th)= \frac{\partial W}{\partial s}(0,\th).
\]
Then, \eqref{eigensection} just requires that $W_1(\th)$ is an eigensection
of the transfer operator $\M_\omega$, whose eigenvalue is $\lambda$.
As $\omega$ is Diophantine,
the fact that there is a real eigenvalue of the
transfer operator is equivalent to the fact that there is
a rank one trivial invariant bundle. The invariant bundle is, clearly,
the span of the eigensection. (See \cite{JohnsonS81,trilogy2}.)
Notice that Algorithm~\ref{algorithm_reducibility}, if it works, produces the
invariant torus $K$, a constant cocycle $\Lambda$ conjugated with the
cocycle $M(\th)$, and the corresponding conjugation $P(\th)$ (Floquet
transformation). See also Section~\ref{Floquet}.
Notice that if $\lambda$ is an eigenvalue of $\Lambda$, then
$e^{2\pi\ii k\omega} \lambda$ with $k\in\nz^d$ is an eigenvalue of $M(\th)$,
but we will refer to the eigenvalues of $\Lambda$ as the eigenvalues of
$M(\th)$. The eigensections are the columns of the matrix $P$, and are
solutions of equation \eqref{eigensection}.
The equation \eqref{eigensection},
being an eigenvalue equation, determines $\lambda$ -- the eigenvalue --
up to a choice and determines
$W_1$ up to multiplication by a number.
The fact that $W_1$ is determined up to a multiple is
related to the indeterminacy pointed out in
Remark~\ref{nonuniqueness}. From the mathematical
point of view, all choices are equivalent, but in
numerical implementations, it is convenient to make a
choice that minimizes the round-off error and increases the
numerical stability.
This choice is discussed in \cite{HLlex}. See also
the discussion in Remark~\ref{scalechoice}.
\begin{remark}
In the following construction, the reducibility of the cocycle $M(\th)$
is not strictly necessary; we only need that it has a rank one trivial invariant
subbundle whose dynamics, since $\omega$ is Diophantine, can be reduced to
a constant $\lambda$. We also need non-resonance conditions similar to those
in the previous section to reduce the dynamics on the whiskers to this constant
$\lambda$.
Notice that the Floquet transformation decreases the degree of
smoothness
of the parameterizations, but it is very useful for computations.
\end{remark}
We expand $W$ as
\begin{equation}\label{Wform}
W(s,\th)= \sum_{k= 0}^\infty W_k(\th) s^k,
\end{equation}
where the Taylor coefficients $W_k(\th)$ are periodic functions.
Notice that $W_0(\th)= K(\th)$ and $W_1(\th)$ is, for the sake of simplicity,
the first column in $P(\th)$.
Substituting \eqref{Wform} in the invariance equation
\eqref{rank_1} and equating the terms containing $s^k$, we obtain
\begin{equation}
\label{tosolve}
M(\th) W_k(\th) - \lambda^k W_k(\th+\omega) = R_k(\th),
\end{equation}
where $R_k$ is a polynomial expression which depends only
on $W_1,\ldots, W_{k-1}$. Hence, it can be
inductively assumed to be known. In practice, to
compute $R_k$ one
just needs to feed the polynomial of degree $k-1$
to a program built using
a package that manipulates Fourier-Taylor series. The
coefficient of degree $k$ is $R_k(\th)$ and all
the other coefficients of degree up to $k$
should be $0$. This provides a check of the accuracy
of the calculation. An alternative that is significantly faster is to
observe that if we keep all the intermediate terms in the
evaluation of $F(W(s,\th),\th)$, one can keep the coefficients
of the polynomial of
degree up to $k-1$ and only compute the coefficient of order $k$.
The implementation
of this procedure is very easy if $F$ is obtained
by repeated applications of arithmetic
operations and elementary
transcendental functions. (See the references mentioned
before for toolkits of Fourier-Taylor series or \cite{Knuth97}.)
Notice that \eqref{tosolve} has a solution if $\lambda^k$ is not in the
spectrum of the transfer operator $\M_\omega$. So, this gives a non-resonance
condition to linearize the dynamics on the whisker:
$\lambda^k \notin \Spec(\M_\omega)$ for all $k>1$. In fact, the non-resonance
condition involves a finite number of conditions, since $|\lambda| \neq 1$.
We have several options to solve numerically equation \eqref{tosolve}:
\begin{itemize}
\item We can translate the system to a large linear system, whose unknowns are
the coefficients of the Fourier series giving $W_k$. This produces,
of course, a large matrix.
\item Suppose we have computed the spectral annuli and their corresponding
spectral subbundles of $\M_\omega$ (see Section~\ref{spectral_subbundles}).
Since
$\lambda^k$ is in a gap of the spectrum, we can use the projection on the
bundles $E^{<\lambda^k}$ and $E^{> \lambda^k}$ (both of them are direct sums of
several spectral subbundles). On each projection, we solve the equations using
iteration. This is similar to the constructions in
Section~\ref{computation_stable_manifold}.
\item If the cocycle $M(\th)$ is reducible to a constant cocycle $\Lambda$,
and $P(\th)$ is the reducing transformation,
we write
\[
W_k(\th)= P(\th) \tilde W_k(\th),
\]
where $\tilde W_k(\th)$ solves the equation
\[
\Lambda \tilde W_k(\th) - \lambda^k \tilde W_k(\th + \omega)=
P(\th+\omega)^{-1} R_k(\th),
\]
provided that the non-resonance condition
\[
|\lambda|^k \neq |\lambda_i| \ \mbox{for all $k>1$, $i= 1,\dots,n$,}
\]
holds.
Reducibility diagonalizes the linear system described in the first
alternative.
\end{itemize}
\section*{Acknowledgments}
This work was initiated when A. H. was enjoying a
Fulbright Scholarship at Univ. of Texas at Austin in the year 2000.
A. H. has been supported by the MCyt/FEDER Grant
BFM2000--805 and the Catalan grant 2000SGR--27 and
the INTAS project 00-221. His work is also supported
by the MCyT/FEDER Grant BFM2003-07521-C02-01.
The work of R. L. has been partially supported by
N.~S.~F. grants. R. L. has enjoyed a Dean's Fellowship at
U.T. Austin Spring 03. Visits of R.L. to Barcelona
were supported by FBBV and ICREA.
\newcommand{\etalchar}[1]{$^{#1}$}
\def\cprime{$'$} \def\cprime{$'$} \def\cprime{$'$} \def\cprime{$'$}
\def\cprime{$'$}
\begin{thebibliography}{GJdlLV00}
\bibitem[AG03]{Homotopyreference}
Eugene~L. Allgower and Kurt Georg.
\newblock {\em Introduction to numerical continuation methods}, volume~45 of
{\em Classics in Applied Mathematics}.
\newblock Society for Industrial and Applied Mathematics (SIAM), Philadelphia,
PA, 2003.
\newblock Reprint of the 1990 edition, Springer-Verlag, Berlin.
\bibitem[BGGS78]{BenettinGGS78}
Giancarlo Benettin, Luigi Galgani, Antonio Giorgilli, and Jean-Marie Strelcyn.
\newblock Tous les nombres caract\'eristiques de {L}yapounov sont effectivement
calculables.
\newblock {\em C. R. Acad. Sci. Paris S\'er. A-B}, 286(9):A431--A433, 1978.
\bibitem[BHJ{\etalchar{+}}03]{BroerHJVW03}
Henk Broer, Heinz Han{\ss}mann, {\`A}ngel Jorba, Jordi Villanueva, and Florian
Wagener.
\newblock Normal-internal resonances in quasi-periodically forced oscillators:
a conservative approach.
\newblock {\em Nonlinearity}, 16(5):1751--1791, 2003.
\bibitem[BHTB90]{BroerHTB90}
H.~W. Broer, G.~B. Huitema, F.~Takens, and B.~L.~J. Braaksma.
\newblock Unfoldings and bifurcations of quasi-periodic tori.
\newblock {\em Mem. Amer. Math. Soc.}, 83(421):viii+175, 1990.
\bibitem[BOV97]{BroerOV97}
H.~W. Broer, H.~M. Osinga, and G.~Vegter.
\newblock Algorithms for computing normally hyperbolic invariant manifolds.
\newblock {\em Z. Angew. Math. Phys.}, 48(3):480--524, 1997.
\bibitem[Cap04]{Capella}
Antonio Capella.
\newblock A parameterization of the invariant manifolds associated to periodic
orbits in the {RTBP}.
\newblock {\em In preparation}, 2004.
\bibitem[CFdlL03a]{CabreFL03a}
Xavier Cabr{\'e}, Ernest Fontich, and Rafael de~la Llave.
\newblock The parameterization method for invariant manifolds. {I}. {M}anifolds
associated to non-resonant subspaces.
\newblock {\em Indiana Univ. Math. J.}, 52(2):283--328, 2003.
\bibitem[CFdlL03b]{CabreFL03b}
Xavier Cabr{\'e}, Ernest Fontich, and Rafael de~la Llave.
\newblock The parameterization method for invariant manifolds. {II}.
{R}egularity with respect to parameters.
\newblock {\em Indiana Univ. Math. J.}, 52(2):329--360, 2003.
\bibitem[CI79a]{ChencinerI79a}
A.~Chenciner and G.~Iooss.
\newblock Bifurcations de tores invariants.
\newblock {\em Arch. Rational Mech. Anal.}, 69(2):109--198, 1979.
\bibitem[CI79b]{ChencinerI79b}
A.~Chenciner and G.~Iooss.
\newblock Persistance et bifurcation de tores invariants.
\newblock {\em Arch. Rational Mech. Anal.}, 71(4):301--306, 1979.
\bibitem[CJ00]{CastellaJ00}
Enric Castell{\`a} and {\`A}ngel Jorba.
\newblock On the vertical families of two-dimensional tori near the triangular
points of the bicircular problem.
\newblock {\em Celestial Mech. Dynam. Astronom.}, 76(1):35--54, 2000.
\bibitem[Dem97]{Demmel97}
James~W. Demmel.
\newblock {\em Applied numerical linear algebra}.
\newblock Society for Industrial and Applied Mathematics (SIAM), Philadelphia,
PA, 1997.
\bibitem[DFJ01]{DellnitzFG01}
Michael Dellnitz, Gary Froyland, and Oliver Junge.
\newblock The algorithms behind {GAIO}-set oriented numerical methods for
dynamical systems.
\newblock In {\em Ergodic theory, analysis, and efficient simulation of
dynamical systems}, pages 145--174, 805--807. Springer, Berlin, 2001.
\bibitem[dlL97]{Llave97}
Rafael de~la Llave.
\newblock Invariant manifolds associated to nonresonant spectral subspaces.
\newblock {\em J. Statist. Phys.}, 87(1-2):211--249, 1997.
\bibitem[dlL01]{Llave01}
Rafael de~la Llave.
\newblock A tutorial on {KAM} theory.
\newblock In {\em Smooth ergodic theory and its applications (Seattle, WA,
1999)}, volume~69 of {\em Proc. Sympos. Pure Math.}, pages 175--292. Amer.
Math. Soc., Providence, RI, 2001.
\bibitem[dlL03]{Llave03}
R.~de~la Llave.
\newblock Invariant manifolds associated to invariant subspaces without
invariant complements: a graph transform approach.
\newblock {\em Math. Phys. Electron. J.}, 9:Paper 3, 35 pp. (electronic), 2003.
\bibitem[dlLO99]{LlaveO99}
R.~de~la Llave and R.~Obaya.
\newblock Regularity of the composition operator in spaces of {H}\"older
functions.
\newblock {\em Discrete Contin. Dynam. Systems}, 5(1):157--184, 1999.
\bibitem[dlLR91]{LlaveR90}
R.~de~la Llave and D.~Rana.
\newblock Accurate strategies for {K}.{A}.{M}.\ bounds and their
implementation.
\newblock In {\em Computer Aided Proofs in Analysis (Cincinnati, OH, 1989)},
pages 127--146. Springer, New York, 1991.
\bibitem[DLR91]{DieciLR91}
Luca Dieci, Jens Lorenz, and Robert~D. Russell.
\newblock Numerical calculation of invariant tori.
\newblock {\em SIAM J. Sci. Statist. Comput.}, 12(3):607--647, 1991.
\bibitem[DVV02]{DieciV02}
Luca Dieci and Erik~S. Van~Vleck.
\newblock Lyapunov spectral intervals: theory and computation.
\newblock {\em SIAM J. Numer. Anal.}, 40(2):516--542 (electronic), 2002.
\bibitem[ElB01]{ElBialy01}
Mohamed~S. ElBialy.
\newblock Sub-stable and weak-stable manifolds associated with finitely
non-resonant spectral subspaces.
\newblock {\em Math. Z.}, 236(4):717--777, 2001.
\bibitem[Eli88]{Eliasson88a}
L.~H. Eliasson.
\newblock Perturbations of stable invariant tori for {H}amiltonian systems.
\newblock {\em Ann. Scuola Norm. Sup. Pisa Cl. Sci. (4)}, 15(1):115--147
(1989), 1988.
\bibitem[Eli97]{Eliasson97}
L.~H. Eliasson.
\newblock Discrete one-dimensional quasi-periodic {S}chr\"odinger operators
with pure point spectrum.
\newblock {\em Acta Math.}, 179(2):153--196, 1997.
\bibitem[Eli01]{Eliasson99}
L.~H. Eliasson.
\newblock Almost reducibility of linear quasi-periodic systems.
\newblock In {\em Smooth ergodic theory and its applications (Seattle, WA,
1999)}, volume~69 of {\em Proc. Sympos. Pure Math.}, pages 679--705. Amer.
Math. Soc., Providence, RI, 2001.
\bibitem[ER85]{EckmannR85}
J.-P. Eckmann and D.~Ruelle.
\newblock Ergodic theory of chaos and strange attractors.
\newblock {\em Rev. Modern Phys.}, 57(3, part 1):617--656, 1985.
\bibitem[FdlL92]{FalcoliniL92}
Corrado Falcolini and Rafael de~la Llave.
\newblock A rigorous partial justification of {G}reene's criterion.
\newblock {\em J. Statist. Phys.}, 67(3-4):609--643, 1992.
\bibitem[Fen72]{Fenichel71}
Neil Fenichel.
\newblock Persistence and smoothness of invariant manifolds for flows.
\newblock {\em Indiana Univ. Math. J.}, 21:193--226, 1971/1972.
\bibitem[Fen77]{Fenichel77}
N.~Fenichel.
\newblock Asymptotic stability with rate conditions. {I}{I}.
\newblock {\em Indiana Univ. Math. J.}, 26(1):81--93, 1977.
\bibitem[Fen74]{Fenichel74}
N.~Fenichel.
\newblock Asymptotic stability with rate conditions.
\newblock {\em Indiana Univ. Math. J.}, 23:1109--1137, 1973/74.
\bibitem[GC91]{AutomaticDifferentiation}
Andreas Griewank and George~F. Corliss, editors.
\newblock {\em Automatic differentiation of algorithms}, Philadelphia, PA,
1991. Society for Industrial and Applied Mathematics (SIAM).
\newblock Theory, implementation, and application.
\bibitem[GJdlLV00]{GonzalezJLlV00}
A.~Gonzalez, A.~Jorba, R.~de~la Llave, and J.~Villanueva.
\newblock {K}{A}{M} theory for non action-angle {H}amiltonian systems.
\newblock {\em Manuscript}, 2000.
\bibitem[Har04]{Haro}
\`Alex Haro.
\newblock Some notes on symbolic and algebraic computation in dynamics.
\newblock {\em In preparation}, 2004.
\bibitem[HdlL01a]{HLlex}
A.~Haro and R.~de~la Llave.
\newblock A parameterization method for the computation of whiskers in quasi
periodic maps: numerical implementation and examples.
\newblock {\em Preprint}, 2001.
\bibitem[HdlL01b]{HLlth}
A.~Haro and R.~de~la Llave.
\newblock A parameterization method for the computation of whiskers in quasi
periodic maps: rigorous results.
\newblock {\em Preprint}, 2001.
\bibitem[HdlL03a]{trilogy1}
A.~Haro and R.~de~la Llave.
\newblock Spectral theory of transfer operators ({I}): General results.
\newblock {\em Preprint}, 2003.
\bibitem[HdlL03b]{trilogy2}
A.~Haro and R.~de~la Llave.
\newblock Spectral theory of transfer operators ({II}): Vector bundle maps over
rotations.
\newblock {\em Preprint}, 2003.
\bibitem[HK02]{HardcastleK02}
D.~M. Hardcastle and K.~Khanin.
\newblock The {$d$}-dimensional {G}auss transformation: strong convergence and
{L}yapunov exponents.
\newblock {\em Experiment. Math.}, 11(1):119--129, 2002.
\bibitem[HKM97]{MingyouKM97}
Mingyou Huang, Tassilo K{\"u}pper, and Norbert Masbaum.
\newblock Computation of invariant tori by the {F}ourier methods.
\newblock {\em SIAM J. Sci. Comput.}, 18(3):918--942, 1997.
\bibitem[HP70]{HirschP68}
M.W. Hirsch and C.C. Pugh.
\newblock Stable manifolds and hyperbolic sets.
\newblock In {\em Global Analysis (Proc. Sympos. Pure Math., Vol. XIV,
Berkeley, Calif., 1968)}, pages 133--163. Amer. Math. Soc., Providence, R.I.,
1970.
\bibitem[HPS77]{HirschPS77}
M.W. Hirsch, C.C. Pugh, and M.~Shub.
\newblock {\em Invariant manifolds}.
\newblock Springer-Verlag, Berlin, 1977.
\newblock Lecture Notes in Mathematics, Vol. 583.
\bibitem[JdlLZ99]{JorbaLZ99}
{\`A}.~Jorba, R.~de~la Llave, and M.~Zou.
\newblock {L}indstedt series for lower-dimensional tori.
\newblock In {\em {H}amiltonian Systems with Three or More Degrees of Freedom
(S'Agar\'o, 1995)}, pages 151--167. Kluwer Acad. Publ., Dordrecht, 1999.
\bibitem[Jor01]{Jorba01}
A.~Jorba.
\newblock Numerical computation of the normal behaviour of invariant curves of
$n$-dimensional maps.
\newblock {\em Nonlinearity}, 14:943--976, 2001.
\bibitem[JRRV97]{JorbaRV97}
{\`A}.~Jorba, R.~Ram\'{\i}rez-Ros, and J.~Villanueva.
\newblock Effective reducibility of quasi-periodic linear equations close to
constant coefficients.
\newblock {\em SIAM J. Math. Anal.}, 28(1):178--188, 1997.
\bibitem[JS81]{JohnsonS81}
Russell~A. Johnson and George~R. Sell.
\newblock Smoothness of spectral subbundles and reducibility of quasiperiodic
linear differential systems.
\newblock {\em J. Differential Equations}, 41(2):262--288, 1981.
\bibitem[JS92]{JorbaS92}
{\`A}.~Jorba and C.~Sim{\'o}.
\newblock On the reducibility of linear differential equations with
quasiperiodic coefficients.
\newblock {\em J. Differential Equations}, 98(1):111--124, 1992.
\bibitem[JS96]{JorbaS96}
{\`A}.~Jorba and C.~Sim{\'o}.
\newblock On quasi-periodic perturbations of elliptic equilibrium points.
\newblock {\em SIAM J. Math. Anal.}, 27(6):1704--1737, 1996.
\bibitem[KA81]{Kantor2}
L.~Kantorovitch and G.~Akilov.
\newblock {\em Analyse fonctionnelle. {T}ome 2}.
\newblock ``Mir'', Moscow, 1981.
\newblock \'Equations fonctionnelles. [Functional equations], Translated from
the second Russian edition by Djilali Embarek.
\bibitem[Kat76]{Kato76}
Tosio Kato.
\newblock {\em Perturbation theory for linear operators}.
\newblock Springer-Verlag, Berlin, second edition, 1976.
\newblock Grundlehren der Mathematischen Wissenschaften, Band 132.
\bibitem[Knu97]{Knuth97}
Donald~E. Knuth.
\newblock {\em The art of computer programming. {V}ol. 2: {S}eminumerical
algorithms}.
\newblock Addison-Wesley Publishing Co., Reading, Mass.-London-Don Mills, Ont,
third revised edition, 1997.
\bibitem[KO98]{OsingaK98}
Bernd Krauskopf and Hinke Osinga.
\newblock Growing $1${D} and quasi-$2${D} unstable manifolds of maps.
\newblock {\em J. Comput. Phys.}, 146(1):404--419, 1998.
\bibitem[KSW96]{KochSW96}
H.~Koch, A.~Schenkel, and P.~Wittwer.
\newblock Computer-assisted proofs in analysis and programming in logic: a case
study.
\newblock {\em SIAM Rev.}, 38(4):565--604, 1996.
\bibitem[Lan87]{Lanford87}
Oscar~E. Lanford, III.
\newblock Computer-assisted proofs in analysis.
\newblock In {\em Proceedings of the International Congress of Mathematicians,
Vol. 1, 2 (Berkeley, Calif., 1986)}, pages 1385--1394, Providence, RI, 1987.
Amer. Math. Soc.
\bibitem[Las89]{Laskar89}
J.~Laskar.
\newblock Manipulation des series.
\newblock In D.~Benest and E.~Froeschle, editors, {\em Modern methods in
celestial mechanics}. Editions Frontieres, Paris, 1989.
\bibitem[LH86]{LagariasH86}
Jeffrey~C. Lagarias and Johan~T. H{\aa}stad.
\newblock Simultaneous {D}iophantine approximation of rationals by rationals.
\newblock {\em J. Number Theory}, 24(2):200--228, 1986.
\bibitem[Mat68]{Mather68}
John~N. Mather.
\newblock Characterization of {A}nosov diffeomorphisms.
\newblock {\em Nederl. Akad. Wetensch. Proc. Ser. A 71 = Indag. Math.},
30:479--483, 1968.
\bibitem[Mn78]{Mane78}
Ricardo Ma\~n{\'e}.
\newblock Persistent manifolds are normally hyperbolic.
\newblock {\em Trans. Amer. Math. Soc.}, 246:261--283, 1978.
\bibitem[MP84]{MoserP84}
J{\"u}rgen Moser and J{\"u}rgen P{\"o}schel.
\newblock An extension of a result by {D}inaburg and {S}inai on quasiperiodic
potentials.
\newblock {\em Comment. Math. Helv.}, 59(1):39--85, 1984.
\bibitem[MS74]{MilnorStasheff}
John~W. Milnor and James~D. Stasheff.
\newblock {\em Characteristic classes}.
\newblock Princeton University Press, Princeton, N. J., 1974.
\newblock Annals of Mathematics Studies, No. 76.
\bibitem[NY98]{NusseY98}
Helena~E. Nusse and James~A. Yorke.
\newblock {\em Dynamics: numerical explorations}, volume 101 of {\em Applied
Mathematical Sciences}.
\newblock Springer-Verlag, New York, second edition, 1998.
\newblock Accompanying computer program Dynamics 2 coauthored by Brian R. Hunt
and Eric J. Kostelich, With 1 IBM-PC floppy disk (3.5 inch; HD).
\bibitem[OF00]{OsingaF00}
Hinke~M. Osinga and Ulrike Feudel.
\newblock Boundary crisis in quasiperiodically forced systems.
\newblock {\em Phys. D}, 141(1-2):54--64, 2000.
\bibitem[Ose68]{Oseledec68}
V.~I. Oseledec.
\newblock A multiplicative ergodic theorem. {C}haracteristic {L}japunov,
exponents of dynamical systems.
\newblock {\em Trudy Moskov. Mat. Ob\v s\v c.}, 19:179--210, 1968.
\bibitem[P{\"o}s89]{Poschel89}
J{\"u}rgen P{\"o}schel.
\newblock On elliptic lower-dimensional tori in {H}amiltonian systems.
\newblock {\em Math. Z.}, 202(4):559--608, 1989.
\bibitem[Ran87]{Rana87}
D.~Rana.
\newblock {\em Proof of Accurate Upper and Lower Bounds to Stability Domains in
Small Denominator Problems}.
\newblock PhD thesis, Princeton University, 1987.
\bibitem[RJB83]{RicklefsJB83}
R.~R. Ricklefs, W.~Jefferys, and R.~Broucke.
\newblock A general precompiler for algebraic manipulation.
\newblock {\em Celest. Mech.}, 29:179--190, 1983.
\bibitem[Rom61]{Rom61}
A.~Rom.
\newblock The manipulation of algebraic expressions.
\newblock {\em Comm. Assoc. Comp. Machinery}, 4(9), 1961.
\bibitem[Sch89]{Schmidt89}
D.~S. Schmidt.
\newblock Polypak: an algebraic processor for computations in celestial
mechanics.
\newblock In Chudnovsky and Jenks, editors, {\em Computer Algebra}, pages
111--120. Marcel Dekker, 1989.
\bibitem[Sel79]{Sell79}
George~R. Sell.
\newblock Bifurcation of higher-dimensional tori.
\newblock {\em Arch. Rational Mech. Anal.}, 69(3):199--230, 1979.
\bibitem[Sim89]{Simo89}
Carles Sim\'o.
\newblock On the analytical and numerical approximation of invariant manifolds.
\newblock In D.~Benest and C.~Froeschl\'e, editors, {\em Les M\'etodes Modernes
de la M\'ecanique C\'eleste}, pages 285--. Goutelas, 1989.
\bibitem[Sim96]{Simo96}
Carles Sim{\'o}.
\newblock Effective computations in {H}amiltonian dynamics.
\newblock In {\em M\'ecanique c\'eleste}, volume 1996 of {\em SMF Journ.
Annu.}, page~23. Soc. Math. France, Paris, 1996.
\bibitem[Sim98a]{Simo97}
C.~Sim{\'o}.
\newblock Effective computations in celestial mechanics and astrodynamics.
\newblock In {\em Modern methods of analytical mechanics and their applications
(Udine, 1997)}, volume 387 of {\em CISM Courses and Lectures}, pages 55--102.
Springer, Vienna, 1998.
\bibitem[Sim98b]{Simo98}
C.~Sim{\'o}.
\newblock Effective computations in celestial mechanics and astrodynamics.
\newblock In {\em Modern methods of analytical mechanics and their applications
(Udine, 1997)}, pages 55--102. Springer, Vienna, 1998.
\bibitem[SS78]{SackerS78b}
Robert~J. Sacker and George~R. Sell.
\newblock A spectral theory for linear differential systems.
\newblock {\em J. Differential Equations}, 27(3):320--358, 1978.
\bibitem[TB97]{Trefethen97}
Lloyd~N. Trefethen and David Bau, III.
\newblock {\em Numerical linear algebra}.
\newblock Society for Industrial and Applied Mathematics (SIAM), Philadelphia,
PA, 1997.
\bibitem[Tru00]{Trummer00}
Manfred~R. Trummer.
\newblock Spectral methods in computing invariant tori.
\newblock {\em Appl. Numer. Math.}, 34(2-3):275--292, 2000.
\newblock Auckland numerical ordinary differential equations (Auckland, 1998).
\bibitem[War91]{Warnock91}
Robert~L. Warnock.
\newblock Close approximations to invariant tori in nonlinear mechanics.
\newblock {\em Phys. Rev. Lett.}, 66(14):1803--1806, 1991.
\bibitem[Wei77]{Weinstein77}
A.~Weinstein.
\newblock {\em Lectures on Symplectic Manifolds}, volume~29 of {\em CBMS
Regional Conf. Ser. in Math.}
\newblock Amer. Math. Soc., Providence, 1977.
\end{thebibliography}
\end{document}