\input amstex
\documentstyle{amsppt}
\magnification=1200
\baselineskip=15 pt
\TagsOnRight
\redefine\mod{\text{\rom{mod}}\,}
\def\smat#1#2{\hskip -3mm\spreadmatrixlines{-2mm}\matrix #1\\ #2\endmatrix}
\def\tsmat#1#2{\hskip -4mm\matrix &{\ssize#1}\\ \vspace{-2mm} &{\ssize#2}
\endmatrix}
\loadbold
\topmatter
\title Zeros of the Wronskian and Renormalized Oscillation Theory
\endtitle
\author F.~Gesztesy$^{1}$, B.~Simon$^{2}$, and G.~Teschl$^{1}$
\endauthor
\leftheadtext{F.~Gesztesy, B.~Simon, and G.~Teschl}
\date July 13, 1995
\enddate
\thanks $^{1}$ Department of Mathematics, University of Missouri,
Columbia, MO 65211. E-mail for F.G.: mathfg\@\linebreak
mizzou1.missouri.edu; e-mail for G.T.: mathgr42\@mizzou1.missouri.edu
\endthanks
\thanks $^{2}$ Division of Physics, Mathematics, and Astronomy,
California Institute of Technology, Pasadena, CA 91125. This material
is based upon work supported by the National Science Foundation under
Grant No.~DMS-9401491. The Government has certain rights in this
material.
\endthanks
\thanks {\it{To appear in Am.~J.~Math.}}
\endthanks
\abstract For general Sturm-Liouville operators with separated boundary
conditions, we prove the following: If $E_{1,2}\in\Bbb R$
and if $u_{1,2}$ solve the differential equation $Hu_j=E_j u_j$, $j=1,2$
and respectively satisfy the boundary condition on the left/right, then
the dimension of the spectral projection $P_{(E_1, E_2)}(H)$ of
$H$ equals the number of zeros of the Wronskian of $u_1$ and $u_2$.
\endabstract
\endtopmatter
\document
\flushpar{\bf{\S 1. Introduction}}
\medpagebreak
For over a hundred and fifty years, oscillation theorems for
second-order differential equations have fascinated mathematicians.
Originating with Sturm's celebrated memoir [20], extended in a variety
of ways by B\^ocher [2] and others, a large body of material has been
accumulated since then (thorough treatments can be found, e.g., in
[4],[13],[18],[19], and the references therein). In this paper we'll
add a new wrinkle to oscillation theory by showing that zeros of
Wronskians can be used to count eigenvalues in situations where a
naive use of oscillation theory would give $\infty -\infty$.
To set the stage, we'll consider operators on $L^{2}((a,b); r\,dx)$
with $a<b$ and
$$
\tau=\frac{1}{r}\left(-\frac{d}{dx}\, p\, \frac{d}{dx}+q\right), \qquad
p^{-1},q,r\in L^{1}_{\text{\rom{loc}}}((a,b)) \text{ real-valued with
$p,r>0$ a.e.~on $(a,b)$}. \tag 1.1
$$
We'll use $\tau$ to describe the formal differentiation expression and
$H$ the operator given by $\tau$ with separated boundary conditions at
$a$ and/or $b$.
If $a$ (resp.~$b$) is finite and $q,p^{-1},r$ are in addition integrable
near $a$ (resp.~$b$), we'll say $a$ (resp.~$b$) is a {\it{regular}}
end point. We'll say $\tau$ respectively $H$ is {\it{regular}} if
both $a$ and $b$ are regular. As is usual, ([6], Section XIII.2;
[15], Section 17; [22], Chapter 3), we consider the local domain
$$
D_{\text{\rom{loc}}}=\{u \in AC_{\text{\rom{loc}}}((a,b)) \mid pu'\in
AC_{\text{\rom{loc}}}((a,b)),\ \tau u \in
L^{2}_{\text{\rom{loc}}}((a,b); r\,dx)\}, \tag 1.2
$$
where $AC_{\text{\rom{loc}}}((a,b))$ is the set of integrals
of $L^{1}_{\text{\rom{loc}}}$ functions (i.e., the set of locally
absolutely continuous functions) on $(a,b)$. General {\eightpoint{ODE}}
theory shows that for any $E\in\Bbb C$, $x_0\in (a,b)$, and $(\alpha,
\beta)\in\Bbb C^2$, there is a unique $u\in D_{\text{\rom{loc}}}$
such that $-(pu')'+qu - Eru=0$ for a.e.~$x \in (a,b)$ and $(u(x_0),
(pu')(x_0))=(\alpha,\beta)$.
The maximal and minimal operators are defined by taking
$$
D(T_{\text{\rom{max}}})=\{u\in L^{2}((a,b); r\,dx)\cap
D_{\text{\rom{loc}}}\mid \tau u\in L^{2}((a,b); r\,dx)\},
$$
with
$$
T_{\text{\rom{max}}}u=\tau u. \tag 1.3
$$
$T_{\text{\rom{min}}}$ is the operator closure of
$T_{\text{\rom{max}}}\restriction D_{\text{\rom{loc}}}\cap\{u
\text{ has compact support in $(a,b)$}\}$. Then $T_{\text{\rom{min}}}$
is symmetric and $T^{*}_{\text{\rom{min}}}=T_{\text{\rom{max}}}$.
According to the Weyl theory of self-adjoint extensions ([6], Section
XIII.6; [15], Section 18; [17], Appendix to X.1; [21], Section 8.4;
[22], Chapters 4 and 5), the deficiency indices of
$T_{\text{\rom{min}}}$ are $(0,0)$ or $(1,1)$ or $(2,2)$ depending
on whether it is limit point at both, one or neither end point.
Moreover, the self-adjoint extensions can be described in terms of
Wronskians ([6], Section XIII.2; [15], Sections 17 and 18; [21],
Section 8.4; [22], Chapter 3). Define
$$
W(u_1,u_2)(x)=u_1(x)(pu_2')(x)-(pu_1')(x)u_2(x). \tag 1.4
$$
Then if $T_{\text{\rom{min}}}$ is limit point at both ends,
$T_{\text{\rom{min}}}=T_{\text{\rom{max}}}=H$. If $T_{\text{\rom{min}}}$
is limit point at $b$ but not at $a$, for $H$ any self-adjoint extension
of $T_{\text{\rom{min}}}$, if $\varphi_-$ is any function in $D(H)
\backslash D(T_{\text{\rom{min}}})$, then
$$
D(H)=\{u\in D(T_{\text{\rom{max}}})\mid W(u,\varphi_-)(x)\to 0
\text{ as $x\downarrow a$}\}.
$$
Finally, if $\tau$ is limit circle at both ends, the operators $H$ with
separated boundary conditions are those for which we can find
$\varphi_\pm \in D(H)$, $\varphi_+\equiv 0$ near $a$, $\varphi_-
\equiv 0$ near $b$, and $\varphi_\pm \in D(H)\backslash
D(T_{\text{\rom{min}}})$. In that case,
$$
D(H)=\{u\in D(T_{\text{\rom{max}}})\mid W(u,\varphi_-)(x)\to 0
\text{ as $x\downarrow a$}, \,
W(u,\varphi_+)(x)\to 0 \text{ as $x\uparrow b$}\}.
$$
Of course, if $H$ is regular, we can just specify the boundary
conditions by taking values at $a,b$ since by regularity any $u\in
D(T_{\text{\rom{max}}})$ has $u,pu'$ continuous on $[a,b]$ (cf.~(A.4)).
It follows from this analysis that
\proclaim{Proposition 1.1} If $u_{1,2}\in D(H)$, then $W(u_1,u_2)(x)
\to 0$ as $x\to a$ or $b$.
\endproclaim
We'll call such operators SL operators (for Sturm-Liouville, but SL
includes separated boundary conditions (if necessary)).
It will be convenient to write $\ell_-=a$, $\ell_+=b$.
Throughout this paper we will denote by $\psi_{\pm}(z,x) \in
D_{\text{\rom{loc}}}$ solutions of $\tau \psi = z \psi$ so that
$\psi_{\pm}(z,\,.\,)$ is $L^2$ at $\ell_{\pm}$ and $\psi_{\pm}
(z,\,.\,)$ satisfies the appropriate boundary condition at $\ell_\pm$
in the sense that for any $u \in D(H)$, $\lim\limits_{x\to\ell_{\pm}}
W(\psi_{\pm}(z),u)(x)=0$. If $\psi_{\pm}(z,\,.\,)$ exist, they are
unique up to constant multiples. In particular, $\psi_{\pm}(z,\,.\,)$
exist for $z$ not in the essential spectrum of $H$ and we can assume
them to be holomorphic with respect to $z$ in ${\Bbb C} \backslash
\text{spec}(H)$ and real for $z\in\Bbb R$. One can choose
$$
\psi_\pm(z,x) = ((H-z)^{-1} \chi_{(c,d)})(x) \quad
\text{for } x \smat{> d}{< c}, \quad a<c<d<b.
$$
Here $(H-z)^{-1}$ denotes the resolvent of $H$ and $\chi_\Omega$ the
characteristic function of the set $\Omega \subseteq \Bbb R$. Clearly
we can include a finite number of isolated eigenvalues in the domain
of holomorphy of $\psi_\pm$ by removing the corresponding poles. Moreover,
to simplify notations, all solutions $u$ of $\tau u = Eu$ are understood
to be not identically vanishing and solutions associated with real values
of the spectral parameter $E$ are assumed to be real-valued in this paper.
Thus if $E$ is real and in the resolvent set for $H$ or an isolated
eigenvalue, we are guaranteed there are solutions that obey the boundary
conditions at $a$ or $b$. It can happen if $E$ is in the essential
spectrum that such solutions do not exist or it may happen that they do.
In Theorems 1.3, 1.4 below, we'll explicitly assume such solutions exist
for the energies of interest. If these energies are not in the essential
spectrum, that is automatically fulfilled.
With these preliminaries out of the way, we can describe a theorem
Hartman proves in [10] which gives an eigenvalue count in some cases
where oscillation theory would naively give $\infty -\infty$ (see
Weidmann [22], Chapter 14 for some results when $\tau$ is limit circle
at $b$).
\proclaim{Theorem 1.2 (Hartman [10], see also [9],[11])} Let $H$ be an
SL operator on $(a,b)$ which is regular at $a$ and limit point at $b$
and suppose $E_1< E_2$. Let $u_1$ \rom(resp.~$u_2$\rom) be $\psi_{-}
(E_1)$ \rom(resp.~$\psi_-(E_2)$\rom). Let $N(c)$, $c \in (a,b)$
denote the number of zeros of $u_1$ in $(a,c)$ minus the number of
zeros of $u_2$ in $(a,c)$. Let $P_\Omega(H)$ be the spectral projector
of $H$ corresponding to the Borel set $\Omega\subseteq\Bbb R$. Then,
if $\tau$ is oscillatory at $E_2$,
$$
\dim\text{\rom{Ran}}\, P_{(E_1, E_2)}(H) =
\varliminf\limits_{c\uparrow b}\, N(c), \tag 1.5a
$$
and if $\tau$ is non-oscillatory at $E_2$,
$$
\dim\text{\rom{Ran}}\, P_{[E_1, E_2)}(H) =
\lim\limits_{c\uparrow b}\, N(c). \tag 1.5b
$$
\endproclaim
If $\tau$ is oscillatory at $E_2$ (i.e., $u_2$ has infinitely many zeros
near $b$), $N(c)$ is not constant for large $c$ but instead varies between
$N_0$ and $N_0+1$. Hartman's result leaves several questions open:
What happens if $H$ is limit circle at $b$ or in the case where $H$
is not regular at either end (e.g., the important case of the real line
$(a,b)=(-\infty,\infty)$)? Moreover, it isn't clear when $c$ is so large
that $\varliminf\limits_{c\uparrow b}\,N(c)$ has been reached. It would
be better if we could actually count something analogous to the zero
count in ordinary oscillation theory. Our goal in this paper is to prove
such theorems.
The key is to look at zeros of the Wronskian. That zeros of the
Wronskian are related to oscillation theory is indicated by an old
paper of Leighton [14], who noted that if $u_j,pu_j' \in
AC_{\text{\rom{loc}}}((a,b))$, $j=1,2$ and $u_1$ and $u_2$ have a
non-vanishing Wronskian $W(u_1,u_2)$ in $(a,b)$, then their zeros must
intertwine each other. (In fact, $pu_1'$ must have opposite signs at
consecutive zeros of $u_1$, so by non-vanishing of $W$, $u_2$ must have
opposite signs at consecutive zeros of $u_1$ as well. Interchanging the
role of $u_1$ and $u_2$ yields strict interlacing of their zeros.)
Moreover, denote by $W_0(u_1,u_2)$ the number of zeros of the Wronskian
$W(u_1,u_2)(x)$ in $(a,b)$ and abbreviate $N_0=\dim\text{\rom{Ran}}\,
P_{(E_1, E_2)}(H)$. Our principal new results then read as follows:
\proclaim{Theorem 1.3} Let $E_1 < E_2$, $u_1=\psi_-(E_1)$, and
$u_2=\psi_+(E_2)$. Then
$$
W_0(u_1,u_2)=\dim\text{\rom{Ran}}\, P_{(E_1, E_2)}(H). \tag 1.6
$$
\endproclaim
\proclaim{Theorem 1.4} Let $E_1 < E_2$, $u_1=\psi_-(E_1)$, and
$u_2=\psi_-(E_2)$. If either $N_0=0$ or $H$ is limit point at $b$, then
\rom{(1.6)} holds. In general, either \rom{(1.6)} or
$$
W_0(u_1,u_2)=\dim\text{\rom{Ran}}\, P_{(E_1, E_2)}(H)-1 \tag 1.7
$$
holds.
\endproclaim
We'll see that if $E_2 > e > E_1$ with $e$
an eigenvalue and $|E_2-E_1|$ is small, then (1.7) holds rather than
(1.6). We'll also see that if $u_{1,2}$ are arbitrary solutions of
$\tau u_j=E_j u_j$, $j=1,2$, then, in general, $|W_0-N_0|\leq 2$ (this
means that if one of the quantities is infinite, the other is as well)
and any of $0, \pm 1,\pm 2$ can occur for $W_0-N_0$. Especially, if either
$E_1$ or $E_2$ is in the interior of the essential spectrum of $H$
(or $\dim\text{\rom{Ran}}\, P_{(E_1, E_2)}(H)=\infty$), then $W_0
(u_1,u_2)=\infty$ for any $u_1$ and $u_2$ satisfying $\tau u_j=E_j
u_j$, $j=1,2$ (cf.~Theorem 7.3).
Zeros of the Wronskians have two properties that are critical to these
results: First, zeros are precisely points where the Pr\"ufer angles
for $u_1$ and $u_2$ are equal $(\mod \pi)$. Second, if $\psi_- \in
D_{\text{\rom{loc}}}$ and $\psi_+ \in D_{\text{\rom{loc}}}$ satisfy
the boundary conditions at $a,b$, respectively, and $W(\psi_-,\psi_+)
(x_0)=0$ and if $(\psi_+(x_0), (p\psi_+')(x_0))\neq (0,0)$, then there
is a $\gamma$ such that
$$
\eta(x)= \cases \psi_-(x), & x\leq x_0 \\
\gamma\psi_+(x), & x\geq x_0
\endcases
$$
satisfies $\eta\in D(H)$ and
$$
H \eta(x) =\cases (\tau\psi_-)(x), & x\leq x_0 \\
\gamma (\tau\psi_+)(x), & x\geq x_0.
\endcases
$$
We'll explore these properties further in Propositions 3.1 and 3.2.
Section 2 provides a short proof of the ordinary oscillation theorem
in the regular case following the method in Courant-Hilbert ([5],
page 454). Even though this result is well-known (see, e.g., [1],
Theorem 8.4.5 and [22], Theorem 14.10 which describes the singular
case as well) we include it here since our overall strategy in this
paper is patterned after this proof: A variational argument will show
$N_0\geq W_0$ in Section 6 and a comparison-type argument in Sections
4 and 5 will prove $N_0\leq W_0$. Explicitly, in Section 5 we'll show
\proclaim{Theorem 1.5} Let $E_1 < E_2$. If $u_1=\psi_-(E_1)$ and
either $u_2=\psi_+(E_2)$ or $\tau u_2 = E_2 u_2$ and $H$ is limit point
at $b$, then
$$
W_0(u_1,u_2)\geq\dim\text{\rom{Ran}}\, P_{(E_1, E_2)}(H).
$$
\endproclaim
\flushpar In Section 6, we'll prove that
\proclaim{Theorem 1.6} Let $E_1 < E_2$. Let either $u_1=\psi_+(E_1)$ or
$u_1=\psi_-(E_1)$ and either $u_2=\psi_+(E_2)$ or $u_2=\psi_-(E_2)$.
Then
$$
W_0(u_1,u_2)\leq\dim\text{\rom{Ran}}\, P_{(E_1, E_2)}(H). \tag 1.8
$$
\endproclaim
\remark{Remark} Of course, by reflecting about a point $c \in (a,b)$,
Theorems 1.3--1.5 hold for $u_1 = \psi_+(E_1)$ and $u_2 = \psi_-(E_2)$
(and either $N_0 = 0$ or $H$ is limit point at $a$ in the corresponding
analog of Theorem 1.4 yields (1.6) and similarly, $\tau u_2 = E_2 u_2$
and $H$ is limit point at $a$ yields the conclusion in the corresponding
analog of Theorem 1.5).
\endremark
In Section 7, we provide a number of comments, examples, and extensions
including:
\proclaim{Theorem 1.7} Let $E_{1,2} \in\Bbb R$, $E_1 \ne E_2$,
$\tau u_j = E_j u_j$, $j=1,2$, $\tau v_2=E_2 v_2$. Then $|W_0
(u_1,u_2)-W_0(u_1,v_2)|\leq 1$.
\endproclaim
It is easy to see that Theorems 1.5, 1.6, and 1.7 imply Theorems 1.3
and 1.4.
Some facts on quadratic forms are collected in the appendix.
Our interest in this subject originated in attempts to provide a general
construction of isospectral potentials for one-dimensional Schr\"odinger
operators (see [8]) following previous work by Finkel, Isaacson, and
Trubowitz [7] (see also [3]) in the case of periodic potentials. In fact,
in the special case of periodic Schr\"odinger operators $H_p$, the
non-vanishing of $W(u_1, u_2)(x)$ for Floquet solutions $u_1 =
\psi_{\varepsilon_1}(E_1)$, $u_2 = \psi_{\varepsilon_2}(E_2)$, \;
$\varepsilon_{1,2} \in \{+,-\}$ of $H_p$, for $E_1$ and $E_2$ in the
same spectral gap of $H_p$, is proven in [7].
\vskip 0.3in
\flushpar {\bf{\S 2. Oscillation Theory}}
\medpagebreak
For background, we recall the following:
\proclaim{Theorem 2.1 ([22], Theorem 14.10)} Let $H$ be an SL operator
which is bounded from below. If $e_1 <\cdots < e_n < \cdots $ are its
eigenvalues below the essential spectrum and $\psi_1,\dots, \psi_n,
\dots$ its eigenfunctions, then $\psi_n$ has $n-1$ zeros in $(a,b)$.
All eigenvalues of $H$ are simple.
\endproclaim
\remark{Remarks} (i) Those used to thinking of the Dirichlet boundary
condition case need to be warned that it is {\it{not}} in general true
that if $E$ is not an eigenvalue of $H$, then the number of zeros,
$Z$, of $\psi_\pm(E)$ is the number, $N(E)$, of eigenvalues less
than $E$. In general, all one can say is $N=Z$ or $N=Z+1$.
(ii) In the special case where $\tau$ is regular at $a$ and $b$, any
associated SL operator $H$ is well-known to be bounded from below with
compact resolvent (see, e.g., [1], Theorem 8.4.5; [22], Theorem 13.2).
Thus Theorem 2.1 applies to the regular case (to be used in our proof of
Proposition 4.1).
\endremark
The first part of the proposition below is a simple integration by
parts and the second follows from the first.
\proclaim{Proposition 2.2} Let $E_1 \leq E_2$ and $\tau u_j = E_j u_j$,
$j=1,2$. Then for $a<c<d<b$,
$$
W(u_1,u_2)(d)-W(u_1,u_2)(c)=(E_1-E_2)\int\limits_{c}^{d}
u_1(x)u_2(x)r(x)\,dx \tag 2.1
$$
and for a.e.~$x\in (a,b)$,
$$
\frac{d}{dx}\,W(u_1,u_2)(x)=(E_1-E_2)u_1(x)u_2(x)r(x). \tag 2.2
$$
\endproclaim
\proclaim{Corollary 2.3} Let $E_1 < E_2$, $\tau u_j = E_j u_j$, $j=1,2$,
and $a\leq c<d\leq b$. Suppose that at $c$ and at $d$ either $u_1$
vanishes or $W(u_1,u_2)$ vanishes \rom(in the sense of a limit if $c=a$
or $d=b$\rom). Then $u_2$ has a zero in $(c,d)$.
\endproclaim
\demo{Proof} By perhaps flipping the sign of $u_1$, we can suppose
$u_1>0$ on $(c,d)$.
If $u_2$ has no zeros in $(c,d)$, we can suppose $u_2>0$ on $(c,d)$ again
by perhaps flipping signs. At each end point, $W(u_1,u_2)$ vanishes or
else $u_1=0$, $u_2>0$, and $u_1'(c)>0$ (or $u_1'(d)<0$). Thus,
$W(u_1,u_2)(c)\leq 0$, $W(u_1,u_2)(d)\geq 0$. Since the right side of
(2.1) is negative, this is inconsistent with (2.1). \qed
\enddemo
\demo{Proof of Theorem {\rom{2.1}}} We first prove that $\psi_n$ has
at least $n-1$ zeros and then that if $\psi_n$ has $m$ zeros, then
$(-\infty, e_n]$ has at least $(m+1)$ eigenvalues. If $\psi_n$ has $m$
zeros at $x_1, x_2,\dots, x_m$ and we let $x_0=a$, $x_{m+1}=b$, then by
Corollary 2.3, $\psi_{n+1}$ must have at least one zero in each of $(x_0,
x_1), (x_1, x_2),\dots, (x_m, x_{m+1})$, that is, $\psi_{n+1}$ has at
least $m+1$ zeros. It follows by induction that $\psi_n$ has at least
$n-1$ zeros.
On the other hand, if an eigenfunction $\psi_n$ has $m$ zeros, define
for $j=0,\dots, m$, $x_0=a,x_{m+1}=b$,
$$
\eta_j(x) = \cases \psi_n(x), & x_j \leq x \leq x_{j+1} \\
0, & \text{otherwise} \endcases, \quad 0 \leq j \leq m.
$$
Then $\eta_j$ is absolutely continuous with $p\eta'_j$ piecewise
continuous so $\eta_j$ is in the form domain of $H$ (see (A.6)) and
$\langle |H|^{1/2} \eta_j, \text{sgn}(H) |H|^{1/2} \eta_j \rangle =
e_n\,\| \eta_j \|^2$ (where $\langle \,\cdot\, ,\,\cdot\,\rangle$ and
$\| \cdot \|$ denote the scalar product and norm in $L^2((a,b);r\,dx)$).
Thus if $\eta=\sum\limits^m_{j=0} c_j \eta_j$, then $\langle |H|^{1/2}
\eta, \text{sgn}(H)|H|^{1/2} \eta \rangle = e_n\,\| \eta \|^2$. It follows
by the spectral theorem that there are at least $m+1$ eigenvalues in
$(-\infty, e_n]$. Since $H$ has separated boundary conditions, its point
spectrum is simple. \qed
\enddemo
The second part of the proof of Theorem 2.1 also shows:
\proclaim{Corollary 2.4} Let $H$ be an SL operator bounded from below.
If $\psi_+(E,\,.\,)$ \rom(resp.\linebreak $\psi_-(E,\,.\,)$\rom) has
$m$ zeros, then there are at least $m$ eigenvalues below $E$. In
particular, $E$ below the spectrum of $H$ implies that $\psi_\pm
(E,\,.\,)$ have no zeros.
\endproclaim
\vskip 0.3in
\flushpar{\bf{\S 3. Zeros of the Wronskian}}
\medpagebreak
Here we'll present the two aspects of zeros of the Wronskian which are
critical for the two halves of our proofs (i.e., for showing $N_0\geq
W_0$ and that $N_0\leq W_0$). First, the vanishing of the Wronskian lets
us patch solutions together:
\proclaim{Proposition 3.1} Suppose that $\psi_{+,j}, \psi_-\in
D_{\text{\rom{loc}}}$ and that $\psi_{+,j}$ and $\tau\psi_{+,j}$,
$j=1,2$ are in $L^{2}((c,b))$ and that $\psi_-$ and $\tau\psi_-$ are
in $L^{2}((a,c))$ for all $c\in (a,b)$. Suppose, in addition, that
$\psi_{+,j}$, $j=1,2$ satisfy the boundary condition defining $H$ at
$b$ \rom(i.e., $W(u,\psi_{+,j})(c)\to 0$ as $c\uparrow b$ for all $u\in
D(H)$\rom) and similarly, that $\psi_-$ satisfies the boundary condition
at $a$. Then
{\rom{(i)}} If $W(\psi_{+,1}, \psi_{+,2})(c)=0$ and $(\psi_{+,2}(c),
(p\psi'_{+,2})(c)) \neq (0,0)$, then there exists a $\gamma$ such that
$$
\eta=\chi_{[c,b)}(\psi_{+,1}-\gamma\psi_{+,2})\in D(H)
$$
and
$$
H\eta=\chi_{[c,b)}(\tau\psi_{+,1}-\gamma\tau\psi_{+,2}). \tag 3.1
$$
{\rom{(ii)}} If $W(\psi_{+,1}, \psi_-)(c)=0$ and $(\psi_-(c),
(p\psi_-')(c))\neq (0,0)$, then there is a $\gamma$ such that
$$
\eta=\gamma\chi_{(a,c]}\psi_- + \chi_{(c,b)}\psi_{+,1}\in D(H)
$$
and
$$
H\eta =\gamma\chi_{(a,c]}\tau\psi_- + \chi_{(c,b)}\tau\psi_{+,1}.
\tag 3.2
$$
\endproclaim
\demo{Proof} Clearly, $\eta$ and the right-hand-sides of (3.1)/(3.2) lie
in $L^2((a,b))$ and satisfy the boundary condition at $a$ and $b$, so it
suffices to prove that $\eta$ and $p\eta'$ are locally absolutely
continuous on $(a,b)$.
In case (i), if $\psi_{+,2}(c)\neq 0$, take $\gamma=-\psi_{+,1}(c)/
\psi_{+,2}(c)$ and otherwise (i.e., if $\psi_{+,2}(c) = 0$) take
$\gamma=- (p\psi'_{+,1})(c)/ (p\psi'_{+,2})(c)$. In either case,
$\eta$ and $p\eta'$ are continuous at $c$. Case (ii) is similar. \qed
\enddemo
The second aspect connects zeros of the Wronskian to Pr\"ufer
variables $\rho_u, \theta_u$ (for $u,pu'$ continuous) defined by
$$
u(x)=\rho_u(x)\sin(\theta_u(x)), \qquad (pu')(x)=\rho_u(x)
\cos(\theta_u(x)).
$$
If $(u(x), (pu')(x))$ is never $(0,0)$, then $\rho_u$ can be chosen
positive and $\theta_u$ is uniquely determined once a value of $\theta_u
(x_0)$ is chosen subject to the requirement $\theta_u$ continuous in $x$.
Notice that
$$
W(u_1,u_2)(x)= \rho_{u_1}(x)\rho_{u_2}(x)\sin(\theta_{u_1}(x)
- \theta_{u_2}(x)).
$$
Thus,
\proclaim{Proposition 3.2} Suppose $(u_j,pu_j')$, $j=1,2$ are never
$(0,0)$. Then $W(u_1,u_2)(x_0)$ is zero if and only if $\theta_{u_1}
(x_0)\equiv \theta_{u_2}(x_0)$ $(\mod \pi)$.
\endproclaim
In linking Pr\"ufer variables to rotation numbers, an important role
is played by the observation that because of
$$
u(x) = \int\limits_{x_0}^x \frac{\rho_u(t) \cos(\theta_u(t))}{p(t)}\,
dt,
$$
$\theta_u(x_0)\equiv 0$ $(\mod \pi)$ implies $[\theta_u(x)-
\theta_u(x_0)]\big/ (x-x_0) >0$ for $0 <|x - x_0|$ sufficiently small
and hence for all $0 <|x - x_0|$ if $(u,pu') \neq (0,0)$. (In fact,
suppose
$x_1 \ne x_0$ is the closest $x$ such that $\theta_u(x_1)=\theta_u
(x_0)$ then apply the local result at $x_1$ to obtain a contradiction.)
We summarize:
\proclaim{Proposition 3.3} If $(u,pu') \neq (0,0)$ then
$\theta_u(x_0)\equiv 0$ $(\mod \pi)$ implies
$$
[\theta_u(x)-\theta_u(x_0)]\big/ (x-x_0) >0
$$
for $x \ne x_0$. In particular, if $\theta_u(c)\in [0,\pi)$ and $u$ has
$n$ zeros in $(c,d)$, then $\theta_u(d-\epsilon)\in (n\pi, (n+1)\pi)$
for sufficiently small $\epsilon > 0$.
\endproclaim
In exactly the same way, we have
\proclaim{Proposition 3.4} Let $E_1 < E_2$, $\tau u_j=E_j u_j$, $j=1,2$,
and suppose $(u_j,pu_j')$, $j=1,2$ are never $(0,0)$. Set $\Delta(x)=
\theta_{u_2}(x)-\theta_{u_1}(x)$, so that $W(u_2,u_1)(x)=\rho_{u_1}(x)
\rho_{u_2}(x)\sin(\Delta(x))$. Then $\Delta(x_0)\equiv 0$ $(\mod \pi)$
implies $[\Delta(x)-\Delta(x_0)]\big/ (x-x_0)>0$ for $0<|x -x_0|$.
\endproclaim
\demo{Proof} If $\Delta(x_0)\equiv 0$ $(\mod 2\pi)$ and $\theta_{u_2}
(x_0)\not\equiv 0$ $(\mod \pi)$, then $\sin(\theta_{u_2}(x_0))
\sin(\theta_{u_1}(x_0))>0$ so $u_1(x)u_2(x)>0$ for $|x-x_0|$
sufficiently small, and thus by (2.2), $\frac{d}{dx}\,W(u_2,u_1)(x)>0$
for a.e.~$x$ near $x_0$ and so $\Delta(x)$ is increasing. The same holds
for $\Delta(x_0)\equiv\pi$ $(\mod 2\pi)$ and $\theta_{u_2}(x_0)
\not\equiv 0$ $(\mod \pi)$.
If $\Delta(x_0)\equiv 0$ $(\mod 2\pi)$ and $\theta_{u_1}(x_0)\equiv
\theta_{u_2}(x_0)\equiv 0$ $(\mod \pi)$, then $(pu_1')(x_0) (pu_2')
(x_0)>0$ and so since $u_1(x_0)=u_2(x_0)=0$, we see that it is still
true that $\frac{d}{dx}\,W(u_2,u_1)(x)>0$ a.e.~for $0<|x-x_0|$
sufficiently small. \qed
\enddemo
\remark{Remarks} (i) Suppose $r,p$ are continuous on $(a,b)$.
If $\theta_{u_1}(x_0)\equiv 0$ $(\mod \pi)$ then $\theta_{u_1}(x) -
\theta_{u_1}(x_0) = c_0(x-x_0) + o(x-x_0)$ with $c_0>0$. If $\Delta
(x_0)\equiv 0$ $(\mod \pi)$ and $\theta_{u_1}(x_0)\not\equiv 0$
$(\mod \pi)$, then $\Delta(x)-\Delta (x_0)=c_1(x-x_0)+o(x-x_0)$ with
$c_{1}>0$. If $\theta_{u_1} (x_0)\equiv 0\equiv\Delta(x_0)$ $(\mod \pi)$,
then $\Delta(x) - \Delta(x_0)=c_2(x-x_0)^{3}+o((x-x_0)^{3})$ with $c_2>0$.
Either way, $\Delta$ increases through $x_0$. (In fact, $c_0=p(x_0)^{-1}$,
$c_{1}= (E_2-E_1)r(x_0)\sin^2 (\theta_{u_1}(x_0))$ and $c_{2}=\frac{1}{3}
r(x_0)p(x_0)^{-2}(E_2-E_1)$.)
(ii) In other words, Propositions 3.3 and 3.4 say that the integer parts
of $\theta_u/ \pi$ and $\Delta_{u,v}/ \pi$ are increasing with respect
to $x\in(a,b)$ (even though $\theta_u$ and $\Delta_{u,v}$ themselves
are not necessarily monotone in $x$).
(iii) Let $E \in [E_1,E_2]$ and assume $[E_1,E_2]$ to be outside the
essential spectrum of $H$. Then, for $x \in (a,b)$ fixed,
$$
\frac{d\theta_{\psi_\pm}}{dE}\,(E,x) =
-\frac{\int\limits^{\ell_\pm}_x \psi_\pm(E,t)^2 \, r(t)\,dt}
{\rho_{\psi_\pm}(E,x)^{2}} \tag 3.3
$$
proves that $\mp\theta_{\psi_\pm}(E,x)$ is strictly increasing with
respect to $E$. In fact, from Proposition 2.3 one infers
$$
W(\psi_\pm(E), \psi_\pm(\tilde{E}))(x) = (\tilde{E}-E)
\int\limits^{\ell_\pm}_x \psi_\pm(E,t) \psi_\pm(\tilde{E},t)\, r(t)\,dt
$$
and using this to evaluate the limit $\lim\limits_{\tilde{E} \to E}
W(\psi_\pm(E), (\psi_\pm(E) - \psi_\pm(\tilde{E}))/(E-\tilde{E}))(x)$,
one obtains
$$
W(\psi_\pm(E), \frac{d\psi_\pm}{dE}\,(E))(x) =
\int\limits^{\ell_\pm}_x \psi_\pm(E,t)^2 \, r(t)\,dt.
$$
Inserting Pr\"ufer variables completes the proof of (3.3).
\endremark
\vskip 0.3in
\flushpar{\bf{\S 4. The Hare and the Tortoise ($\boldkey N_{\bold 0}
\boldsymbol{\leq}\boldkey W_{\bold 0}$ in the Regular Case)}}
\medpagebreak
Our goal in this section is to prove Theorem 1.5 in the regular case
with opposite boundary conditions, that is,
\proclaim{Proposition 4.1} Let $H$ be a regular SL operator and suppose
$E_1 < E_2$. Then
$$
\dim\text{\rom{Ran}}\, P_{(E_1, E_2)}(H)\leq
W_0(\psi_-(E_1), \psi_+(E_2)). \tag 4.1
$$
\endproclaim
To understand the underlying idea, consider first the case of Dirichlet
boundary conditions at $a$ and $b$ and suppose $n_0\equiv\dim
\text{\rom{Ran}}\, P_{(E_1, E_2)}(H)>0$. If there are $m$
eigenvalues below $E_1$ and $n_0+m$ below $E_2$, then, by standard
oscillation theory (essentially Proposition 3.3), $\theta_{\psi_-}(E_1,b)
\in (m\pi,(m+1)\pi)$ and $\theta_{\psi_+}(E_2,b)=(n_0+m+1)\pi$. Let
$\Gamma_{\pm}(E,x)\equiv \theta_{\psi_\pm}(E,x)$ $(\mod \pi)$, that is,
$\Gamma_{\pm}(E,x)\in [0,\pi)$ and $\Gamma_{\pm}-\theta_{\psi_\pm}\in
\Bbb Z\pi$.
Borrow a leaf from Aesop. Think of $\Gamma_-(E_1)$ as a tortoise and
$\Gamma_+(E_2)$ as a hare racing on a track of size $\pi$ with $0$
as the start and $\pi$ as the finish. Every time either runs through the
finish, it starts all over. Neither has to run only in the forward
direction (i.e., $\theta_{\psi_\pm}$ may not be monotone w.r.t.~$x$)
but they can't run in the wrong direction back through the start (i.e.,
Proposition 3.3 holds).
What makes $\Gamma_+(E_2)$ the hare to $\Gamma_-(E_1)$'s tortoise is
that $\Gamma_+(E_2)$ can only overtake $\Gamma_-(E_1)$, not the other
way around (i.e., Proposition 3.4 holds). Since $\Gamma_-(E_1,a)=0$ and
$\Gamma_+(E_2,a)>0$, the hare starts out ahead of the tortoise. Since
$\Gamma_-(E_1,c)<\pi$ but $\Gamma_+(E_2,c)\nearrow\pi$ as $c\nearrow b$,
the hare also ends up ahead (unlike in Aesop!).
Clearly, the number of times the hare crosses the finish line is the
sum of the number of times the tortoise does, plus the number of times
the hare ``laps,'' that is, passes the tortoise. Thus,
$$
n_0+m=m+W_0(\psi_-(E_1), \psi_+(E_2))
$$
so $W_0(\psi_-(E_1), \psi_+(E_2))=n_0$ in the Dirichlet case.
This picture also explains why it can happen that
$$
W_0(\psi_-(E_1), \psi_-(E_2))=n_0-1.
$$
For in this case, $\theta_{\psi_-}(E_1,a)=\theta_{\psi_-}(E_2,a)=0$.
The hare and tortoise start out together, so for $x=a+\epsilon$,
the hare is slightly ahead. If at $b$, $\Gamma_-(E_1,b)>\Gamma_-(E_2,b)$,
then the tortoise \`a la Aesop wins the races; thus the hare has lapped
the tortoise one time too few, that is,
$$
n_0+m-1=m+W_0(\psi_-(E_1), \psi_-(E_2))
$$
and so
$$
W_0=n_0-1. \tag 4.2
$$
Suppose now that $E_1 < E_2$ and that $E_1, E_2$ are not eigenvalues of
$H$. The considerations above, carried out for general separated
boundary conditions, prove:
\proclaim{Lemma 4.3} Let $H$ be a regular SL operator and let
$E_1 < E_2$ not be eigenvalues of $H$. Then
$$
\dim\text{\rom{Ran}}\, P_{(E_1, E_2)}(H)=
W_0(\psi_-(E_1), \psi_+(E_2)).
$$
\endproclaim
\proclaim{Lemma 4.4} Let $u$ be any solution of $\tau u=Eu$. If
$E>E_3>E_4$, then
$$
W_0(\psi_-(E_3), u)\leq W_0(\psi_-(E_4), u), \tag 4.3
$$
while if $E_3>E_4>E$, then the inequality in \rom{(4.3)} is reversed.
\endproclaim
\demo{Proof} In the first case, think of $u$ as defining a hare and
$\psi_-(E_j)$, $j=3,4$ as defining tortoises. The $E_3$ and $E_4$
tortoises start out at the same place and the $E_3$ tortoise runs
``faster'' in that it is always ahead after the start. Clearly, the
hare will pass the slower tortoise at least as often as the faster one.
In the second case, there are two hares (defined by $\psi_-(E_j)$,
$j=3,4$), which start out at the same place, and one tortoise (defined
by $u$) and it is clear the faster hare (given by $\psi_-(E_3)$) has
to pass the tortoise at least as often as the slower one. \qed
\enddemo
\proclaim{Lemma 4.5} Lemma \rom{4.4} remains true if every $\psi_-$ is
replaced by a $\psi_+$.
\endproclaim
\demo{Proof} Reflect at some point $c \in (a,b)$ implying an
interchange of $\psi_+$ and $\psi_-$. \qed
\enddemo
\demo{Proof of Proposition {\rom{4.1}}} If $N_0=0$, there is nothing
to prove. If $N_0\geq 1$, let $\text{spec}(H)\cap(E_1, E_2)=
\{e_{m}\}_{m\in M}$ and let $e_s \leq e_\ell$ be the smallest and
largest of the $e_m$'s. Thus, $N_0$ is the number of eigenvalues in
$[e_s, e_\ell]$ and so
$$
N_0=W_0(\psi_-(e_s-\epsilon), \psi_+(e_\ell+\epsilon))
$$
by Lemma 4.3. By Lemma 4.4,
$$
W_0(\psi_-(e_s-\epsilon), \psi_+(e_\ell+\epsilon))\leq
W_0(\psi_-(E_1), \psi_+(e_\ell+\epsilon))
$$
and then by Lemma 4.5, this is no larger than $W_0(\psi_-(E_1),
\psi_+(E_2))$. \qed
\enddemo
\newpage
%\vskip 0.3in
\flushpar{\bf{\S 5. Strong Limits ($\boldkey N_{\bold 0}
\boldsymbol{\leq}\boldkey W_{\bold 0}$ in the General Case)}}
\medpagebreak
Using the approach of Weidmann ([22], Chapter 14) to control some limits,
we prove Theorem 1.5 in this section. Fix functions $u_1,u_2 \in
D_{\text{\rom{loc}}}$. Pick $c_{n}\downarrow a$, $d_{n}\uparrow b$.
Define $\tilde{H}_n$ on $L^{2}((c_{n}, d_{n}); r\,dx)$ by imposing
the following boundary conditions on $\eta\in D(\tilde{H}_{n})$
$$
W(u_1,\eta)(c_{n})=0=W(u_2,\eta)(d_{n}). \tag 5.1
$$
On $L^{2}((a,b);r\,dx)= L^{2}((a,c_{n});r\,dx)\oplus L^{2}
((c_{n}, d_{n});r\,dx)\oplus L^{2}((d_{n}, b);r\,dx)$ take $H_{n}
=\alpha\Bbb I\oplus\tilde{H}_{n}\oplus\alpha\Bbb I$ with $\alpha$ a
fixed real constant. Then Weidmann proves:
\proclaim{Lemma 5.1} Suppose that either $H$ is limit point at
$a$ or that $u_1$ is an $\psi_-(E,x)$ for some $E$ and similarly, that
either $H$ is limit point at $b$ or $u_2$ is an $\psi_+(E',x)$ for
some $E'$. Then $H_n$ converges to $H$ in strong resolvent sense as
$n\to\infty$.
\endproclaim
The idea of Weidmann's proof is that it suffices to find a core $D_0$ of
$H$ such that for every $\eta \in D_0$ there exists an $n_0 \in \Bbb N$
with $\eta\in D(H_{n})$ for $n \geq n_0$ and $H_{n}\eta\to H\eta$ as $n$
tends to infinity (see [21], Theorem 9.16(i)). If $H$ is limit point at
both ends, take $\eta\in D_0\equiv\{u\in D_{\text{\rom{loc}}}\mid
\text{supp}(u)\text{ compact in }(a,b)\}$. Otherwise, pick
$\tilde{u}_1,\tilde{u}_2\in D(H)$ with $\tilde{u}_2 =u_2$ near $b$ and
$\tilde{u}_2=0$ near $a$ and with $\tilde{u}_1=u_1$ near $a$ and
$\tilde{u}_1=0$ near $b$. Then pick $\eta\in D_0+\text{span}[\tilde{u}_1,
\tilde{u}_2]$ which one can show is a core for $H$ ([22], Chapter 14).
Secondly we note:
\proclaim{Lemma 5.2} Let $A_{n}\to A$ in strong resolvent sense as $n \to
\infty$. Then
$$
\dim\text{\rom{Ran}}\,P_{(E_1,E_2)}(A)\leq
\varliminf\limits_{n \to \infty} \dim\text{\rom{Ran}}\,
P_{(E_1,E_2)}(A_{n}). \tag 5.2
$$
\endproclaim
\demo{Proof} Fix $m\leq\dim\text{\rom{Ran}}\,P_{(E_1,E_2)}(A)$ with
$m<\infty$. We'll prove $m\leq$ {\eightpoint{RHS}} of (5.2). Suppose
first that $E_1$ and $E_2$ aren't eigenvalues of $A$. Then by Theorem
VIII.24 of [16], $P_{(E_1,E_2)}(A_{n})\to P_{(E_1,E_2)}(A)$ strongly
as $n \to\infty$. Picking orthonormal $\varphi_{1},\dots,\varphi_{m}$
in $\text{\rom{Ran}}\,P_{(E_1,E_2)}(A)$, we see that
$$
\varliminf\limits_{n \to \infty} \text{Tr}(P_{(E_1,E_2)}(A_{n}))
\geq \varliminf\limits_{n \to \infty} \sum_j \langle\varphi_j,
P_{(E_1,E_2)}(A_{n})\varphi_j\rangle=m
$$
as required.
If $E_{1,2}$ are arbitrary, we can always find a $\delta > 0$ such that
$E_1+\delta, E_2-\delta$ are not eigenvalues of $A$ and such that
$\dim\text{\rom{Ran}}\, P_{(E_1+\delta, E_2-\delta)}(A)\geq m$. Thus,
$$
\varliminf\limits_{n \to \infty} \dim\text{\rom{Ran}}\,
P_{(E_1,E_2)}(A_{n})\geq \varliminf\limits_{n \to \infty}
\dim\text{\rom{Ran}}\, P_{(E_1+\delta, E_2-\delta)}(A_{n})\geq m.
\qed
$$
\enddemo
\demo{Proof of Theorem {\rom{1.5}}} Let $c_{n}\downarrow a$, $d_{n}
\uparrow b$ and $H_n$ be as in Lemma 5.1 with $\alpha\notin [E_1, E_2]$.
Proposition 4.1 implies $W_0(u_1,u_2)\geq\dim\text{\rom{Ran}}\,
P_{(E_1, E_2)}(\tilde H_{n})= \dim\text{\rom{Ran}}\,P_{(E_1, E_2)}
(H_{n})$ since $\alpha\notin [E_1,E_2]$. Thus by Lemmas 5.1 and 5.2,
$$
W_0(u_1,u_2)\geq\dim\text{\rom{Ran}}\,P_{(E_1, E_2)}(H)
$$
as was to be proven. \qed
\enddemo
\newpage
%\vskip 0.3in
\flushpar{\bf{\S 6. A Variational Argument ($\boldkey N_{\bold 0}
\boldsymbol{\geq}\boldkey W_{\bold 0}$)}}
\medpagebreak
In this section, we'll prove Theorem 1.6. Let $E_1 < E_2$ and suppose
first that $u_1=\psi_-(E_1)$ and $u_2=\psi_+(E_2)$. Let $x_1<\cdots<
x_{m}$ denote the zeros of the Wronskian $W(u_1,u_2)$ in $(a,b)$ and use
Proposition 3.1 to pick constants $\gamma_j$ such that $\eta_j\in D(H)$,
where
$$
\eta_j(x) = \cases u_1(x), & x\leq x_j \\
\gamma_j u_2(x), & x>x_j \endcases, \qquad
\tilde{\eta}_j(x) = \cases u_1(x), & x\leq x_j \\
-\gamma_j u_2(x), & x>x_j \endcases, \quad 1 \le j \le m.
$$
If $E_1$ is an eigenvalue of $H$, we define in addition $\eta_0 = u_2
= -\tilde{\eta}_0$, $x_0=a$ and if $E_2$ is an eigenvalue of $H$,
$\eta_{m+1} = u_1 = \tilde{\eta}_{m+1}$, $x_{m+1}=b$.
\proclaim{Lemma 6.1} $\langle\eta_j, \eta_{k}\rangle =\langle
\tilde{\eta}_j, \tilde{\eta}_{k}\rangle$ for all $j,k$ where
$\langle \,\cdot\, ,\,\cdot\,\rangle$ is the $L^{2}((a,b);r\,dx)$
inner product.
\endproclaim
\demo{Proof} Let $j<k$. The integrands of $\langle\eta_j, \eta_{k}\rangle$
and $\langle\tilde{\eta}_j, \tilde{\eta}_{k}\rangle$ coincide for
$x\leq x_j$ and for $x>x_{k}$ and differ by a sign for $x_j<x\leq x_{k}$.
Thus, by (2.1),
$$
\langle\eta_j, \eta_{k}\rangle - \langle\tilde{\eta}_j, \tilde{\eta}_{k}
\rangle = 2\gamma_j \int\limits_{x_j}^{x_{k}} u_1(x)u_2(x)r(x)\,dx
= \frac{2\gamma_j}{E_1-E_2}\,
[W(u_1,u_2)(x_{k})-W(u_1,u_2)(x_j)]=0. \qed
$$
\enddemo
Given Lemma 6.1, a variational argument patterned after the second half
of the proof of Theorem 2.1 yields $\dim\text{\rom{Ran}}\,
P_{(E_1, E_2)}(H)\geq m$, that is, (1.8) in this case.
Next, consider the case $u_1=\psi_-(E_1)$ and $u_2=\psi_-(E_2)$. Again,
let $x_1<\cdots<x_{m}$ denote the zeros of $W(u_1,u_2)$ in $(a,b)$ and
use Proposition 3.1 to pick constants $\gamma_j$ such that
$$
\eta_j(x) = \cases \gamma_j u_2(x), & x\leq x_j \\
u_1(x), & x>x_j \endcases, \qquad
\tilde{\eta}_j(x) = \cases -\gamma_j u_2(x), & x\leq x_j \\
u_1(x), & x>x_j \endcases, \quad 1 \le j \le m.
$$
If $E_1$ is an eigenvalue of $H$, we define in addition $\eta_0 = u_2
= -\tilde{\eta}_0$, $x_0=b$ and if $E_2$ is an eigenvalue of $H$,
$\eta_{m+1} = u_1 = \tilde{\eta}_{m+1}$, $x_{m+1}=b$. Again, $\eta_j$'s
are linearly independent by considering their supports. To prove the
analog of Lemma 6.1, we need
$$
\int\limits^{x_j}_{a}u_1(x)u_2(x)r(x)\, dx =0.
$$
But by (2.1), this integral is
$$
\lim\limits_{c\downarrow a}\, (E_1-E_2)^{-1}
[W(u_1,u_2)(x_j)-W(u_1,u_2)(c)].
$$
By hypothesis, $W(u_1,u_2)(x_j)=0$ and since $u_1$ and $u_2$ satisfy the
boundary condition at $a$, $W(u_1,u_2)(c)\to 0$ as $c \downarrow a$ by
Proposition 1.1. The cases $u_1=\psi_+(E_1)$, $u_2=\psi_\pm(E_2)$ can be
obtained by reflection.
\vskip 0.3in
\flushpar{\bf{\S 7. Extensions, Comments, and Examples}}
\medpagebreak
The following includes Theorem 1.7:
\proclaim{Theorem 7.1} Let $E_1\neq E_2$. Let $\tau u_j=E_j u_j$,
$j=1,2$, $\tau v_2=E_2 v_2$ with $u_2$ linearly independent of $v_2$.
Then the zeros of $W(u_1,u_2)$ interlace the zeros of $W(u_1,v_2)$ and
vice versa \rom(in the sense that there is exactly one zero of one
function in between two zeros of the other\rom). In particular,
$|W_0(u_1,u_2)-W_0(u_1,v_2)| \leq 1$.
\endproclaim
\demo{Proof} We'll suppose $E_1<E_2$; the case $E_1>E_2$ is analogous.
% NOTE(review): text appears to be missing here (the interlacing
% argument of Theorem 7.1 and subsequent material); restore from the
% original manuscript.
Since $\Gamma_{u_2}(E_2,b-\epsilon)>\Gamma_{u_1}(E_1,b-\epsilon)$, the
number of zeros
of $u_2$ equals the number of laps plus the number of zeros of $u_1$
plus one. \qed
\enddemo
Next we want to see how Hartman's theorem (Theorem 1.2) follows from
Theorem 1.4. We start by assuming $\tau$ to be oscillatory at $E_2$.
By Theorem 1.4, $W_0(u_1,u_2)=N_0$ since $H$ in Theorem 1.2 is assumed
to be limit point at $b$, so we need only show that $W_0(u_1,u_2) =
\varliminf\limits_{c\uparrow b}\, N(c)$ in order to prove (1.5a).
Suppose first that $W_0(u_1,u_2)=m<\infty$. Let $x_m$ be the last zero
of $W(u_1,u_2)(x)$ (set $x_m=a$ if $m=0$). Measure Pr\"ufer angles
starting at the same $\theta (a)\in [0,\pi)$. At $x_m$,
$$
\theta_{u_2}(x_{m})=\theta_{u_1}(x_{m})+m\pi \tag 7.1
$$
and then
$$
\Gamma_{u_2}(x_{m}+\epsilon)>\Gamma_{u_1}(x_{m}+\epsilon). \tag 7.2
$$
Let $N_{u_j}(x)$ be the number of zeros of $u_j$, $j=1,2$ in $(a,x)$.
By (7.1) and Proposition 3.3,
$$
N_{u_2}(x_{m})=N_{u_1}(x_{m})+m.
$$
As $x$ increases, (7.2) says that the next zero is of $u_2$ and then
since $W$ has no zeros, zeros of $u_1$ and $u_2$ must alternate. So
for $c>x_{m}$, $N(c)\equiv N_{u_2}(c)-N_{u_1}(c)$ alternates between
$m$ and $m+1$ and since $\tau$ is assumed to be oscillatory at $E_2$,
we immediately get $\varliminf\limits_{c\uparrow b}N(c)=m$.
If $W_0(u_1,u_2)=\infty$, let $x_m$ be the $m$th zero. Then (7.1) and
(7.2) still hold so $N(x_{m})=m$. Since $u_2$ has zeros between any
pair of zeros of $u_1$, $N(x)\geq m$ for any $x\geq x_m$, so
$\varliminf\limits_{c\uparrow b}N(c)=\infty$, as required.
If $\tau$ is non-oscillatory at $E_2$, we first assume that $E_{1,2}$
are not eigenvalues. We need to show that the hare ends up further
along than the tortoise. Without loss we assume $u_{1,2}(x)>0$ for $x$
near $b$ and claim in addition that $u_1 u_2$ is not $L^1$ near $b$.
If $u_1<u_2$ or $u_2<u_1$ for $x$ near $b$, then $u_1 u_2 > u_1^2$ or
$u_1 u_2 > u_2^2$ for $x$ near $b$ and $u_j
\not\in L^2$ near $b$. In fact, by hypothesis, $u_j \in L^2$ near $a$
and since $E_j$ are not eigenvalues and $\tau$ is limit point at $b$,
$u_j$ cannot be $L^2$ near $b$. Otherwise we can find two points $x_0$
and $x_1$ close to $b$ such that $W(u_1,u_2)(x_0) \ge 0$ and
$W(u_1,u_2)(x_1) \le 0$, contradicting (2.1). But $u_1 u_2$ not $L^1$
near $b$ together with (2.1) implies that $u_2'/u_2 > u_1'/u_1$ for
$x$ near $b$ which, by monotonicity of $\cot(\,.\,)$, yields that
the hare ends up ahead.
It remains to treat the case where $E_{1,2}$ could be eigenvalues.
Choose $E' < E''$ with $u(E')$ (resp.~$u(E'')$) equal to $\psi_{-}
(E')$ (resp.~$\psi_{-}(E'')$) the corresponding wave functions. Next,
choosing $E'$ below the spectrum of $H$ (implying that $u(E')$ has no
zeros by Corollary 2.4) shows that the number of zeros of $u(E'')$
equals the number of eigenvalues below $E''$ (compare Corollary 2.4),
that is, equals $\dim\text{\rom{Ran}}\, P_{(-\infty, E'')}$ if $E''$
is not an eigenvalue. Theorem 2.1 then covers the case where $E''$ is
an eigenvalue. Applying this to $E''=E_1$ and $E''=E_2$ proves (1.5b)
since clearly
$$
\dim\text{\rom{Ran}}\, P_{(-\infty, E'')} -
\dim\text{\rom{Ran}}\, P_{(-\infty,E')} =
\dim\text{\rom{Ran}}\, P_{[E', E'')}.
$$
\vskip 0.1in
Finally, we want to consider the relation to the density of states.
Given an SL operator $H$, let $H^{(L)}_D$ be the operator on $[-L,L]$
with Dirichlet boundary conditions. If the limit exists, we define
the integrated density of states (ids), $k(E)$, by the limit:
$$
k(E)=\lim\limits_{L\to\infty}\,(2L)^{-1}\dim\text{\rom{Ran}}\,
P_{(-\infty, E)} (H^{(L)}_D).
$$
\proclaim{Theorem 7.5} Suppose $H$ is such that the ids exists for all
$E$. Let $E_1<E_2$ and $\tau u_j=E_j u_j$, $j=1,2$. Then
$$
k(E_2)-k(E_1)=\lim\limits_{L\to\infty}\,(2L)^{-1}W^{(L)}_0(u_1,u_2),
$$
where $W^{(L)}_0(u_1,u_2)$ denotes the number of zeros of $W(u_1,u_2)$
in $(-L,L)$.
\endproclaim
% NOTE(review): text appears to be missing here (the proof of Theorem
% 7.5 and the beginning of Appendix A); restore from the original
% manuscript.
\vskip 0.3in
\flushpar{\bf{Appendix A. Basic Facts on Sturm-Liouville Operators}}
\medpagebreak
Throughout this appendix, we suppose
$$
p,\, q,\, r \in L^1_{\text{\rom{loc}}}((a,b)), \quad
\text{$p(x)>0$, $r(x)>0$ a.e.~on }
(a,b). \tag A.1
$$
Next, define in $L^2((a,b);rdx)$ the following linear operators
$$
\gather
(H^0_{\alpha,\beta} u)(x) = -r(x)^{-1}(p(x)u'(x))', \\
\matrix \format\l&\l\\ D(H^0_{\alpha,\beta}) =
\{u \in L^2((a,b);r\,dx) \mid\, &
u,pu' \in AC([a,b]), r^{-1}(pu')' \in L^2((a,b);r\,dx), \\
& (pu')(a) + \alpha u(a) = (pu')(b) + \beta u(b) =0\}, \endmatrix \\
\qquad\qquad\qquad
\qquad\qquad\qquad \qquad\qquad\qquad \alpha,\beta \in
{\Bbb R} \cup \{ \infty \}
\endgather
$$
(here $\alpha=\infty$ denotes a Dirichlet boundary condition $u(a)
=0$ and similarly at $b$),
$$
\gather
S_{\alpha,\beta} u = s\, u, \quad (s\, u)(x) =
(p(x)/r(x))^{1/2}u'(x), \quad \alpha,\beta \in \{0,\infty\}, \\
\matrix \format\l&\l\\ D(S_{\alpha,\beta}) =
\{u \in L^2((a,b);r\,dx) \mid\, &
u \in AC([a,b]), s\, u \in L^2((a,b);r\,dx), \\ & u(a)
= 0 \text{ if }
\alpha =\infty,\, u(b) = 0 \text{ if } \beta =\infty \},
\endmatrix\\
S^+_{\alpha,\beta} u = s^+ u, \quad (s^+ u)(x) =
-r(x)^{-1}[(p(x)r(x))^{1/2}u(x)]',
\quad \alpha,\beta \in \{0,\infty\},\\
\matrix \format\l&\l\\ D(S^+_{\alpha,\beta}) =
\{u \in L^2((a,b);r\,dx) \mid\, &
(pr)^{1/2}u \in AC([a,b]),s^+ u \in L^2((a,b);r\,dx), \\ &
((pr)^{1/2}u)(a) = 0 \text{ if } \alpha = 0,
((pr)^{1/2}u)(b) = 0
\text{ if } \beta =0 \}, \endmatrix
\endgather
$$
and the form
$$
R^0_{\alpha,\beta}(u,v) = \langle S_{\alpha,\beta}u,
S_{\alpha,\beta}v \rangle,
\quad {\Cal D}(R^0_{\alpha,\beta}) = D(S_{\alpha,\beta}),
\quad \alpha,\beta \in
\{0,\infty\}
$$
($\langle\,.\, ,\,.\,\rangle$ the scalar product in $L^2((a,b);r\,dx)$).
\proclaim{Lemma A.1} {\rom{(i)}} $S_{\alpha,\beta} =
(S_{\alpha,\beta}^+)^*$ and $S_{\alpha,\beta}^+ = S^*_{\alpha,\beta}$
for all $\alpha,\beta \in \{0, \infty\}$.
{\rom{(ii)}} $H^0_{\alpha,\beta} = S^*_{\alpha,\beta}
S_{\alpha,\beta}$, $\alpha,\beta \in \{0, \infty\}$.
\endproclaim
\demo{Proof} Define
$$
\matrix \format\l&\l\\ K: & L^2((a,b);r\,dx) \to
D(S_{\infty,0})\\ & g \mapsto \int\limits_a^x
\frac{g(y)r(y)\,dy}{(p(y)r(y))^{1/2}}
\endmatrix, \quad
\matrix \format\l&\l\\ K^+: & L^2((a,b);r\,dx) \to
D(S^+_{0,\infty})\\ & g \mapsto (pr)(x)^{-1/2}
\int\limits_a^x g(y)r(y)\,dy
\endmatrix \, .
$$
A straightforward calculation verifies $(K\,g)(a)=0$,
$sK\,g = g$ and $((pr)^{1/2}K^+\,g)(a)=0$, $s^+ K^+\,g = g$.
We only show $S_{\alpha,\beta}^* = S^+_{\alpha,\beta}$, the case
$(S^+_{\alpha,\beta})^* = S_{\alpha,\beta}$ being analogous. Moreover,
since $S_{\infty,\infty} \subseteq S_{\alpha,\beta}$ implies
$S_{\alpha,\beta}^* \subseteq S_{\infty,\infty}^*$ we only concentrate
on proving $S_{\infty,\infty}^* = S_{\infty,\infty}^+$, the rest
following from an additional integration by parts.
An integration by parts proves $S^+_{\infty,\infty}\subseteq
S^*_{\infty,\infty}$. Conversely, let $f \in D(S_{\infty,\infty}^*)$
and set $g = K^+ S_{\infty,\infty}^* f$. Then
$$
\int\limits_a^b (\bar{f}-\bar{g}) (S_{\infty,\infty} h) r\, dx
= \int\limits_a^b (S_{\infty,\infty}^* \bar{f}-s^+ \bar{g}) hr \,
dx =0
$$
for all $h\in D(S_{\infty,\infty})$. Thus, $\text{\rom{Ran}}
(S_{\infty,\infty})$ is a subset of the kernel of the linear functional
$k\mapsto \langle f-g,k \rangle$. But $\text{\rom{Ran}} (S_{\infty,
\infty})= \{(pr)^{-1/2}\}^\perp$ (since $g\in \text{\rom{Ran}}
(S_{\infty,\infty})$ is equivalent to $(Kg)(b)=0$) and hence $f=g+c
(pr)^{-1/2} \in D(S^+_{\infty,\infty})$ for some constant $c$ proving
$S_{\infty,\infty}^* \subseteq S^+_{\infty,\infty}$ and hence (i).
By inspection, we obtain $D(S^+_{\alpha,\beta} S_{\alpha,\beta}) =
\{u\in D(S_{\alpha,\beta})\mid S_{\alpha,\beta}u \in D(S^+_{\alpha,
\beta})\} = D(H^0_{\alpha,\beta})$ since $pu'\in AC([a,b])$ implies
$(p/r)^{1/2} u'= (pr)^{-1/2}(pu')\in L^2((a,b);r\,dx)$ and
$S^+_{\alpha,\beta} S_{\alpha,\beta}u = H^0_{\alpha,\beta}u.$ This
fact together with (i) proves (ii). \qed
\enddemo
Furthermore, we introduce the forms
$$
\gather
Q_{q/r}(u,v) = \int\limits_a^b q(x)r(x)^{-1}\, \overline{u(x)}\,
v(x) r(x)\, dx, \\
\Cal D (Q_{q/r}) = \{u\in L^2((a,b);r\,dx)\mid (q/r)^{1/2} u \in
L^2((a,b);r\,dx)\},
\endgather
$$
and
$$
\gather
q_{\alpha,\beta}(u,v)= \tilde{\beta}\, \overline{u(b)}\, v(b)
- \tilde{\alpha}\, \overline{u(a)}\, v(a), \quad \Cal D
(q_{\alpha,\beta}) = AC([a,b]),\\
\tilde{\alpha} = \cases \alpha, & \alpha\in\Bbb R \\
0, & \alpha=\infty \endcases,
\quad \tilde{\beta} = \cases \beta, & \beta\in\Bbb R \\
0, & \beta=\infty \endcases,
\quad \alpha,\beta \in \Bbb R \cup \{\infty\}.
\endgather
$$
Finally, we set
$$
\gather
Q^0_{\alpha,\beta} = R^0_{\widehat{\alpha},\widehat{\beta}}
+ q_{\alpha,\beta}, \quad \Cal D (Q^0_{\alpha,\beta}) =
D(S_{\widehat{\alpha},\widehat{\beta}}), \\
\widehat{\alpha} = \cases 0, & \alpha\in\Bbb R \\
\infty, & \alpha=\infty \endcases,
\quad \widehat{\beta} = \cases 0, & \beta\in\Bbb R \\
\infty, & \beta=\infty \endcases,
\quad \alpha,\beta \in\Bbb R \cup \{\infty\}.
\endgather
$$
and
$$
Q_{\alpha,\beta} = Q^0_{\alpha,\beta} + Q_{q/r}, \quad
\Cal D (Q_{\alpha, \beta}) = D(S_{\widehat{\alpha}, \widehat{\beta}}),
\quad \alpha,\beta \in {\Bbb R} \cup \{\infty\}. \tag A.2
$$
\proclaim{Lemma A.2} {\rom{(i)}} $q_{\alpha,\beta}$ is infinitesimally
form bounded with respect to $Q^0_{0,0}$.
{\rom{(ii)}} $Q_{q/r}$ is relatively form compact with respect to
$Q^0_{\alpha,\beta}$, $\alpha,\beta \in\Bbb R \cup \{\infty\}$.
\endproclaim
\demo{Proof} (i) Since for arbitrary $c \in [a,b]$ and $u \in
D(S_{0,0})$,
$$
|u(c)|^2 = \biggl| u(x)^2 - 2 \int\limits_c^x u(t) u'(t)\,dt\biggr|
\leq |u(x)|^2 + 2\int\limits_a^b |u(t) u'(t)|\,dt,
$$
one infers (after integrating from $a$ to $b$) for any $\epsilon>0$,
$$
\align
\| u \|_\infty^2 &\leq (b-a)^{-1}\| u \|^2_2 + 2 \int\limits_a^b
\frac{|u(t)|}{(\epsilon p(t)/2)^{1/2}}\, (\epsilon p(t)/2)^{1/2}
|u'(t)|\,dt \\
& \leq (b-a)^{-1}\| u \|^2_2 + \int\limits_a^b \biggl(\frac{2}
{\epsilon}\, \frac{|u(t)|^2}{p(t)} + \frac{\epsilon}{2}\, p(t)
|u'(t)|^2 \biggr)\, dt. \tag A.3
\endalign
$$
Since $0**