%~Mouliné par MaN_auto v.0.23.0 2020-06-25 09:58:54
\documentclass[AHL,Unicode,longabstracts,published]{cedram}

\usepackage{mathtools}
%%\usepackage{Fourier}

\AtBeginDocument{
\theoremstyle{plain}
\newtheorem{hypo}[cdrthm]{Assumption} }






\newcommand*\e{\varepsilon}
\newcommand*\R{{\mathbb R}}
\newcommand*\Cbb{{\mathbb C}}%remplacer \C->\Cbb
\newcommand*\N{{\mathbb N}}
\newcommand*\Z{{\mathbb Z}}
\newcommand{\dptl}{\partial}%remplacer \d->\dptl
\newcommand{\alf}{\alpha}%remplacer \a->\alf
\newcommand*\EE{{\mathbb E}}
\newcommand*\g{\gamma}
\newcommand{\ubf}{{\mathbf u}}
\newcommand{\vbf}{{\mathbf v}}
\newcommand*\w{{\mathbf w}}
\newcommand*\f{{\mathbf f}}
\newcommand*\h{{\mathbf h}}
\newcommand{\Hbf}{{\mathbf H}}
\newcommand*\A{{\mathbf A}}
\newcommand*\F{{\mathbf F}}
\newcommand*\dsp{\displaystyle}
\newcommand*\und{\underline}
\newcommand{\rmd}{\mathrm{d}}

%%\newcommand*\Id{{\rm Id}}
\DeclareMathOperator{\Id}{Id}
\DeclareMathOperator{\Imm}{Im}
\DeclareMathOperator{\Sp}{Sp}
\DeclareMathOperator{\Ret}{Re}

\DeclarePairedDelimiter{\VERT}{|\mkern -1.6mu|\mkern -1.6mu|}{|\mkern -1.6mu|\mkern -1.6mu|}

%%\newcommand{\VERTT}{\mathrm{|\mkern -1.6mu|\mkern -1.6mu|}}
%%MATHOP ?
%\newcommand*\O{\Omega}

%\newcommand*\o{\omega}
%\newcommand*\t{\tau}
%\newcommand*\b{\beta}
%\newcommand*\s{\sigma}
%\newcommand*\RR{{\mathbf R}}

%% A desactiver !
%\newcommand*\be{\begin{equation}}
%\newcommand*\ee{\end{equation}}

%\newcommand*\op{{\rm op}}




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\graphicspath{{./figures/}}

\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}

\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}

\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}}
\let\oldtilde\tilde
\renewcommand*{\tilde}[1]{\mathchoice{\widetilde{#1}}{\widetilde{#1}}{\oldtilde{#1}}{\oldtilde{#1}}}\let\oldhat\hat
\renewcommand*{\hat}[1]{\mathchoice{\widehat{#1}}{\widehat{#1}}{\oldhat{#1}}{\oldhat{#1}}}\let\oldforall\forall
\renewcommand*{\forall}{\mathrel{\oldforall}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\title[On hyperbolicity and Gevrey well-posedness]{On hyperbolicity and Gevrey well-posedness. Part one: the elliptic case.}
\alttitle{Hyperbolicité et caractère bien-posé en Gevrey. Partie une~: le cas elliptique.}

\subjclass{35L40}
\keywords{Gevrey regularity, hyperbolic systems, ill-posedness}



\author{\firstname{Baptiste} \lastname{Morisse}}
\address{School of Mathematics,\\
Cardiff University,\\
Senghennydd Road,\\
CF24 4AG Cardiff, (UK)}
\email{morisseb@cardiff.ac.uk}

\thanks{The author thanks his advisor Benjamin Texier for all the remarks on this work.}

\editor{P. Gérard}

\begin{abstract}
In this paper we prove that the Cauchy problem for first-order quasi-linear systems of partial differential equations is ill-posed in Gevrey spaces, under the assumption of an initial ellipticity. The assumption bears on the principal symbol of the first-order operator. Ill-posedness means instability in the sense of Hadamard, specifically an instantaneous defect of H\"older continuity of the flow from $G^{\sigma}$ to $L^2$, where $\sigma\in(0,1)$ depends on the initial spectrum. Building on the analysis carried out by G. Métivier [\emph{Remarks on the well-posedness of the nonlinear Cauchy problem}, Contemp. Math.~2005], we show that ill-posedness follows from a long-time Cauchy--Kovalevskaya construction of a family of exact, highly oscillating, analytical solutions which are initially close to the null solution, and which grow exponentially fast in time. A specific difficulty resides in the observation time of instability. While in Sobolev spaces, this time is logarithmic in the frequency, in Gevrey spaces it is a power of the frequency. In particular, in Gevrey spaces the instability is recorded much later than in Sobolev spaces.
\end{abstract}

\begin{altabstract}
Dans cet article nous prouvons que le problème de Cauchy pour des systèmes d'équations aux dérivées partielles quasi-linéaires du premier ordre est mal posé dans les espaces de Gevrey, sous la condition d'ellipticité initiale. L'hypothèse porte sur le symbole principal de l'opérateur du premier ordre. Le caractère mal-posé s'entend ici au sens d'Hadamard, en particulier par un défaut instantané de la continuité H\"older du flot depuis $G^{\sigma}$ vers $L^2$, où $\sigma\in(0,1)$ dépend du spectre initial. En suivant la construction proposée par G. Métivier dans [\emph{Remarks on the well-posedness of the nonlinear Cauchy problem}, Contemp. Math.~2005], nous prouvons que le caractère mal-posé découle d'une construction à la Cauchy--Kovalevskaya en temps longs d'une famille de solutions exactes, rapidement oscillantes et analytiques proches initialement de la solution nulle, et qui croissent exponentiellement vite en temps. Une des difficultés ici réside dans le temps d'observation de l'instabilité. Alors qu'en régularité Sobolev, ce temps est logarithmique en fréquence, en régularité Gevrey il est une puissance en fréquence. En particulier, en régularité Gevrey l'instabilité est observée bien plus tard qu'en régularité Sobolev.
\end{altabstract}


\datereceived{2018-11-30}
\daterevised{2019-10-08}
\dateaccepted{2019-12-19}






%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}

\maketitle



\section{Introduction}

We consider the Cauchy problem for first-order quasi-linear systems of partial differential equations
\begin{equation}\label{1.Cauchy}
\dptl_{t}u = \sum_{j=1}^{d} A_{j}(t,x,u) \dptl_{x_j}u + f(t,x,u) \;,\quad u(0,x) = h(x)
\end{equation}
where $t\geq 0$, $x\in\R^{d}$, $u(t,x)$ and $f(t,x,u)$ are in $\R^N$ and $A_j(t,x,u)\in\R^{N\times N}$. We assume throughout the paper that the $A_j$ and $f$ are analytic in a neighborhood of $(0,0,0)$.

Our results extend Métivier's ill-posedness Theorem~\cite{metivier2005remarks} for initially elliptic operators in Sobolev spaces:
\begin{theo*}
Assuming the first-order operator is initially micro-locally elliptic, the Cauchy problem~\eqref{1.Cauchy} is ill-posed in Gevrey spaces.
\end{theo*}

While it may seem natural that Gevrey regularity, with associated sub-exponential Fourier rates of decay $O\left(e^{-|\xi|^{\sigma}} \right)$, with $\sigma <1$, will not be sufficient to counteract the exponential growth of elliptic operators (think of $e^{t\xi}$, as is the case for the Cauchy--Riemann operator $\dptl_{t} + i\dptl_{x}$), the proof of ill-posedness requires a careful analysis of linear growth rates and linear and nonlinear errors. This ill-posedness result is Theorem~\ref{1.theorem.2}, stated more precisely in Section~\ref{1.subsection.result}. By ill-posedness, we mean the absence of a H\"older continuous dependence on the data, as measured from $G^{\sigma}$ to $L^2$. The precise definition is given in Section~\ref{1.subsection.definitions}. The larger $\sigma$, the stronger the result. Of course, well-posedness holds in the limiting case $\sigma=1$, corresponding to analytic functions. Assuming only a property of micro-local ellipticity for the principal symbol of~\eqref{1.Cauchy}, we obtain, in Theorem~\ref{1.theorem.2}, the bound $\sigma < 1/(m + 1)$, where $m \geq 1$ is an algebraic multiplicity. Under an assumption of smooth partial diagonalization (see Assumption~\ref{1.hypo.2}), we obtain, in Theorem~\ref{1.theorem.3}, ill-posedness for any $\sigma <1/2$ regardless of the algebraic multiplicity. Under stronger spectral assumptions (see Assumption~\ref{1.hypo.3}), we obtain, in Theorem~\ref{1.theorem.4}, ill-posedness for any $\sigma < 2/3$ and we outline the conditions which allow for an instability proof at an arbitrarily high Gevrey regularity.

We note that an equation may be simultaneously ill-posed in Sobolev spaces and well-posed in Gevrey spaces (for instance, the Prandtl equation~\cite{gerard2010ill,gerard2013well}). Besides well-posedness, the distinct but related phenomenon of Landau damping for Vlasov--Poisson occurs in Gevrey spaces~\cite{bedrossian2013landau,mouhot2010landau}, but not in Sobolev spaces~\cite{bedrossian2016nonlinear}.

In the companion paper~\cite{morisse2016II}, we extend these results to systems transitioning from hyperbolicity to ellipticity, following~\cite{lerner2010instability,lerner2015onset}.


\subsection{Background: on Lax--Mizohata results}


The question of the well-posedness of the Cauchy problem was first introduced and studied by Hadamard in~\cite{hadamard1902problemes}. Hadamard proved, in the case of linear second-order elliptic equations, that the associated solution flow is not regular in the vicinity of any solution of the system. The case of linear evolution systems of the form~\eqref{1.Cauchy}, with $A_j(t,x,u) \equiv A_j(t,x) $, $f(t,x,u) \equiv f(t,x)$ was first studied by Lax in~\cite{lax2005asymptotic}, where the proof was given that hyperbolicity of the system, i.e. reality of the spectrum of the principal symbol, was a necessary condition for~\eqref{1.Cauchy} to be well-posed in the sense of Hadamard in $C^k$ spaces. Lax's proof relied on separation of the spectrum. Mizohata extended Lax's result without this assumption in~\cite{mizohata1961some}. Some cases of nonlinear systems were studied later by Wakabayashi in~\cite{wakabayashi2001lax} (here with stability also with respect to source term) and by Yagdjian in~\cite{yagdjian2002lax,yagdjian1998note} (there in the special case of gauge invariant systems).

A first statement of a precise Lax--Mizohata result for first-order quasi-linear systems was given by Métivier in~\cite{metivier2005remarks}, with a precise description of the lack of regularity of the flow. As we will adapt the methods used by Métivier, we want to take a close look at~\cite{metivier2005remarks}.


\subsection{On Métivier's result in Sobolev spaces}\label{1.subsubsection.metivier}


In~\cite[Section~3]{metivier2005remarks} Guy Métivier proves H\"older ill-posedness in Sobolev spaces for the Cauchy problem~\eqref{1.Cauchy}, as soon as hyperbolicity fails at $t=0$. The initial defect of hyperbolicity means here that there are some $x_0\in\R^d$, $\vec{u}_0\in\R^N$ and $\xi_0\in\R^{d}$ such that the principal symbol evaluated at $(0,x_0,\vec{u}_0,\xi_0)$:
\begin{equation}\label{1.intro.def.A0}
A_0 := \sum_j A_j(0,x_0,\vec{u}_0)\xi_{0,j}
\end{equation}
is supposed to have a couple of eigenvalues with non zero imaginary part, say $\pm i \g_0$, with eigenvectors $\vec{e}_{\pm}$. H\"older well-posedness, locally in time and space, would mean that initial data $h_1$ and $h_2$ in $H^{\sigma}(B_{r_0}(x_0))$, for some small $r_0>0$, would generate solutions $u_1$ and $u_2$ such that
\begin{equation}\label{1.well}
\|u_2 - u_1\|_{L^2(\Omega)} \lesssim \| h_2 - h_1 \|_{H^{\sigma}\left(B_{r_0}(x_0)\right)}^{\alf}
\end{equation}
for some space-time domain $\Omega$, for some $\sigma\geq 0$, some $\alf\in(0,1]$. In order to disprove~\eqref{1.well}, Métivier chooses $h_1 \equiv \vec{u}_0$, and lets $u_1$ be the Cauchy--Kovalevskaya solution issued from $h_1$, the existence of which is granted, locally in space and time, by the analyticity assumption on the coefficients $A_j$ and $f$. Translating, Métivier is reduced to the case $\vec{u}_0=0$, $u_1 \equiv 0$, and the proof that~\eqref{1.well} does not hold is reduced to the construction of a family $(u_{\e})_{\e >0}$ of initially small, exact analytical solutions such~that
\begin{equation}\label{1.intro.holder}
\lim_{\e\to0} \frac{\|u_{\e}\|_{L^2(\Omega_{\e})}}{\|u_{\e}(0)\|^{\alf}_{H^{\sigma}(B_0(x_0))}} = +\infty
\end{equation}
for all H\"older exponent $\alf \in (0,1]$ and all Sobolev indices $\sigma >0$, where $\Omega_{\e}$ is a small conical space-time domain centered at $(0,x_0)$.

To highlight the specific frequency $\xi_0$ at which the initial ellipticity occurs, Métivier looks for solutions of the form
\begin{equation}\label{1.intro.ansatz}
u_{\e}(t,x) = \e \ubf(t/\e, x, (x-x_0)\cdot\xi_0/\e)
\end{equation}
with $\e$ a small parameter and $\ubf(s,x,\theta)$ is periodic in $\theta$. Then $\ubf$ solves
\begin{equation}\label{1.intro.equation}
\dptl_{s}\ubf - A_0\dptl_{\theta}\ubf = G(\e \ubf)
\end{equation}
where $A_0$ is defined by~\eqref{1.intro.def.A0} and $G(\e \ubf)$ comprises both linear and nonlinear ``errors'' terms. Factorizing the propagator, an equivalent fixed point equation is obtained
\begin{equation}\label{1.intro.pointfixe}
\ubf = e^{sA_0\dptl_{\theta}}\ubf(0) + \int_{0}^{s} e^{(s-s')A_0\dptl_{\theta}}G(\e \ubf(s'))ds'.
\end{equation}

For equation~\eqref{1.intro.pointfixe}, the goal is to prove:
\begin{itemize}
\item The existence of solutions over the space-time domain $\Omega_{\e}$. This is a Cauchy--Kovalevskaya type of result, discussed in Section~\ref{1.intro.ck}.
\item The \emph{wild growth estimate}~\eqref{1.intro.holder}. Since the instability develops in time, the existence domain $\Omega_{\e}$ must be large enough for~\eqref{1.intro.holder} to be recorded. This point is discussed in Subsection~\ref{1.intro.growth.solutions}.
\end{itemize}


\subsubsection{Exponential growth of the solutions}\label{1.intro.growth.solutions}

As a consequence of the assumption of ellipticity on $A_0$ defined by~\eqref{1.intro.def.A0}, the propagator has an exponential growth in Fourier
\begin{equation}
\label{1.intro.growth.fourier}
\left|\left(e^{(s-s')A_0\dptl_{\theta}} \ubf(s,x,\theta)\right)_n\right| \lesssim e^{|n|\g_0(s-s')} | \ubf_n(s,x)|
\end{equation}
where we denote by $(\cdot)_n$ the $n^{\rm th}$ Fourier mode with respect to the periodic variable $\theta$. We recall that $\xi_0$ is the distinguished frequency for which $A_0$, defined in~\eqref{1.intro.def.A0}, has a couple of non real eigenvalues associated with eigenvectors $\vec{e}_{\pm}$. We define well-chosen initial data
\begin{equation}\label{1.intro.initial}
h_{\e} = \e^{M+1} \left(e^{\mp i x\cdot\xi_0/\e} \vec{e}_{\pm} \right)\,, \quad \h_{\e} := \e^{M}\left(e^{ \mp i \theta}\vec{e}_{\pm}\right)
\end{equation}
for which the upper bound is attained:
\begin{equation}\label{1.intro.fepsilon}
\f_{\e}(s,\theta) := e^{sA_0\dptl_{\theta}}\h_{\e}(\theta) \quad \text{satisfies} \quad \left|\left(\f_{\e}\right)_n\right| \approx \e^{M}e^{\g_0s}\,, \quad \forall n\in\Z.
\end{equation}


Above $ \f_{\e}(s,\theta) $ is the free solution of~\eqref{1.intro.equation}, that is the solution of the equation when $G(\e \ubf) = 0$. One key observation in view of the Hadamard instability is that, for times of order $M|\ln(\e)|$, the free solution $ \f_{\e} $ is of order $1$ with respect to $\e$, whereas at time $0$ it is of order $\e^{M}$. Roughly, with $f_{\e}(t,x) = \f_{\e}(t/\e,x,(x-x_0)\cdot\xi_0/\e)$, $h_{\e}(t,x) = \h_{\e}(t/\e,x,(x-x_0)\cdot\xi_0/\e)$ and $\Omega_{\e}$ a small conical space-time domain that contains the ball $B_{\e}((M|\ln(\e)|,x_0))$ of $\R_s\times\R_x^{d}$, there holds
\begin{equation}\label{1.intro.ratio}
\frac{\|f_{\e}\|_{L^2(\Omega_{\e})}}{\|h_{\e}\|^{\alf}_{H^{\sigma}}} \approx \e^{(d+1)/2} \e^{-\alf(M-\sigma)}
\end{equation}
and a suitable choice of $M$ leads to~\eqref{1.intro.holder} in the simplified case $u_{\e} = f_{\e}$, as $\e\to0$.

Through a careful analysis of the quasilinear system, Métivier proved that the nonlinear solution $\ubf_{\e}$ is close enough to $\f_{\e}$ in such a way that the growth~\eqref{1.intro.fepsilon} of the free solution $\f_{\e}$ in long time $O(|\ln(\e)|)$ passes on to solutions $\ubf_{\e}$, such that
\begin{equation}\label{1.intro.growth}
|\ubf_{\e}(s,x,\theta)| \gtrsim \e^{M} e^{\g_0 s}
\end{equation}
in a whole neighborhood of $(s,x) = (M|\ln(\e)|,x_0)$. This estimate from below leads finally to~\eqref{1.intro.holder}.

In this sketch of analysis, we see in particular that the projection (over the temporal coordinate) of the existence domain $\Omega_{\e}$ introduced in Section~\ref{1.subsubsection.metivier} must be large enough to contain time intervals $[0,M|\ln(\e)|]$. In Gevrey spaces, this domain must be much larger, see Section~\ref{1.intro.extension}.


\subsubsection{Existence of solutions via a long-time Cauchy--Kovalevskaya result}\label{1.intro.ck}

In order to show that nonlinear solution $\ubf_{\e}$ of equation~\eqref{1.intro.pointfixe} actually exists for sufficiently long time $O(M|\ln(\e)|)$, Métivier proved a long-time Cauchy--Kovalevskaya theorem using techniques of majoring series developed by Wagschal in~\cite{wagschal1979probleme} for the resolution of the nonlinear Goursat problem.~A presentation of the method can also be found in~\cite{cartan1995theorie}, and is developed extensively in Section~\ref{1.subsection.majoring}.

For formal series $\phi(x) = \sum_{k\in\N^{d}} \phi_{k} x^{k}$ and $\psi(x) = \sum_{k\in\N^{d}} \psi_{k} x^{k}$, with $\psi_{k} \geq 0$, we define the relation
\[
\phi \prec \psi \quad \Longleftrightarrow \quad |\phi_{k}| \leq \psi_k \;, \; \forall k \in \N^{d}.
\]

The method is based on the observation that, if $\psi$ has convergence radius $R^{-1} >0$ and $\phi \prec \psi$, then $\phi$ has a convergence radius at least equal to $R^{-1}$. Conversely, there are series of one variable $\Phi(z)$ with convergence radius equal to $1$ that satisfy the following property: for any series $\phi$ with convergence radius greater than $R^{-1}$, there is $C>0$ such that $\phi \prec C \Phi(R \sum_j x_j)$. The norm of $\phi$ will be defined as the best constant $C$ (see Definition~\ref{1.definition.EE_s}). An example is $\Phi(z) = \frac{1}{1 - z}$, which satisfies the previous property thanks to Cauchy's inequalities.

Based on those two observations, the method consists in shifting the focus from $\phi$ to $\Phi$. The key is that $\Phi$ can be taken to be much simpler than the original, typically unknown, series. In this paper we choose $\Phi$ with convergence radius equal to one and also such that $\Phi^2 \prec \Phi$ (see point $4$ in Lemma~\ref{1.majoring.properties} in Section~\ref{1.subsection.majoring}).

Now assume that we are given an initial datum $u(0,\cdot)$ in~\eqref{1.intro.pointfixe} such that\linebreak$u(0,x)\prec \Phi(R \sum_j x_j)$. The Cauchy--Riemann operator $\dptl_t + i\dptl_x$ provides the simplest example of an elliptic Cauchy problem. On this example the radius of analyticity decays linearly in time: the datum $u$ with $\hat{u}(0,\xi) = e^{-R^{-1}|\xi|}$ generates the solution $\hat{u}(t,\xi) = e^{-(R^{-1}-t)\xi}$, for $t > 0$ and $\xi > 0$. It makes sense to assume similarly a linearly decaying radius of convergence for the solutions to our elliptic problems. Thus after comparing $u(0)$ to $\Phi(R \sum_j x_j)$, we will compare $u(s)$ to $\Phi(R \sum_j x_j + \e\rho s)$, where $R$ and $\rho$ are parameters to be specified later. Note that the series $\Phi(R \sum_j x_j + \e\rho s)$ has convergence radius $R^{-1}(1 - \e \rho s)$, which is non zero for $s < (\e \rho)^{-1}$; this is hence the \emph{maximal time of regularity} for the solutions.

For simplicity of exposition, consider equation~\eqref{1.intro.pointfixe} with $G(\e\ubf) \equiv \e \sum_j A_j(\e s,x,\vec{u}_0)\linebreak\dptl_{x_j}\ubf$ and $A_0 \equiv 0$. The right-hand side of~\eqref{1.intro.pointfixe} reduces then to
\begin{equation}\label{1.intro.def.rhs}
\int_{0}^{s} \e \sum_j A_j(\e s',x, \vec{u}_0)\dptl_{x_j} \ubf(s') \,ds'.
\end{equation}


By assumption of analyticity of the $A_j$, we may control the series $A_j(\e s',x, \vec{u}_0)$ by the model $\Phi(R \sum_j x_j + \e\rho s)$, up to a multiplicative constant. Then~\eqref{1.intro.def.rhs} is controlled, in the sense of the binary relation $\prec$ and up to a multiplicative constant, by
\begin{multline*}
\int_{0}^{s} \e \Phi(R \sum_j x_j + \e\rho s') \sum_j \dptl_{x_j} \Phi\left(R \sum_j x_j + \e\rho s'\right) \, ds' \\
\begin{aligned}
& \prec \int_{0}^{s} \e R \Phi(R \sum_j x_j + \e\rho s') \Phi'\left(R \sum_j x_j + \e\rho s'\right) \, ds' \\
& \prec \int_{0}^{s} \e R \Phi'\left(R \sum_j x_j + \e\rho s'\right) \, ds' \\
& \prec R\rho^{-1} \Phi\left(R \sum_j x_j + \e\rho s\right).
\end{aligned}
\end{multline*}


Above, we used $2\Phi \Phi' \prec \Phi'$, a consequence of $\Phi^2 \prec \Phi$ (the relation $\prec$ is compatible with derivation, see Lemma~\ref{1.majoring.properties}). We observed above the phenomenon of regularization (of $\dptl_{x_j}$) by integration in time, as in~\cite{ukai2001boltzmann}. The ``error''~\eqref{1.intro.def.rhs} is controlled at a cost of $R\rho^{-1}$.

To conclude to the existence of the family of analytic solutions $\ubf_{\e}$ exhibiting the growth~\eqref{1.intro.growth} on sufficiently long time $O(M|\ln(\e)|)$, Métivier compared the maximal time of regularity $(\e\rho)^{-1}$, which then has to be greater than the instability time $M|\ln(\e)|$. This implies some constraints on $R$ and $\rho$, and finally on the domain of existence $\Omega_{\e}$. We will not go into more detail at this point, as those constraints will appear in the Gevrey analysis too.


\subsection{Extension to Gevrey spaces}\label{1.intro.extension}


The aim of this article is to prove the same kind of H\"older ill-posedness as in~\cite{metivier2005remarks}, under the assumption of analyticity of the coefficients of the $A_j$. But whereas~\cite{metivier2005remarks} holds in Sobolev spaces, we prove here instability in Gevrey spaces\footnote{This has been suggested by Jeffrey Rauch, whom the author thanks warmly.}.\linebreak Following Métivier's method, we construct a family of solutions $(u_{\e})_{\e}$ that satisfies
\begin{equation}\label{1.intro.holder.gevrey}
\lim_{\e\to0} \frac{\|u_{\e}\|_{L^2(\Omega)}}{\|u_{\e}(0)\|^{\alf}_{G^{\sigma}(B_0)}} = +\infty
\end{equation}
where the Gevrey space $G^{\sigma}(B_0)$ is precisely defined in Section~\ref{1.subsection.definitions}, with $B_0$ a ball of $\R^{d}$ containing the distinguished point $x_0$. Our goal in this section is to informally describe the specific difficulties posed by the analysis in Gevrey spaces.


\subsubsection{On the time of instability in Gevrey spaces}\label{1.subsubsection.time}

We first need to find a suitable replacement for the small coefficient $\e^{M}$ of $\h_{\e}$ defined in~\eqref{1.intro.initial} in the Sobolev framework. Indeed, the highly oscillating function $e^{ix\cdot\xi_0/\e}$ has Sobolev norm $ \|e^{ix\cdot\xi_0/\e}\|_{H^{\sigma}(B_0)} \approx \e^{-\sigma} $ whereas the Gevrey norm satisfies (see Definition~\ref{1.def.gevrey} and Lemma~\ref{1.size.gevrey.exp}) $ \|e^{ix\cdot\xi_0/\e}\|_{G^{\sigma}(B_0)} \approx e^{\e^{-\sigma}}$. Appropriate initial data are both small and highly oscillating. Thus we replace~\eqref{1.intro.initial} by
\begin{equation}\label{1.intro.initial.gevrey}
h_{\e} = e^{-\e^{-\delta}} \left(e^{\mp i x\cdot\xi_0/\e} \vec{e}_{\pm} \right)\,, \quad \h_{\e} := e^{-\e^{-\delta}}\left(e^{\mp i\theta}\vec{e}_{\pm}\right)
\end{equation}
with $\sigma < \delta$. At the end of the analysis, we expect~\eqref{1.intro.growth} to be replaced by
\begin{equation}\label{1.intro.growth.gevrey}
|\ubf_{\e}(s,x,\theta)| \gtrsim e^{-\e^{-\delta}} e^{\g_0 s}.
\end{equation}

This leads to a typical observation time $ \e^{-\delta} $. This is the time for which the time exponential growth associated with the ellipticity counterbalances the very small initial amplitude. This observation time is far bigger than the typical Sobolev time $O(|\ln(\e)|)$ described above in Section~\ref{1.subsubsection.metivier}. Note that the limitation $\sigma < \delta$ ensures at least formally that the ratio~\eqref{1.intro.ratio} in Gevrey spaces $G^{\sigma}$ diverges as $\e\to0$ (see Remark~\ref{1.remark.size.gevrey}).


\subsubsection{On the control of linear errors over long times}\label{1.subsubsection.control}

Typically the estimates for $G(\e \ubf)$ (with notation introduced in~\eqref{1.intro.equation}), which comprises both linear and nonlinear error terms, degrade over time. This is problematic in view of the resolution of the fixed point equation~\eqref{1.intro.pointfixe}. By definition of $A_0$ in~\eqref{1.intro.def.A0}, the linear error comprises the term
\[
\left(\sum_j A_j(\e s,x,\e\ubf) \xi_{0,j} - A_0\right)\dptl_{\theta} \ubf \approx \left(\e s + |x-x_0| + \e \ubf\right)\dptl_{\theta}\ubf.
\]

Suppose now, for simplicity of exposition, that $G(\e \ubf) = \e s \dptl_{\theta} \ubf$, and recall that $s = O(\e^{-\delta})$ according to the sketch of analysis of Section~\ref{1.subsubsection.time}. Suppose in addition that the linear bound~\eqref{1.intro.growth.fourier} holds, and that we have an a priori control of the Fourier mode $n={}-1$ of the solution $\ubf$ with a growth rate that is equal to the linear growth rate
\[
|\ubf_{{}-1}(s)| \lesssim e^{-\e^{-\delta}} e^{\g_0 s}.
\]


The amplitude $e^{-\e^{-\delta}}$ is the one previously discussed in Section~\ref{1.subsubsection.time}. Then equation~\eqref{1.intro.pointfixe} for the Fourier mode $n={}-1$ reduces to
\[
\ubf_{{}-1}(s) - e^{-\e^{-\delta}}e^{\g_0 s}\vec{e}_{+} = \int_{0}^{s} e^{{}-i(s-s')A_0} \left(\e s' ({}-i) \ubf_{-1}(s')\right) \,{\rmd}s'
\]
where $\vec{e}_{+}$ is the eigenvector of $A_0$ associated with the eigenvalue with imaginary part $\g_0$. For the right-hand side, we have the estimate:
\begin{align}
\left| \int_{0}^{s} e^{{}-i(s-s')A_0} \left(\e s' (-i) \ubf_{-1}(s')\right) \,{\rmd}s' \right| & \lesssim \int_{0}^{s} e^{\g_0(s-s')} \left(\e s' e^{-\e^{-\delta}} e^{\g_0 s'}\right) \,{\rmd}s' \nonumber \\
& \lesssim \frac{1}{2} \e s^2 e^{-\e^{-\delta}}e^{\g_0 s} \label{1.intro.computation}
\end{align}
thanks to the upper bound~\eqref{1.intro.growth.fourier}. Hence $\ubf_{{}-1}(s)$ would satisfy~\eqref{1.intro.growth.gevrey} if $ \e s^2 = o_{\e\to0}(1) $ for any $s\in[0,\e^{-\delta})$, which would lead to the stringent constraint on the Gevrey index $ \sigma < \delta < 1/2 $.

In a hope to get past that first constraint on the Gevrey index, we will then consider the varying-coefficient operator $\sum_j A_j(\e s, x,
\vec{u}_0)\xi_{0,j} \dptl_{\theta}$, as opposed to~\cite{metivier2005remarks} where the constant-coefficient operator $A_0 \dptl_{\theta}$ was considered. See for instance Theorem~\ref{1.theorem.4}.


\subsubsection{On linear growth bounds}\label{1.intro.section.bounds}

As discussed in Section~\ref{1.subsubsection.control}, we choose to work with the varying-coefficient\linebreak operator
\[
\sum_j A_j(\e s, x, \vec{u}_0)\xi_{0,j} \dptl_{\theta}.
\]


We introduce first the propagator $U(s',s,x,\theta)$ which solves
\[
\dptl_{s} U(s',s,x,\theta) - \sum_j A_j(\e s, x, \vec{u}_0)\xi_{0,j} \,\dptl_{\theta} \, U(s',s,x,\theta) = 0\,, \quad U(s',s',x,\theta) \equiv {\Id}.
\]


As $\sum_j A_j(\e s, x, \vec{u}_0)\xi_{0,j}$ does not depend on $\theta$, the Fourier coefficients $U_n(s',s,x)$ of the propagator satisfy the ODE
\[
\dptl_{s} U_n(s',s,x) - in\sum_j A_j(\e s, x, \vec{u}_0)\xi_{0,j} \,U_n(s',s,x) = 0\,, \quad U_n(s',s',x) \equiv {\Id}.
\]


Then $U$ acts diagonally on each Fourier component. Note that in the autonomous case $\sum_j A_j(\e s, x, \vec{u}_0)\xi_{0,j} \equiv \sum_j A_j(0, x, \vec{u}_0)\xi_{0,j}$, the propagator satisfies
\[
U(s',s,x,\theta) = \exp \left((s-s') \sum_j A_j(0, x, \vec{u}_0)\xi_{0,j} \, \dptl_{\theta} \right).
\]


Using the propagator $U(s',s,x,\theta)$, the fixed point equation~\eqref{1.intro.pointfixe} is replaced by
\begin{equation}\label{1.intro.pointfixe.full}
\ubf(s,x,\theta) = \f(s,x,\theta) + \int_{0}^{s} U(s',s,x,\theta) G(\e\ubf(s',x,\theta)) ds'
\end{equation}
where $\f(s,x,\theta) = U(0,s,x,\theta) \h_{\e}(\theta)$ is the free solution, with $\h_{\e}$ defined in~\eqref{1.intro.initial.gevrey}.

For the $n^{\rm th}$ Fourier coefficient $U_n(s',s,x)$ of the propagator, the derivation of bounds is described for instance in~\cite[Section~4]{lerner2015onset}. Eigenvalues may cross at the distinguished point $(0,x_0)$. In particular, eigenvalues and eigenprojectors may not be smooth, although eigenvalues are continuous. Since we do not want to formulate any additional assumption on the symbol besides ellipticity (although see Section~\ref{1.intro.subsection.all.gevrey} below and Theorem~\ref{1.theorem.3}), this forces us, in the derivation of upper bounds of $U_n(s',s,x)$, to resort to the procedure of approximate trigonalization described for instance in~\cite{lerner2015onset}.

In this procedure, a small error is produced in the rate of growth. On one side, an upper bound
\begin{equation}
\label{1.intro.propa.bis}
\left|U_n(s',s,x)\right| \lesssim \omega^{-(m-1)} e^{|n|(s-s') ({\Imm}\,\lambda_0 + R^{-1} + \e \und{s} + \omega)}
\end{equation}
is achieved, where $\lambda_0$ is an eigenvalue of $A_0$ with positive imaginary part which is maximal among the other eigenvalues, and $m$ is the algebraic multiplicity of $\lambda_0$ in the spectrum. In~\eqref{1.intro.propa.bis} the parameter $\omega>0$ is associated with the trigonalization error. The optimal choice of $\omega$ is described below in Section~\ref{1.intro.endgame}. The bound~\eqref{1.intro.propa.bis} holds for $x$ in $B_{R^{-1}}(x_0)$ and $s$ in $(0,\und{s})$, where $R^{-1}$ is the convergence radius and $\und{s}$ the final time of observation. This is made precise in Lemma~\ref{1.lemma.growth.propa}.

On the other side, the free solution satisfies a bound of the form
\begin{equation}\label{1.intro.lower}
|\f_{\e}(s,x,\theta)| \gtrsim \omega^{-(m-1)}\,e^{-\e^{-\delta}} e^{s({\Imm}\,\lambda_0 - r - \e \und{s} - \omega)}
\end{equation}
for $(s,x)\in(0,\und{s})\times B_{r}(x_0)$ with $r$ small. This is made precise in Lemma~\ref{1.lemma.growth.free.solution}.



\subsubsection{On the endgame}\label{1.intro.endgame}
As we did in Section~\ref{1.subsubsection.control}, suppose now that there holds $G(\e\ubf) = \e \sum_j A_j(\e s, x, \vec{u}_0)\linebreak\dptl_{x_j}\ubf(s)$ and the linear bound~\eqref{1.intro.propa.bis}. Suppose also that we have an a priori control of the Fourier mode $n=1$ of the solution $\ubf$ with a growth rate that is equal to the linear growth rate
\begin{equation}\label{1.intro.apriori}
|\ubf_1(s)| \lesssim e^{-\e^{-\delta}} \omega^{-(m-1)} e^{s ({\Imm}\,\lambda_0 + R^{-1} + \e \und{s} + \omega)}.
\end{equation}


In view of bound~\eqref{1.intro.propa.bis} and equation~\eqref{1.intro.pointfixe.full}, there holds then for the Fourier mode $n=1$ the bound
\[
|\ubf_1(s) - \f_1(s)| \lesssim \int_{0}^{s} \omega^{-(m-1)} e^{(s-s') ({\Imm}\,\lambda_0 + R^{-1} + \e \und{s} + \omega)} \e \sum_j A_j(\e s', x, \vec{u}_0) \dptl_{x_j}\ubf(s') ds'.
\]


Thanks to the majoring series method explained in Subsection~\ref{1.intro.ck} and based on~\eqref{1.intro.apriori}, we may expect to bound the above by
\begin{equation}\label{1.intro.local}
|\ubf_1(s) - \f_1(s)| \lesssim e^{-\e^{-\delta}} \omega^{-2(m-1)} e^{s ({\Imm}\,\lambda_0 + R^{-1} + \e \und{s} + \omega)} R\rho^{-1}.
\end{equation}


To end the proof, it would suffice then to show that $\ubf_1$ has the same bound from below as $\f_1$ in~\eqref{1.intro.lower}. This is the case if the right-hand side of~\eqref{1.intro.local} satisfies
\begin{equation}\label{1.intro.intermediaire}
e^{-\e^{-\delta}} \omega^{-2(m-1)} e^{s ({\Imm}\,\lambda_0 + R^{-1} + \e \und{s} + \omega)} R\rho^{-1} \ll \omega^{-(m-1)}\,e^{-\e^{-\delta}} e^{s({\Imm}\,\lambda_0 - r - \e \und{s} - \omega)}
\end{equation}
for all $s\in(0,\und{s})$, where $\ll$ is defined in~\eqref{1.notation.ll}. This is equivalent to
\begin{equation}\label{1.intro.constraint}
\omega^{-(m-1)}\,e^{\und{s} (R^{-1} + r + \e \und{s} + \omega)} R\rho^{-1} \ll 1.
\end{equation}


As explained in Section~\ref{1.subsubsection.time}, the final time $\und{s}$ is of order $\e^{-\delta}$. In order for~\eqref{1.intro.constraint} to be satisfied, the argument of the exponential should be at most of order $1$ as $\e$ goes to $0$. Hence $R^{-1}$, $r$ and $\omega$ are chosen to be less than $\e^{\delta}$. Note that we also get once again the constraint $\e\und{s}^2 <1$, which brings back the limitation $\sigma < \delta < 1/2$ on the Gevrey index.

Besides~\eqref{1.intro.constraint}, another constraint shows up in the analysis. Recall that we work with the majoring series model $\Phi(R\sum_j x_j + \e\rho s)$. Its domain of analyticity is the conical space-time domain $\{ (s,x) \,|\, R\sum_j |x_j| + \e\rho s < 1 \}$. As the time of instability $\und{s}$ is of order $\e^{-\delta}$, in order to see the instability the maximal regularity time $(\e\rho)^{-1}$ has to be greater than $\e^{-\delta}$. Hence another constraint
\begin{equation}\label{1.intro.constraint.2}
\e^{1 - \delta} \ll \rho^{-1}.
\end{equation}

Since $\omega$ and $R^{-1}$ are of order $\e^{\delta}$, we rewrite constraint~\eqref{1.intro.constraint} as $\rho^{-1} \ll \e^{(m-1)\delta} R^{-1}$ and then as
\[
\rho^{-1} \ll \e^{m\delta}.
\]


Finally we end up with a consistency inequality $\e^{1-\delta} \ll \e^{m\delta}$, equivalent to the limitation $\delta < 1/(m+1)$ of the Gevrey index. This is our principal result, detailed in Theorem~\ref{1.theorem.2}.


\subsubsection{On proving instability for higher Gevrey indices}\label{1.intro.subsection.all.gevrey}

We saw above in Section~\ref{1.intro.endgame} that, in the general case, the consideration of the varying-coefficient operator $ \sum_j A_j(\e \tau,x,\vec{u}_0) \xi_{0,j} \dptl_{\theta} $ does not free us from the constraint $ \sigma < 1/2$.
\footnote{We choose nonetheless to prove Theorems~\ref{1.theorem.2} and~\ref{1.theorem.3} considering the varying-coefficient operator, as it is 1) in itself an improvement over the original proof by Métivier, and 2) a way to have a unique common framework for the proofs of all three Theorems presented in this paper.}
Indeed, as discussed in Section~\ref{1.intro.endgame}, we actually need to impose $ \sigma < 1/(m+1)$, where $ m \geq 1$ is the algebraic multiplicity of $\lambda_0$ in the spectrum.

We describe here a situation in which we improve the limiting Gevrey index.

Assume finally that~\eqref{1.intro.propa.bis} and~\eqref{1.intro.lower} can be replaced by
\begin{align}\left|U_n(s',s)\right| &\lesssim e^{|n|(s-s') ({\Imm}\,\lambda_0 + \omega)}\label{1.intro.propa.bis.max}
\\
\intertext{and}
|\f_{\e}(s,x,\theta)| &\gtrsim \,e^{-\e^{-\delta}} e^{s({\Imm}\,\lambda_0 - \e^2 s^2- r - \omega)}\label{1.intro.lower.max}
\end{align}
respectively. Following the previous computations, we may then replace~\eqref{1.intro.intermediaire} by
\[
e^{-\e^{-\delta}} e^{s ({\Imm}\,\lambda_0 + \omega)} R\rho^{-1} \ll \,e^{-\e^{-\delta}} e^{s({\Imm}\,\lambda_0 - \e^2 s^2 - r - \omega)}
\]
and we finally get, instead of~\eqref{1.intro.constraint}, the new constraint
\[
e^{\und{s}(\e^2 \und{s}^2 + r + \omega)} R\rho^{-1} \ll 1.
\]


It can be fulfilled for any $\delta$ in $(0,2/3)$, which implies instability in Gevrey spaces $G^{\sigma}$ with $\sigma < 2/3$. We show in Sections~\ref{1.section.assumptions} and~\ref{1.section.ansatz} that assumptions of maximality and semi-simplicity for the most unstable eigenvalue lead to~\eqref{1.intro.propa.bis.max} and~\eqref{1.intro.lower.max}. These correspond to the assumptions of Theorem~\ref{1.theorem.4}.

\section*{Notations}
\begin{itemize}
\item For all $z\in\Cbb^{m}$ and $k\in\N^{m}$, we put
\begin{equation}\label{1.notation.product}
z^{k} = \prod_{i= 1,\ldots,m} z_i^{k_i}
\end{equation}

\item For all $k\in\N^{m}$
\begin{equation}\label{1.notation.binom}
\binom{k_1 + \cdots + k_m}{k_1,\ldots,k_m} = \frac{\dsp (k_1+\cdots+k_m)!}{\dsp \prod_{i= 1,\ldots,m} k_i! }
\end{equation}

\item For all $m$ and $i\in\{1,\ldots,m\}$, we denote $1_{i}$ the $m$-uple with all coefficients null but the $i^{\rm th}$:
\begin{equation}\label{1.notation.1i}
1_{i} = (0,\ldots,0,1,0,\ldots,0)
\end{equation}

\item For all reals $A$ and $B$ we note
\begin{equation}\label{1.notation.lesssim}
A \lesssim B
\end{equation}
if there is some constant $C$ independent of $\e$ such that
\[
A \leq C B.
\]

\item For any functions $A$ and $B$ of $\e$, we denote
\begin{equation}\label{1.notation.ll}
A \ll B \quad \Longleftrightarrow \quad A = o_{\e\to0}(B).
\end{equation}

\item For $r>0$ and $x_0\in\R^{d}$ we denote
\begin{equation}\label{1.notation.ball}
B_{r}(x_0) = \left\{x\in\R^{d} \;\big|\; |x-x_0| < r\right\}.
\end{equation}
\end{itemize}

\section{Main assumptions and results}\label{1.section.assumptions}



\subsection{Definitions: H\"older well-posedness in Gevrey spaces}\label{1.subsection.definitions}



We recall the definition of Gevrey functions on an open set $B$ of $\R^{d}$:

\begin{defi}[Gevrey functions]\label{1.def.gevrey}
Let $\sigma\in(0,1)$ and $c>0$. We define $G^{\sigma}_{c}(B)$ as the set of $C^{\infty}$ functions $f$ on $B$ such that there is a constant $C>0$ such that
\begin{equation}
|\dptl^{\alf} f|_{L^{\infty}(B)} \leq C c^{|\alf|} |\alf|!^{1/\sigma} \quad \forall \alf \in\N^{d}.
\end{equation}
\noindent We then define a norm on $G^{\sigma}_{c}(B)$ by
\begin{equation}
\|f\|_{\sigma,c,B} = \sup_{\alf} |\dptl^{\alf} f|_{L^{\infty}(B)}c^{-|\alf|}|\alf|!^{-1/\sigma}.
\end{equation}
\end{defi}

For an introduction to Gevrey spaces and their properties, we refer to the book of Rodino~\cite{rodino1993linear}. We introduce also space-time conical domains centered on\linebreak $(0,x_0)\in\R\times\R^{d}$.

\begin{defi}[Conical domains]\label{1.defi.conical}
For $x_0\in\R^{d}$, $R>0$, $\rho>0$ and $t\geq 0$ we define the set
\begin{equation}\label{1.defi.Omega.t}
\Omega_{R,\rho,t}(x_0) = \left\{x \in\R^{d} \;\Big|\; R |x-x_0|_{1} + \rho t <1 \right\}
\end{equation}
with $|x|_{1} = \sum_{j=1,\ldots,d} |x_j|$ the $L^{1}$ norm on $\R^{d}$. Note that for all $t \geq \rho^{-1}$, $\Omega_{R,\rho,t}(x_0) = \emptyset$. We also denote
\begin{multline}\label{1.defi.Omega}
\Omega_{R,\rho}(x_0)= \bigcup_{t\geq 0} \{t\}\times\Omega_{R,\rho,t}(x_0)\\
= \left\{(t,x) \in\R\times\R^{d} \;\Big|\; 0\leq t < \rho^{-1},\; R |x-x_0|_{1} + \rho t <1 \right\}.
\end{multline}
\end{defi}

Note that $\Omega_{R,\rho,t}$ is decreasing for the inclusion as a function of $R$, $\rho$ and $t$. In particular, $\Omega_{R,0,0}(x_0)$ is $B_{R^{-1}}(x_0)$.

The question is whether the Cauchy problem~\eqref{1.Cauchy} is well-posed in Gevrey spaces or not, in the following sense

\begin{defi}[H\"older well-posedness]\label{1.defi.well_posedness}
We say that~\eqref{1.Cauchy} is H\"older well-posed in $G^{\sigma}$ locally around $x_0\in\R^{d}$ if there are constants $r_0 > r_1 >0$, $c>0$, $C_{\text{in}}>0$, $C_{\text{fin}}$, $\rho>0$, $\alf\in(0,1)$ such that for any $h$ in $G^{\sigma}_{c}(B_{r_0}(x_0))$ with
\[
\|h\|_{\sigma,c,B_{r_0}(x_0)} \leq C_{\text{in}}
\]
and all $R>r_1^{-1}$ the Cauchy problem~\eqref{1.Cauchy} associated to $h$ has a unique solution $u(t,x)$ in $C^{1}(\overline{\Omega}_{R,\rho}(x_0))$ with $|u|_{L^{2}(\Omega_{R,\rho}(x_0))} \leq C_{\text{fin}}$ and if moreover, given $h_1$ and $h_2$ in $G^{\sigma}_{c}(B_{r_0}(x_0))$ the corresponding solutions $u_1$ and $u_2$ satisfy, for all $R>r_1^{-1}$, the~estimate
\[
|u_1 - u_2|_{L^{2}(\Omega_{R,\rho}(x_0))} \lesssim\|h_1 - h_2\|_{\sigma,c,B_{r_0}(x_0)}^{\alf}.
\]
\end{defi}



\subsection{Assumptions}\label{1.subsection.normal}


We define the principal symbol evaluated at a distinguished frequency $ \xi_0\in\R^{d}$ by
\begin{equation}\label{1.def.A}
A(t,x,u) = \sum_{j} A_j(t,x,u)\xi_{0,j} \,, \; \forall (t,x,u) \in \R_{+}\times\R^{d}\times\R^{N}.
\end{equation}

\begin{hypo}\label{1.hypo.1}

We assume that for some $x_0\in\R^{d}$ and $\vec{u}_0\in\R^{N}$, the spectrum of $A(0,x_0,\vec{u}_0)$ is not real:
\begin{equation}\label{1.hypo.spectrum}
{\Sp} A(0,x_0,\vec{u}_0) \not\subseteq \R.
\end{equation}
\end{hypo}

That is, the principal symbol $A$ is initially elliptic.

\begin{notation}\label{1.notation.elli}
We denote then
\begin{equation}\label{1.def.A_0}
A_0 = A(0,x_0,\vec{u}_0)
\end{equation}
which is a constant matrix with non-real spectrum by~\eqref{1.hypo.spectrum}. Among the nonreal eigenvalues of $A_0$, we denote $\lambda_0$ one of those with maximal positive imaginary part, denoted $\g_0$. We denote $\vec{e}_{+}$ one associated eigenvector. We denote also
\begin{equation}\label{1.def.undA}
\und{A}(t,x) = A(t,x,\vec{u}_0).
\end{equation}
\end{notation}

Up to translations in $x$ and $u$, which do not affect our assumptions, and by homogeneity in $\xi$, we may assume
\begin{equation}\label{1.etc}
x_0 = 0\,, \quad \vec{u}_0 = 0 \,, \quad \xi_0 \in \mathbb{S}^{d-1}.
\end{equation}

Under Assumption~\ref{1.hypo.1} alone, we prove instability for the Cauchy problem~\eqref{1.Cauchy} in some Gevrey indices (Theorem~\ref{1.theorem.2} in Section~\ref{1.subsection.result} below). We now formulate additional assumptions which yield instability for higher Gevrey spaces (Theorems~\ref{1.theorem.3} and~\ref{1.theorem.4} below).

\begin{hypo}\label{1.hypo.2}
For some $x_0 \in \R^d$ and $\xi_0 \in \mathbb{S}^{d-1},$ the matrix $A_0$ has an eigenvalue $\lambda_0$ such that there holds $\lambda_0 \in \Cbb \setminus \R,$ and ${\Imm} \, \lambda_0 > {\Imm} \, \mu,$ for any other eigenvalue $\mu$ of $A_0.$ Besides, the eigenvalue $\lambda_0$ is semisimple (which means algebraic and geometric multiplicities coincide) and belongs to a branch of semisimple eigenvalues of $A.$ Finally, $(0,x_0,\lambda_0)$ is not a coalescing point in the spectrum of $\underline A.$
\end{hypo}

\medskip

We denote $P_0$ the generalized eigenprojector of $A_0$ associated with $\lambda_0,$ which can be defined as
\[
P_0 = \frac{1}{2i\pi} \int_{\gamma} \left(\lambda - A_0\right)^{-1} d\lambda
\]
with $\gamma$ a small enough contour enclosing $\lambda_0$. We denote also $A_0^{-1}$ the partial inverse of $A_0-\lambda_0,$ defined by $P_0 A_0^{-1} = 0,$ $\left(A_0 - \lambda_0\right) A_0^{-1} = \Id - P_0.$ We also denote $(t,x)\linebreak= (x_0, \dots, x_d),$ so that $\dptl_0 = \dptl_t,$ $\dptl_j = \dptl_{x_j}.$

\begin{rema}\label{1.remark.asumm.2}
The non-coalescing Assumption~\ref{1.hypo.2} implies (see~\cite{kato2013perturbation}, or~\cite[Corollary~2.2]{texier2015rouche}) that there is a smooth (actually, analytical) branch $\lambda$ of eigenvalues of $\underline A$ such that $\lambda(0,x_0) = \lambda_0.$ The corresponding local generalized eigenprojector $\underline P$ is smooth as well. The local semisimplicity assumption means that $\underline A \,\underline P = \lambda \underline P,$ that is, in restriction to the generalized eigenspace associated with $\lambda,$ the symbol $A$ is diagonal. A sufficient condition for semisimplicity is algebraic simplicity of the eigenvalue.
\end{rema}


\begin{hypo}\label{1.hypo.3}
With notation $P_0$ and $A_0^{-1}$ introduced just above Remark~\ref{1.remark.asumm.2},
\begin{enumerate}\romanenumi
\item \label{list2.8.1} there holds $P_0 \dptl_j \underline A(0,x_0) P_0 = 0,$ for all $j \in \{0, \dots, d\}.$

\emph{Under condition~\eqref{list2.8.1}, the matrix
\begin{equation}\label{1.local.assum.2}
P_0 \dptl_i \underline A A_0^{-1} \dptl_j \underline A P_0 + P_0 \dptl_j \underline A A_0^{-1} \dptl_i \underline A P_0 + P_0 \dptl_{ij}^2 \underline A P_0
\end{equation}
(where derivatives of $\underline A$ are evaluated at $(0,x_0)$) has only one non-zero eigenvalue (see \cite{kato2013perturbation}, or~\cite[Proposition~2.6]{texier2004short}), which we denote $\mu_{ij}$.}
\item \label{list2.8.2}The matrix $({\Imm} \, \mu_{ij})_{0\leq i,j\leq d}$ is negative definite.
\end{enumerate}
\end{hypo}

\begin{rema}\label{1.remark.asum.3}
Under Assumption~\ref{1.hypo.2}, Assumption~\ref{1.hypo.3} implies (see~\cite{kato2013perturbation}, or~\cite[Proposition~2.6]{texier2004short}) that the Hessian of ${\Imm} \, \lambda$ at $(0,x_0)$ is negative definite, hence $(0,x_0)$ is a local maximum, in space-time, for ${\Imm} \, \lambda.$
\end{rema}

\begin{hypo}\label{1.hypo.4}
We assume that $f(t,x,u)$ is quadratic in $u$ locally around $u=\vec{u}_0$, that is
\begin{equation}\label{1.hypo.f}
\dptl_{u} f(t,x,u) \big|_{u=\vec{u}_0=0} \equiv 0
\end{equation}
\end{hypo}



\subsection{Statement of the results}\label{1.subsection.result}

In the statement below we use notations introduced in Definitions~\ref{1.def.gevrey} and~\ref{1.defi.conical}.

\begin{theo}\label{1.theorem.2}
Under Assumptions~\ref{1.hypo.1} and~\ref{1.hypo.4}, the Cauchy problem~\eqref{1.Cauchy} is not H\"older well-posed in Gevrey spaces $G^{\sigma}$ for all $\sigma \in(0,1/(m+1))$ where $m$ is the algebraic multiplicity of $\lambda_0$. That is, for all $r_0>0$, $c>0$ and $\alf\in(0,1]$, there are sequences $R_{\e}^{-1} \to 0$ and $\rho_{\e}^{-1} \to 0$, a family of initial conditions $h_{\e}\in G_{c}^{\sigma}\left(B_{r_0}(x_0)\right)$ and corresponding solutions $u_{\e}$ of the Cauchy problem on domains $\Omega_{R_{\e},\rho_{\e}}(x_0)$ such~that
\begin{equation}
\lim_{\e\to 0} \|u_{\e}\|_{L^2(\Omega_{R_{\e},\rho_{\e}}(x_0))} / \|h_{\e}\|_{\sigma,c,B_{r_0}(x_0)}^{\alf} = +\infty.
\end{equation}


The time of existence of the solutions $u_{\e}$ is at least of order $\e^{1-\sigma} $.
\end{theo}

We prove the instability for a larger band of Gevrey indices under stronger assumptions. First, the semisimplicity and non-coalescing Assumption~\ref{1.hypo.2} allows for a critical index equal to $1/2$:

\begin{theo}\label{1.theorem.3}
Under Assumptions~\ref{1.hypo.2} and~\ref{1.hypo.4}, the result of Theorem~\ref{1.theorem.2} holds for any Gevrey index $\sigma$ in $(0,1/2)$.
\end{theo}

Second, under Assumption~\ref{1.hypo.2}, the null condition~\eqref{list2.8.1} and the sign condition~\eqref{list2.8.2} in Assumption~\ref{1.hypo.3} allow for the critical index to go from $1/2$ up to $2/3$:

\begin{theo}\label{1.theorem.4}
Under Assumptions~\ref{1.hypo.2}, \ref{1.hypo.3} and~\ref{1.hypo.4}, the result of Theorem~\ref{1.theorem.2} holds for any Gevrey index $\sigma$ in $(0,2/3)$.
\end{theo}

The rest of the paper is devoted to the proof of Theorems~\ref{1.theorem.2}, \ref{1.theorem.3} and~\ref{1.theorem.4}.

\begin{rema}\label{1.remark.aprestheo3}

Higher-order null and sign conditions allow for a greater critical index. Precisely, under Assumption~\ref{1.hypo.2}, if $(0,x_0)$ is a local maximum for $ {\Imm}\lambda $, and if there holds $ \lambda(\e s,x_0) - \lambda(0,x_0) = O(\e s)^{2k-1} $, then our proof implies ill-posedness with a critical Gevrey index equal to $2k/(2k+1)$. These null and sign conditions can be expressed in terms of derivatives of $\und{A}$, the partial inverse $A_0^{-1}$ and the projector $P_0$, see~\cite{kato2013perturbation}, or ~\cite[Remark~2.7]{texier2004short}. See also Remark~\ref{1.remark.amelioration}.
\end{rema}

\section{Highly oscillating solutions and reduction to a fixed point equation}\label{1.section.ansatz}



\subsection{Preparation of the equation}



We want to compare two solutions of~\eqref{1.Cauchy} with initial data $h_1$ and $h_2$, the first one satisfying
\[
h_1(x=0) = 0
\]
to fit with $\vec{u}_0 = 0$ in~\eqref{1.etc}. We can choose $h_1$ analytic, which leads, by the Cauchy--Kovalevskaya theorem, to an analytic solution $u_1$ in some small neighborhood of $(0,0)\in\R_{t}\times\R_{x}^{d}$. Then changing $u$ into $u-u_1$ in~\eqref{1.Cauchy} we get a new Cauchy problem
\begin{equation}\label{1.Cauchy.bis}
\dptl_{t}u = \sum_{j}A_{j}(t,x,u)\dptl_{x_j}{u} + F(t,x,u)u \,, \quad u(0,x) = h(x)
\end{equation}
where $F(t,x,u)\in\R^{N\times N}$ is also analytic, by analyticity of $f$ and $u_1$. We consider for $h$ small analytical functions as perturbations of the trivial datum $h\equiv 0$.



\subsection{Highly oscillating solutions}



As in~\cite{metivier2005remarks} we look for highly oscillating solutions of~\eqref{1.Cauchy.bis} with the aim of seeing the expected growth. In this view we posit the following ansatz
\begin{equation}\label{1.ansatz}
u_{\e}(t,x) = \e \ubf(t/\e,x,x\cdot\xi/\e)
\end{equation}
where the function $\ubf(s,x,\theta)$ is $2\pi$-periodic in $\theta$. We introduce for any analytical function $H(t,x,u)$ the compact notation
\begin{equation}\label{1.def.mathbf.H}
\Hbf(s,x,\ubf) = H\left(\e s,x,\e\ubf\right).
\end{equation}


For $u_{\e}(t,x)$ to be a solution of~\eqref{1.Cauchy.bis} it is then sufficient that $\ubf(s,x,\theta)$ solves the following equation
\begin{equation}\label{1.ds.u.passager}
\dptl_{s} \ubf = {\mathbf A}\,\dptl_{\theta} \ubf + \e\left(\sum_j {\mathbf A}_j \dptl_{x_j} \ubf + {\mathbf F} \, \ubf\right)
\end{equation}
where we use the notation~\eqref{1.def.mathbf.H} for the $\mathbf{A}_j$ and $\mathbf{F}$, and $A$ is defined by~\eqref{1.def.A}.

As we focus our study in a neighborhood of the distinguished point $(0,0)\in\R_{t}\times\R^{d}_{x}$ (recall that $x_0 = 0$), we rewrite now~\eqref{1.ds.u.passager} as
\begin{equation}\label{1.equation.u.compact}
\dptl_{s} \ubf - \und{{\mathbf A}} \dptl_{\theta} \ubf = {\mathbf G}(s,x,\ubf)
\end{equation}
where $\und{{\mathbf A}}(s,x) = \und{A}(\e s,x)$ in accordance with notation~\eqref{1.def.mathbf.H}. We define the source~term
\begin{equation}\label{1.def.G}
{\mathbf G} = \left({\mathbf A} - \und{{\mathbf A}} \right)\,\dptl_{\theta} \ubf + \e\left(\sum_j {\mathbf A}_j \dptl_{x_j} \ubf + {\mathbf F} \, \ubf\right)
\end{equation}
using the notation~\eqref{1.def.mathbf.H}.



\subsection{Upper bounds for the propagator}



To solve the Cauchy problem of the equation~\eqref{1.equation.u.compact} with initial datum $h_{\e}$ specified in Section~\ref{1.subsection.free.solution}, we first study the case ${\mathbf G} \equiv 0$, that is
\begin{equation}\label{1.equation.free}
\dptl_{s} \ubf(s,x,\theta) - \und{{\mathbf A}}(s,x) \dptl_{\theta} \ubf(s,x,\theta) = 0.
\end{equation}


Note that this equation is linear, non autonomous and non scalar. We define the matrix propagator $U(s',s,x,\theta)$ as the solution of
\begin{equation}\label{1.equation.propagator}
\dptl_{s} U(s',s,x,\theta) - \und{{\mathbf A}}(s,x) \dptl_{\theta} U(s',s,x,\theta) = 0 \,, \quad U(s',s',x,\theta) \equiv {\Id}
\end{equation}
and $U(s',s,x,\theta)$ is periodic in $\theta$, following the ansatz~\eqref{1.ansatz}.

\begin{lemm}[Growth of the propagator]\label{1.lemma.growth.propa}
The matrix propagator $U(s',s,x,\theta)$ satisfies the following growth of its Fourier modes in the $\theta$ variable:
\begin{multline}\label{1.growth.propa.n}
|U_n(s',s,x)|\\ 
\lesssim \omega^{-(m-1)}\,\exp\left(\int_{s'}^{s} \g^{\sharp}(\tau \,;R,\omega) d\tau \, |n|\right)\,, \quad \forall \, 0 \leq s' \leq s \text{ and } \forall \, n\in\Z
\end{multline}
where $R^{-1}$ stands for the spatial radius on which the bound holds.
\begin{itemize}
\item Under Assumption~\ref{1.hypo.1}, bound~\eqref{1.growth.propa.n} holds with
\begin{equation}\label{1.bound.g.sharp}
\g^{\sharp}(\tau \,;R,\omega) = \g_0 + \e \tau + R^{-1} + \omega
\end{equation}
 where $\g_0$ is defined in Notation~\ref{1.notation.elli}, $m \geq 1$ is the algebraic multiplicity of $\lambda_0$. The bounds hold for $\omega >0$ small enough, uniformly in $x$ in the ball $B_{R^{-1}}(0)$.

\item Under Assumption~\ref{1.hypo.2}, bound~\eqref{1.growth.propa.n} holds with $ m =1$ and
\begin{equation}\label{1.bound.g.sharp.bis}
\g^{\sharp}(\tau \,;R,\omega) = \g_0 + \e \tau + R^{-1}
\end{equation}

\noindent with $\omega = 0$, both uniformly in $x$ in the ball $B_{R^{-1}}(0)$.

\item Under Assumptions~\ref{1.hypo.2} and~\ref{1.hypo.3}, bound~\eqref{1.growth.propa.n} holds with $\omega = 1$ and
\begin{equation}\label{1.bound.g.sharp.max}
\g^{\sharp}(\tau \,;R,\omega) = \g_0
\end{equation}

\noindent The bounds hold uniformly in $x$ in the ball $B_{R^{-1}}(0)$.
\end{itemize}
\end{lemm}


In the framework of Assumption~\ref{1.hypo.1}, the parameter $\omega$ is chosen in Proposition~\ref{1.prop.below}.

\begin{proof}
As $\und{A}(t,x)$ does not depend on $\theta$, equation~\eqref{1.equation.propagator} reads in Fourier transform in $\theta$ as
\[
\dptl_{s}U_n(s',s,x) - in\,\und{A}(\e s, x) U_n(s',s,x) = 0\,, \quad U_n(s',s',x) = {\Id}
\]
where $U_n$ is the $n^{\rm th}$ Fourier component of $U(\theta)$. That implies that the operator $U(\theta)$ acts diagonally on each Fourier component.

The bounds~\eqref{1.growth.propa.n} --~\eqref{1.bound.g.sharp} follow from elementary, and purely linear-algebraic, arguments detailed in~\cite[Sections~(4.2) and (4.3)]{lerner2015onset}.

The bounds~\eqref{1.growth.propa.n} --~\eqref{1.bound.g.sharp.bis} follow from a smooth partial diagonalization of symbol~$\und{A}$ over the eigenspace associated with $\lambda$. In particular, there is no diagonalization or trigonalization error, hence $m = 1$ in~\eqref{1.growth.propa.n} and $\omega = 0$ in~\eqref{1.bound.g.sharp.bis}.

The bounds~\eqref{1.growth.propa.n} --~\eqref{1.bound.g.sharp.max} follow from a smooth partial diagonalization as described above, and the fact that the imaginary part of $\lambda$ is maximal at $(t,x) = (0,x_0),$ as described in Remark~\ref{1.remark.asum.3}.
\end{proof}



\subsection{Free solutions}\label{1.subsection.free.solution}



After getting the previous upper bounds for the propagator, we seek initial conditions $h_{\e}$ that achieve the maximal growth. For this purpose, following again~\cite{metivier2005remarks} we introduce the following high-oscillating, small and well-polarized initial data
\begin{equation}\label{1.def.initial.data}
h_{\e}(x) = \e\,e^{-M(\e)} {\Ret}\left(e^{-ix\cdot\xi_0/\e} \vec{e}_{+} + e^{ix\cdot\xi_0/\e} \vec{e}_{-}\right)
\end{equation}

\noindent which correspond in the ansatz~\eqref{1.ansatz} of high-oscillating solutions to
\begin{equation}\label{1.def.initial.data.ansatz}
\h_{\e}(x,\theta) = e^{-M(\e)} {\Ret}\left(e^{-i\theta} \vec{e}_{+} + e^{i\theta} \vec{e}_{-}\right).
\end{equation}


Here $\vec{e}_{+}$ is defined in Notation~\ref{1.notation.elli}, and $\vec{e}_{-} = \overline{\vec{e}_{+}}$. The parameter $M(\e)$ is large in the limit $\e\to 0$, chosen such that the Gevrey norm of $h_{\e}$ is small. We introduce also

\begin{equation}\label{1.free.solution}
\f_{\e}(s,x,\theta) = U(0,s,x,\theta) \h_{\e}(x,\theta)
\end{equation}
which we call the free solution of equation~\eqref{1.equation.u.compact} as it solves the equation for ${\mathbf G} \equiv 0$.


\subsubsection{Growth of the free solution}


\begin{lemm}[Growth of the free solution]\label{1.lemma.growth.free.solution}
There holds
\begin{equation}\label{1.growth.free.solution}
|\f_{\e}(s,x,\theta)| \gtrsim \omega^{-(m-1)}\,e^{-M(\e)}\exp\left(\int_{0}^{s} \g^{\flat}(\tau \,;r,\omega) d\tau \right).
\end{equation}


\begin{itemize}
\item Under Assumption~\ref{1.hypo.1}, bound~\eqref{1.growth.free.solution} holds with
\begin{equation}\label{1.bound.g.flat}
\g^{\flat}(\tau \,; r,\omega) = \g_0 - \e \tau - r - \omega,
\end{equation}
pointwise in $(s,x,\theta)\in[0,\und{s})\times B_r(x_0)\times\mathbb{T}$.

\item Under Assumption~\ref{1.hypo.2}, bound~\eqref{1.growth.free.solution} holds with $m=1$ and
\begin{equation}\label{1.bound.g.flat.bis}
\g^{\flat}(\tau \,; r,\omega) = \g_0 - \e \tau - r,
\end{equation}
with $\omega = 0$, pointwise in $(s,x,\theta)\in[0,\und{s})\times B_r(0)\times\mathbb{T}$.

\item Under Assumptions~\ref{1.hypo.2} and~\ref{1.hypo.3}, bound~\eqref{1.growth.free.solution} holds with $\omega = 1$ and
\begin{equation}\label{1.bound.g.flat.max}
\g^{\flat}(\tau \,; r,\omega) = {\Imm}\,\lambda(\e\tau,0) - r.
\end{equation}
pointwise in $(s,x,\theta)\in[0,\und{s})\times B_r(0)\times\mathbb{T}$.
\end{itemize}
\end{lemm}

\begin{proof}
Our choice of datum~\eqref{1.def.initial.data}-\eqref{1.free.solution} allows an exact localization at the distinguished frequency $\xi_0$. Similarly to the proof of Lemma~\ref{1.lemma.growth.propa}, the lower bounds follow from linear algebraic arguments detailed in~\cite{lerner2015onset}.
\end{proof}


\subsubsection{Smallness of the free solution and Gevrey index}


The size of the Gevrey-$\sigma$ norm of the initial data $h_{\e}$ is linked to the exponent $M(\e)$ as shown by the following

\begin{lemm}\label{1.size.gevrey.exp}
For any $\sigma\in(0,1)$, $c>0$ and $B$ an open set of $\R^{d}$ there holds
\begin{equation}\label{1.esti.gevrey.free}
\dsp \|h_{\e}\|_{\sigma,c,B} \lesssim \e\,\exp\left(- M(\e) + \frac{\e^{-\sigma}}{\sigma c^{\sigma}}\right).
\end{equation}


We emphasize that the constant in the previous inequality does not depend on $B$.
\end{lemm}

\begin{proof}
First we have
\[
\dsp \dptl_{x}^{k} e^{\pm ix\cdot\xi_{0}/\e} = \left(\pm i\xi_{0}/\e\right)^{k}e^{\pm ix\cdot\xi_{0}/\e} \,,\quad \forall k\in\N^{d} \,,\quad \forall x\in\R^{d}
\]
using notation~\eqref{1.notation.product} and then
\[
\dsp |\dptl_{x}^{k} e^{\pm ix\cdot\xi_{0}/\e}| \leq C_d \, \e^{-|k|} \,,\quad \forall k\in\N^{d} \,,\quad \forall x\in\R^{d}
\]
as $|\xi_{0}| = 1$, with $C_d>0$ a constant depending only of the dimension $d$. So that for any open set $B$ of $\R^{d}$ and by definition~\eqref{1.def.initial.data} of the initial data $h_{\e}$, there holds
\[
\dsp c^{-|k|}|k|!^{-1/\sigma} |\dptl_{x}^{k} h_{\e} |_{L^{\infty}(B)} \lesssim \e\, e^{-M(\e)} \,\e^{-|k|}\,c^{-|k|} |k|!^{-1/\sigma} \,,\quad \forall k\in\N^{d}.
\]


By Definition~\ref{1.def.gevrey} of the Gevrey norms, this implies
\[
\dsp \|h_{\e}\|_{\sigma,c,B} \lesssim \e e^{-M(\e)} \, \sup_{k\in\N^{d}} \e^{-|k|}\,c^{-|k|} |k|!^{-1/\sigma}.
\]


For any $t>0$ we have
\[
\frac{t^{|k|}}{|k|!} \leq e^{t}\,, \quad \forall t>0 \,, \quad \forall k\in\N^{d}
\]
and note that the loss is smaller as $|k|$ is larger. This leads to
\[
\|h_{\e}\|_{\sigma,c,B} \lesssim \e e^{-M(\e)} \, \sup_{k\in\N^{d}} \e^{-|k|}\,c^{-|k|} \left(t^{|k|}e^{-t}\right)^{-1/\sigma}
\]
and then by putting $t = \e^{-\sigma}c^{-\sigma}$ into this last inequality, we finally obtain the inequality~\eqref{1.esti.gevrey.free}.
\end{proof}


As we need $h_{\e}$ to be small both in Gevrey-$\sigma$ norm and in amplitude, we posit
\begin{equation}\label{1.size.gevrey}
M(\e) = \e^{-\delta}, \quad \delta \in(\sigma,1).
\end{equation}

\begin{rema}\label{1.remark.size.gevrey}
With the previous definition~\eqref{1.size.gevrey}, the initial data $h_{\e}$ is exponentially small, both in Gevrey-$\sigma$ norm and in absolute value. This last point is of importance, as we need $h_{\e}$ to be small enough to see the exponential growth of the solution it generates in a sufficiently long time $T(\e)$ to be defined later. A constraint on this final time will lead to a constraint on the size $e^{-M(\e)}$ of $h_{\e}$, and then to the constraint $\sigma < \delta$ (see~\eqref{1.size.gevrey}) bearing on the admissible Gevrey regularity.
\end{rema}



\subsection{Fixed point equation}

Using the propagator $U(s',s,\theta)$, the free solution~\eqref{1.free.solution} and the Duhamel formula, we can express now~\eqref{1.equation.u.compact} as the fixed point equation
\begin{equation}\label{1.fixed.point.equation}
\ubf(s,x,\theta) = \f_{\e}(s,x,\theta) + \int_{0}^{s} U(s',s,x,\theta) {\mathbf G}(s',\ubf(s',x,\theta)) ds'
\end{equation}
where ${\mathbf G}(\ubf)$ is defined by~\eqref{1.def.G}. We denote the integral term
\begin{equation}\label{1.defi.T}
T(s,\ubf) = \int_{0}^{s} U(s',s) {\mathbf G}(s',\ubf(s')) ds'
\end{equation}
which we split into three parts thanks to definition~\eqref{1.def.G} like
\begin{equation}\label{1.defi.T.decoupage}
\begin{aligned}
T(s,\ubf) & = \int_{0}^{s} U(s',s) \left[\left({\mathbf A} - \und{{\mathbf A}} \right)\,\dptl_{\theta} \ubf + \e\bigg(\sum_j {\mathbf A}_j \dptl_{x_j} \ubf + {\mathbf F} \, \ubf\bigg) \right] ds' \\
& = T^{[\theta]}(s,\ubf) + T^{[x]}(s,\ubf) + T^{[\ubf]}(s,\ubf)
\end{aligned}
\end{equation}
where we define
\begin{align}
T^{[\theta]}(s,\ubf) & = \int_{0}^{s} U(s',s) \,\left({\mathbf A} - \und{{\mathbf A}} \right) \,\dptl_{\theta} \ubf(s') ds' \label{1.def.T_theta}
\\
T^{[x]}(s,\ubf) & = \int_{0}^{s} U(s',s)\, \sum_j\big(\e\A_j(s',\ubf(s'))\big) \dptl_{x_j} \ubf(s') ds' \label{1.def.T_x}
\\
T^{[\ubf]}(s,\ubf) & = \int_{0}^{s} U(s',s) \, \big(\e \F(s',\ubf(s'))\big) \, \ubf(s') ds' \label{1.def.T_u}
.
\end{align}



\subsection{Sketch of the proof}\label{1.subsection.sketch}

We have now reduced the initial question of finding a family of initial data $h_{\e}$ generating a family of appropriately growing analytic solutions $u_{\e}$ to the fixed point equation~\eqref{1.fixed.point.equation} for operator $T$. To find smooth solutions of this equation we have first to find a suitable functional space $\EE$ with the following properties:
\begin{itemize}

\item The space $\EE$ should be a Banach space to make use of the Banach fixed point theorem. Moreover functions of $\EE$ should be smooth functions in variables $(s,x,\theta)$.

\item The space $\EE$ should be a Banach algebra equipped with norm $\VERT{\cdot}$ satisfying $ \VERT{\ubf \, \vbf} \leq \VERT{\ubf}\,\VERT{\vbf} $ as we deal with non linear terms ${\mathbf G}(\ubf)$.

\item We will need to precisely evaluate the action of derivation operators $\dptl_{x_j}$ and $\dptl_{\theta}$ on $\EE$. In an analytical framework, these are a priori not bounded operators, and as in~\cite{metivier2005remarks,ukai2001boltzmann} we should use time integration to get back boundedness in $\EE$ with some loss in the bounds we should quantify.

\item The space $\EE$ should be invariant by the flow $U(s',s,x,\theta)$. In this view, we need estimates in $\EE$ for the matrix flow $U_n(s',s,x)$.

\item The operator $T$ should be a contraction on $\EE$ for well chosen parameters, and for small $\e$.
\end{itemize}

\noindent To this end, Section~\ref{1.section.spaces} will present the satisfying functional setting, and Section~\ref{1.section.contraction} will prove the contraction estimate for $T$.

In order to prove the Hadamard instability, the existence of solutions to the fixed point equations~\eqref{1.fixed.point.equation} is not sufficient. The key of the proof is to obtain for the solution $\ubf$ associated to $\f_{\e}$ the same kind of growth as $\f_{\e}$, as developed in Section~\ref{1.intro.endgame}, and this is the aim of Section~\ref{1.section.existence}. Finally, such a growth for $\ubf$ leads to the Hadamard instability of the Cauchy problem~\eqref{1.Cauchy.bis}. This completes the proof of Theorems~\ref{1.theorem.2}, \ref{1.theorem.3} and~\ref{1.theorem.4} in Section~\ref{1.section.Hadamard}.

\section{Majoring series and functional spaces}\label{1.section.spaces}



\subsection{Properties of majoring series}\label{1.subsection.majoring}

One aim of the paper is to construct a family of analytical solutions of the fixed point equation~\eqref{1.fixed.point.equation}. We deal with functions of several variables: $x$, $(s,x)$ or $(s,x,u)$, and the question of analyticity of these functions with respect to all variables or only to some arises. In that purpose we consider formal series of $\mu$ variables, with complex coefficients that possibly depend on a parameter $y$ in some open domain $\mathcal{O}$ of $\Cbb^{\mu'}$. We denote such formal series
\[
\phi(z,y) = \sum_{k\in\N^{\mu}} \phi_{k}(y) z^{k} \,, \quad \phi_{k}(y) \in \Cbb \,, \quad \forall \, k\in\N^{\mu}\,, \; \forall \, y\in\mathcal{O}
\]
where we introduce formal unknowns $z = (z_1,\ldots,z_{\mu})$. A formal series $\phi(z,y)$ is really a $y$-dependent sequence $\left(\phi_k(y)\right)_k$ indexed by $k\in\N^{\mu}$. An important parameter is the dimension $\mu$ of the indices $k$. We define now the relation of majoring series between two formal series $\phi(z,y)$ and $\psi(Z,y)$, with $z$ and $Z$ denoting $\mu$ variables.

\begin{defi}[Majoring series]\label{1.definition.maj}
For $\phi(z,y)$ and $\psi(Z,y)$ formal series of respectively variable $z$ and variable $Z$, and $y$ a parameter in some open domain $\mathcal{O}$ of $\Cbb^{\mu'}$, with furthermore
\[
\psi(Z,y) = \sum_{k\in\N^{\mu}} \psi_k(y) Z^{k} \quad \text{with} \quad \psi_k(y) \geq 0 \quad \forall k\in\N^{\mu} \,,\; \forall y\in\mathcal{O}
\]
we define
\begin{equation}\label{1.def.majoring.series}
\phi(z,y) \prec_{y} \psi(Z,y) \quad \Longleftrightarrow \quad \Big(\forall k\in\N^{\mu}\,,\; \forall y\in\mathcal{O} : \; |\phi_k(y)| \leq \psi_k(y) \Big)
\end{equation}
\end{defi}

\begin{rema}
In notation $\prec_{y}$ we emphasize that we consider $y$ as a parameter in the formal series $\phi(z,y)$.
\end{rema}


In the following we sum up several classical properties of the relation~\eqref{1.def.majoring.series} (see \cite{cartan1995theorie}).

\begin{lemm}\label{1.majoring.properties}
Let $\phi$ and $\psi$ be as in the previous definition, with $\phi \prec_{y} \psi$. Then
\begin{enumerate}
\item If $\psi$ converges at a point $(Z,y)$ with $Z_i \geq 0$ for all $i=1,\ldots,\mu$, then $\phi$ converges on all $(z,y)$ such that $|z_i|\leq Z_i$, and
\begin{equation} 
|\phi(z_1,\ldots,z_{\mu},y)| \leq \psi(|z_1|,\ldots,|z_{\mu}|,y)
\end{equation}

\item The relation $\prec_{y}$ is compatible with formal derivations: denoting $\dptl_{i}$ the formal derivation along the $i^{\rm th}$ variable, we have
\begin{equation}
\phi \prec_{y} \psi \quad \Longrightarrow \quad \dptl_{i}\phi(z,y) \prec_{y} \dptl_{i}\psi(Z,y)
\end{equation}

\item The relation $\prec_{y}$ is compatible with multiplication:
\begin{equation}
\phi_1 \prec_{y} \psi_1 \text{ and } \phi_2 \prec_{y} \psi_2 \quad \Longrightarrow \quad \phi_1 \phi_2 \prec_{y} \psi_1\psi_2
\end{equation}

\item There is a constant $c_0>0$ such that the series
\begin{equation}\label{1.defi.Phi}
\Phi(z_1) = \sum_{k \geq 0} \frac{\dsp c_0}{\dsp k^2+1} z_1^{k}
\end{equation}
satisfies
\begin{equation}\label{1.Phi2}
\Phi^2 \prec \Phi
\end{equation}

The series $\Phi$ is analytic on $B_{1}(0)$, defined in~\eqref{1.notation.ball}.
\end{enumerate}
\end{lemm}



\begin{proof}
We give here a short proof of Lemma~\ref{1.majoring.properties}.
\begin{enumerate}

\item Assume that $\psi(Z,y)$ is converging at a point $(Z,y)$, with all $Z_i \geq 0$. By definition of the majoring series, we have for all $k\in\N^{\mu}$ the inequality $ |\phi_{k}(y)| \leq \psi_{k}(y) $. Since the series $\sum_{k} \psi_{k}(y)Z^{k}$ is convergent, then for all $z\in\Cbb^{\mu}$ such that $|z_i| \leq Z_i$ the series $\sum_{k} \phi_{k}(y) z^{k}$ converges and there holds by~\eqref{1.notation.product} and Definition~\ref{1.definition.maj}
\begin{align*}
\left| \sum_{k\in\N^{\mu}} \phi_{k}(y) z^{k} \right| & \leq \sum_{k\in\N^{\mu}} |\phi_{k}(y)| \prod |z_j|^{k_j} \\
& \leq \sum_{k\in\N^{\mu}} \psi_{k}(y) \prod |z_j|^{k_j} \\
& = \psi(|z_1|,\ldots,|z_{\mu}|,y)
\end{align*}
Hence the importance of using two different notations for the $\mu$ variables,\linebreak$z$ and $Z$.

\item By definition of formal derivation $\dptl_{i}$, there holds
\[
\dptl_{i}\phi(z,y) = \sum_{k\in\N^{\mu}} (k_i+1) \phi_{k+1_{i}}(y) z^{k}
\]
where $1_{i}$ is defined by~\eqref{1.notation.1i} and for all $k\in\N^{\mu}$ there holds
\[
|(k_i+1) \phi_{k+1_{i}}(y)| \leq (k_i+1) \psi_{k+1_{i}}(y)
\]
by Definition~\ref{1.definition.maj}, which is exactly the $k^{\rm th}$ coefficient of the formal series $\dptl_{i}\psi(Z,y)$.

\item Let $\phi^1$, $\phi^2$, $\psi^1$ and $\psi^2$ be such that $ \phi^1 \prec_{y} \psi^1$ and $ \phi^2 \prec_{y} \psi^2 $. By definition of the multiplication of two formal series, the coefficients of the formal series $\phi^1\phi^2(z,y)$ in $z$ are
\[
(\phi^1\phi^2)_{k}(y) = \sum_{p=0}^{k} \phi^1_{p}(y)\phi^2_{k-p}(y)
\]
and then for all $y\in\mathcal{O}$ and $k\in\N^{\mu}$ there holds
\begin{align*}
\left|(\phi^1\phi^2)_{k}(y)\right| & \leq \sum_{p=0}^{k} \left|\phi^1_{p}(y)\right| \,\left|\phi^2_{k-p}(y)\right| \\
& \leq \sum_{p=0}^{k} \psi^1_{p}(y) \,\psi^2_{k-p}(y)
\end{align*}
because $\phi^{1} \prec_{y} \psi^{1}$ and $\phi^2 \prec_y \psi^2$. As the right-hand side of the previous inequality is just $(\psi^1\psi^2)_k(y)$, this ends the proof.

\item For $\mu=1$ and $\mu'=0$, we consider the series
\[
\Phi(z) = \sum_{k\in\N} \frac{\dsp c_0}{\dsp k^2+1} z^{k}
\]
We compute
\[
\Phi^2(z) = \sum_{k\in\N} \sum_{p=0}^{k} \frac{\dsp c_0}{\dsp p^2+1}\frac{\dsp c_0}{\dsp (k-p)^2+1} z^{k}.
\]


To prove the existence of some $c_0 >0$ such that~\eqref{1.Phi2} holds, it suffices to prove that
\[
\sum_{p=0}^{k} \frac{\dsp k^2+1}{\dsp (p^2+1)((k-p)^2+1) }
\]
 is bounded for all $k\in\N$. Thanks to $k^2 \leq 2(p^2 + (k-p)^2) $ there holds
\[
\sum_{p=0}^{k} \frac{\dsp k^2+1}{\dsp (p^2+1)((k-p)^2+1) } \leq 4 \sum_{p=0}^{k} \frac{\dsp 1}{\dsp p^2+1} \leq 4\sum_{p\in\N} \frac{\dsp 1}{\dsp p^2+1}
\]
which suffices to end the proof of Lemma~\ref{1.majoring.properties}.\qedhere
\end{enumerate}
\end{proof}


After these abstract considerations we come back to series in the spatial variable $x$, where $t$ is a parameter. The principle behind the relation of majoring series is to replace unknown analytical functions by a fixed, well-known series. In this view we consider the series in $d$ variables $(X_1, \ldots, X_d)$, with $t\in[0,\rho^{-1})$ a parameter and $R$ and $\rho$ some positive constants
\begin{equation}\label{1.def.Phi}
\Phi(RX_1+\cdots+RX_d+\rho t) = \sum_{k\in\N^{d}} \left(R^{|k|}\sum_{p\in\N} \frac{c_0}{(|k|+p)^2+1} \binom{|k|+p}{k,p} \rho^p t^p \right) X^{k}
\end{equation}
using the notations~\eqref{1.notation.product} for $X^{k}$ and~\eqref{1.notation.binom} for $\binom{|k|+p}{k,p}$. We denote
\begin{equation}\label{1.def.Phi_k.t}
\Phi_{k}(t) = R^{|k|}\sum_{p\in\N} \frac{c_0}{(|k|+p)^2+1} \binom{|k|+p}{k,p} \rho^p t^p\,,\quad\forall k\in\N^{d}
\end{equation}
where it is implicit that $\Phi_{k}(t)$ depend also on $R$ and $\rho$. Note that the series in the right hand side of~\eqref{1.def.Phi_k.t} is convergent for $|t| <\rho^{-1}$. Since the series $\Phi(z)$ converges in $B_1(z=0)$, the series $\Phi(RX_1+\cdots+RX_d+\rho t)$ is convergent as a series in $X$ and $t$ variables on $\Omega_{R,\rho}(0)$ defined by~\eqref{1.defi.Omega}.

From now on, for convenience and with a slight abuse of notation, we will write
\begin{equation}
\Phi(RX+\rho t) = \Phi(RX_1+\cdots+RX_d+\rho t)
\end{equation}
as the reference series in the $x$ variable, for some positive constants $R$ and $\rho$. In the following Lemma~\ref{1.lemma.phi.prec.Phi} we sum up properties for formal series $\phi$ in $d$ variables with one parameter $t$ that satisfy
\[
\phi(x,t) \prec_{t} C \Phi(R X + \rho t)
\]
for some $C>0$. This is equivalent, thanks to~\eqref{1.def.majoring.series}, \eqref{1.def.Phi} and~\eqref{1.def.Phi_k.t} to
\begin{equation}\label{1.def.maj}
|\phi_{k}(t)| \leq C \Phi_{k}(t)\,, \quad \forall k \in\N^{d} \, \text{ and }\; 0\leq t < \rho^{-1}.
\end{equation}

\begin{lemm}\label{1.lemma.phi.prec.Phi}
For $\phi(x,t)$ a formal series in $x$ with $ \phi(x,t) \prec_{t} C\Phi(R X + \rho t) $ there holds
\begin{enumerate}
\item \label{lem4.4.1}$\phi(x,t)$ is analytic as a series in $x$ in the domain $\Omega_{R,\rho,t}(0)$ for all $0\leq t < \rho^{-1}$.

\item \label{lem4.4.2}For all $0\leq t < \rho^{-1}$, there holds
\begin{equation}\label{1.dx.Phi}
\dptl_{x_j} \phi(x,t) \prec_{t} CR \Phi'(R X + \rho t)
\end{equation}
with $\Phi'$ the derivative of $\Phi$.

\item \label{lem4.4.3}For any $R \geq R_0$ and $\rho \geq \rho_0$, there holds
\begin{equation}\label{1.R_0.R}
\Phi(R_0X+\rho_0 t) \prec_{R_0,\rho_0,R,\rho,t} \Phi(RX+\rho t).
\end{equation}

\item \label{lem4.4.4}For any $R>0$, $\rho>0$ and $0 \leq t' < t < \rho^{-1}$, there holds
\begin{equation}\label{1.t'.t}
\Phi(RX + \rho t') \prec_{t',t} \Phi(RX + \rho t).
\end{equation}
\end{enumerate}
\end{lemm}

\begin{proofc}
\begin{enumerate}
\item[\eqref{lem4.4.1}] By the first property of Lemma~\ref{1.majoring.properties}, the formal series $\phi(x,t)$ is analytic in $x$ on the domain of convergence of the series $\Phi(RX + \rho t)$ thought as a series in $X$ variable. As it is just $\Omega_{R,\rho,t}(0)$, defined by~\eqref{1.defi.Omega.t}, the function $\phi(x,t)$ is analytic on $\Omega_{R,\rho,t}(0)$ as a series in the $x$ variable for all $0\leq t < \rho^{-1}$.

\item[\eqref{lem4.4.2}] By the second property of Lemma~\ref{1.majoring.properties} there holds
\[
 \dptl_{x_j}\phi(x,t) \prec_{t} C\dptl_{X_j} \left(\Phi(RX+\rho t)\right)
\] 
and as
\[
\dptl_{X_j} \left(\Phi(RX+\rho t)\right) = \dptl_{X_j} \left(\Phi(RX_1 + \cdots + RX_{d} +\rho t)\right) = R\Phi'(RX+\rho t)
\]
for all $0\leq t < \rho^{-1}$, we finally get~\eqref{1.dx.Phi}.

\item[\eqref{lem4.4.3}] Thanks to notation~\eqref{1.def.Phi_k.t} we have $\Phi(RX + \rho t) = \sum_{k\in\N^{d}} \Phi_{k}(t) X^{k} $ for all\linebreak$0 \leq t < \rho^{-1}$, where we recall it is implicit that the coefficients $\Phi_{k}(t)\linebreak= \Phi_{k}(t,R,\rho)$ depend also on $R$ and $\rho$. In the definition~\eqref{1.def.Phi_k.t} we easily see~that
\[
\Phi_{k}(t,R_0,\rho_0) \leq \Phi_{k}(t,R,\rho)\,, \quad \forall R\geq R_0,\; \forall \rho \geq \rho_0,\; \forall 0 \leq t < \rho^{-1}
\]
which is exactly~\eqref{1.R_0.R}.

\item[\eqref{lem4.4.4}] In the same way we see that, $ R$ and $\rho$ being fixed, the coefficients $\Phi_{k}(t)$ are increasing functions of $t$:
\[
\Phi_{k}(t') \leq \Phi_{k}(t) \quad \forall k\in\N^{d}, \; \forall 0 \leq t' < t < \rho^{-1}
\]
which is exactly~\eqref{1.t'.t}.
\end{enumerate}
\end{proofc}

The first property of the previous Lemma~\ref{1.lemma.phi.prec.Phi} indicates that series controlled by $\Phi$ are analytic. Conversely the following Lemma~\ref{1.lemma.H} proves that analytic functions are controlled by appropriate series:

\begin{lemm}\label{1.lemma.H}
Let $H(t,x,u)$ an analytic function in the neighborhood of\linebreak$(0,0,0)\in\R\times\R^{d}\times\R^{N}$. Then there are some positive constants $C_{H}$, $R_H$, $\rho_H$ and $a_H$ such that
\begin{equation}\label{1.prec.H}
H(t,x,u) \prec C_H \Phi(R_H X + \rho_H t) \prod_{j=1}^{N}\frac{\dsp 1}{1 - a_H u_j}
\end{equation}
\end{lemm}

\begin{proof}
Formally we write
\[
H(t,x,u) = \sum_{k_1,k_2,k_3} H_{k_1,k_2,k_3} \,t^{k_1} \,x^{k_2} \, u^{k_3}
\]
with $k_1\in\N$, $k_2\in\N^{d}$ and $k_3\in\N^{N}$. By the Cauchy relations for $H$, we know there are some positive constants $C$, $r_1$, $r_2$ and $r_3$ depending only on $H$ such that
\[
|H_{k_1,k_2,k_3}| \leq C\frac{\dsp 1}{\dsp r_1^{k_1}r_2^{|k_2|}r_3^{|k_3|}} \,, \quad \forall \,(k_1, k_2,k_3)\in\N \times\N^{d} \times \N^{N}.
\]
We compare $ |H_{k_1,k_2,k_3}| $ to the coefficients of the series $\Phi(R_HX + \rho_H t)\prod \left(1 - a_H u_j\right)^{-1}$:
\begin{multline*}
\Phi(R_HX + \rho_H t) \prod_{j=1}^{N}\frac{\dsp 1}{1 - a_Hu_j} \\
\begin{aligned}
& = \sum_{p\in\N} \frac{c_0}{p^2+1} \left(R_HX+\rho_Ht\right)^{p} \, \sum_{q\in\N^{N}} a_H^{|q|}u^{q} \\
& = \sum_{p} \sum_{k_1 + |k_2| = p} \frac{c_0}{p^2+1} \binom{p}{k_1,k_2} (\rho_H t)^{k_1} (R_HX)^{k_2} \, \sum_{q} a_H^{|q|}u^{q} \\
& = \sum_{k_1,k_2,k_3} \frac{c_0}{(k_1 + |k_2|)^2+1} \binom{k_1 + |k_2|}{k_1,k_2} \rho_H^{k_1} R_H^{|k_2|} a_H^{|k_3|} \, t^{k_1} X^{k_2} u^{k_3}
\end{aligned}
\end{multline*}


Then for all $(k_1, k_2,k_3)\in\N \times\N^{d} \times \N^{N}$ we have
\begin{align*}
|H_{k_1,k_2,k_3}| & \leq C\frac{\dsp 1}{\dsp r_1^{k_1}r_2^{|k_2|}r_3^{|k_3|}} \\
& \leq \frac{C}{c_0} \frac{\dsp (k_1+|k_2|)^2 + 1}{\dsp (\rho_Hr_1)^{k_1} (R_Hr_2)^{|k_2|} (a_Hr_3)^{|k_3|}} \, \frac{c_0}{(k_1+|k_2|)^2+1} \binom{k_1+|k_2|}{k_1,k_2} \rho_H^{k_1} R_H^{|k_2|} a_H^{|k_3|}
\end{align*}
thanks to $\binom{k_1+|k_2|}{k_1,k_2} \geq 1$ for all $k_1,\,k_2$. By choosing $R_H$, $\rho_H$ and $a_H$ such that $\rho_Hr_1$, $R_Hr_2$ and $a_Hr_3$ are larger than $1$, the term
\[
\frac{\dsp (k_1+|k_2|)^2 + 1}{\dsp (\rho_Hr_1)^{k_1} (R_Hr_2)^{|k_2|} (a_Hr_3)^{|k_3|}}
\]
is bounded for all $(k_1, k_2,k_3)\in\N \times\N^{d} \times \N^{N}$. Then there is a constant $C_H>0$ depending only on $H$, $R_H$, $\rho_H$ and $a_H$ such that for all $(k_1, k_2,k_3)\in\N \times\N^{d} \times \N^{N}$ there holds
\[
|H_{k_1,k_2,k_3}| \leq C_H \frac{c_0}{(k_1 + |k_2|)^2+1} \binom{k_1 + |k_2|}{k_1,k_2} \rho_H^{k_1} R_H^{|k_2|} a_H^{|k_3|}
\]
which implies
\[
H(t,x,u) \prec C_H \Phi(R_H X + \rho_H t) \prod_{j=1}^{N}\frac{\dsp 1}{1 - a_H u_j}.\qedhere
\]
\end{proof}

\begin{lemm}\label{1.lemma.c_1}
There is $c_1>0$ such that
\begin{equation}\label{1.definition.c_1}
\sum_{p\in\mathbb{Z}} \frac{\displaystyle c_1}{\displaystyle p^2+1} \frac{\displaystyle c_1}{\displaystyle (n-p)^2+1} \leq \frac{\displaystyle c_1}{\displaystyle n^2+1}
\end{equation}
\end{lemm}

\begin{proof}
In the same way as in the proof of the fourth point of Lemma~\ref{1.majoring.properties}, there holds
\[
\sum_{p\in\mathbb{Z}} \frac{\displaystyle n^2+1}{\displaystyle (p^2+1)((n-p)^2+1)} \leq \sum_{p\in\mathbb{Z}} \frac{\displaystyle 2(p^2+1 + (n-p)^2 +1)}{\displaystyle (p^2+1)((n-p)^2+1)} \leq 4 \sum_{p\in\Z} \frac{\dsp 1}{\dsp p^2+1}
\]
which suffices to end the proof of Lemma~\ref{1.lemma.c_1}.
\end{proof}



\subsection{Definitions of functional spaces}



\subsubsection{Fixed time spaces \texorpdfstring{$\EE_{s}$}{EEs}}


We consider trigonometric series in one variable $\theta$ with coefficients in the space of formal series in $d$ variables $x$ in the sense of Section~\ref{1.subsection.majoring}, and we denote $F_{d+1}$ the space of all such trigonometric series:
\[
F_{d+1} = \left\{ \vbf(x,\theta) = \sum_{n\in\mathbb{Z}} \vbf_n(x) e^{in\theta} \;\Big|\; \vbf_n(x) = \sum_{k\in\N^{d}} \vbf_{n,k} x^{k} \right\}.
\]

\begin{defi}[Fixed time spaces $\EE_{s}$]\label{1.definition.EE_s}
Given $s\in[0,(\e \rho)^{-1})$, $R>0$, $\rho >0$, $M'>0$ and $\beta \in (0,1)$, we denote $\EE_{s} = \EE_{s}(R,\rho,M',\beta)$ the space of trigonometric series $\vbf\in F_{d+1}$ such that for some constant $C>0$ there holds
\begin{equation}\label{1.defi.EEs}
\vbf_n(x) \prec C\frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-\big(M' - \int_{0}^{s} \g(\tau)d\tau \big)\left< n \right>\Big) \Phi\left(R X+\e\rho s\right)\,,\quad \forall n \in\Z,
\end{equation}
where we denote
\begin{equation}\label{1.def.g}
\g(\tau) = \g(\tau \,;R,\omega) := \g^{\sharp}(\tau \, ; R,\omega) + \beta.
\end{equation}

We define a norm on $\EE_{s}$ with
\begin{equation}\label{1.defi.norm.s}
\|\vbf\|_{s} = \inf \left\{ C>0 \;|\text{~\eqref{1.defi.EEs}} \text{ is satisfied } \right\}.
\end{equation}
\end{defi}

Note that in definition~\eqref{1.def.g} of $\g$, the function $\g^{\sharp}$ corresponds to either of the functions defined in Lemma~\ref{1.lemma.growth.propa}. In the previous Definition~\ref{1.definition.EE_s}, it is implicit that the space $\EE_{s}$ depends on a positive function $\g^{\sharp}$.

Thanks to Lemma~\ref{1.lemma.phi.prec.Phi}, for $s\in[0,(\e\rho)^{-1})$, all $\vbf\in\EE_{s}$ are holomorphic in the $x$ variable in the domain $\Omega_{R,\e\rho,s}$ defined by~\eqref{1.defi.Omega.t}. We introduce also the growth time $\und{s}_1$ defined implicitly as 
\begin{equation}\label{1.def.und.s.1}
M' = \int_{0}^{\und{s}_1} \g(\tau) d\tau.
\end{equation}

For $0 \leq s < \und{s}_1$ we have $M' - \int_{0}^{s} \g(\tau)d\tau > 0$ and then analyticity of $\vbf$ in the $\theta$ variable. We will also see in Lemma~\ref{1.lemma.algebra} that if $0\leq s < \und{s}_1$, the space $(\EE_{s},\|\cdot\|_{s})$ is an algebra. After these considerations it is convenient to define the final time as
\begin{equation}\label{1.finaltime}
\und{s} = \min\left\{\und{s}_1, \left(\e\rho\right)^{-1}\right\}.
\end{equation}

To simplify the notations, in all the following we will omit the parameters $R$, $\rho$, $M'$ and $\beta$ in $\EE_{s}(R,\rho,M',\beta)$. None of the properties of the spaces $\EE_{s}$ depends on the particular values of those parameters.


\subsubsection{Spaces \texorpdfstring{$\EE$}{EE}}

We consider now trigonometric series
\[
\ubf(s,x,\theta) = \sum_{n\in\Z} \ubf_n(s,x) e^{in\theta}
\]
with coefficients $\ubf_n(s,x)$ being formal series in $x$ whose coefficients depend smoothly on $s\in[0,\und{s})$. We denote $F_{d+2}$ the space of all such trigonometric series:
\begin{multline*}
F_{d+2} = \bigg\{ \ubf(s,x,\theta) = \sum_{n\in\Z} \ubf_n(s,x) e^{in\theta} \;\Big|\; \ubf_n(s,x) \\
= \sum_{k\in\N^{d}} \ubf_{n,k}(s)x^{k} \; \text{with } \ubf_{n,k}(s) \;C^{\infty} \text{ in } s \bigg\}.
\end{multline*}

\begin{defi}[Spaces $\EE$]
We introduce
\begin{equation}\label{1.defi.space}
\EE = \left\{\ubf\in F_{d+2} \,|\, \forall \, 0\leq s <\und{s} \,, \quad \ubf(s)\in\EE_{s} \right\}
\end{equation}
and the corresponding norm
\begin{equation}\label{1.defi.norm}
\VERT{\ubf} = \sup_{0\leq s < \und{s}} \|\ubf(s)\|_s.
\end{equation}
\end{defi}

Recalling the definition of majoring series~\eqref{1.def.majoring.series} and the definition of $\EE_{s}$~\eqref{1.defi.EEs}, for all $\ubf\in\EE$ there holds
\begin{multline}\label{1.esti.u}
\ubf_n(s,x) \prec_{s} \VERT{\ubf} \frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-\big(M' - \int_{0}^{s} \g(\tau) d\tau \big)\left< n \right>\Big) \Phi\left(R X+\e\rho s\right)\,,\\
\forall n \in\Z,\; \forall s\in[0,\und{s}).
\end{multline}


For $\ubf$ valued in $\mathbb{C}^{N}$, $\ubf\in\EE$ means simply that each component of $\ubf$ is in $\EE$, and $\VERT{ \ubf}$ is then the maximum of the norms of the components.

We denote the ball of $\EE$ of radius $a$, centered in $\ubf\in\EE$ by
\begin{equation}\label{1.defi.ball}
B_{\EE}(\ubf,a) = \big\{ \vbf\in\EE \;|\; \VERT{\vbf - \ubf} < a \big\}.
\end{equation}



\subsection{Some properties of spaces \texorpdfstring{$\EE$}{EE}}


\subsubsection{The spaces \texorpdfstring{$\EE_{s}$}{EEs} are Banach spaces}


\begin{prop}
For all $s\in[0,\und{s})$, the space $\EE_{s}$ equipped with the norm $\|\cdot\|_{s}$ is a Banach space.
\end{prop}

\begin{proof}
Any $\vbf$ in $\EE_{s}$ is uniquely determined by the sequence of coefficients $(\vbf_{n,k})_{n\in\Z,k\in\N^{d}}$, where
\[
\vbf(x,\theta) = \sum_{n\in\Z} \vbf_n(x) e^{in\theta} \quad \text{with} \quad \vbf_n(x) = \sum_{k\in\N^{d}} \vbf_{n,k}x^{k}.
\]


By the definition of majoring series~\eqref{1.def.maj} and notation~\eqref{1.def.Phi_k.t}, the definition~\eqref{1.defi.EEs} is equivalent to
\begin{multline*}
|\vbf_{n,k}| \leq C \frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-\big(M' - \int_{0}^{s} \g(\tau) d\tau \big)\left< n \right>\Big) \Phi_{k}(\e s) \,,\quad \forall n\in\Z\,,\\
k\in\N^{d},\; 0\leq s < (\e \rho)^{-1}
\end{multline*}
where $\g$ is defined in~\eqref{1.def.g}. Thus the map
\goodbreak
\begin{equation}
\mathcal{O}(s): \vbf\in\EE_{s} \mapsto \left(\vbf_{n,k} \mathcal{O}_{n,k}(s)\right)_{n\in\Z, k\in\N^{d}}
\end{equation}
with
\[
\mathcal{O}_{n,k}(s) = \left(\frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-\big(M' - \int_{0}^{s} \g(\tau) d\tau \big)\left< n \right>\Big) \Phi_{k}(\e s)\right)^{-1}
\]
is onto $ \ell^{\infty}(\Cbb^{\Z\times\N^{d}}) $. By definition of the norm in $\EE_{s}$, the map $\mathcal{O}(s)$ is clearly an isometric isomorphism between $\EE_{s}$ and $\ell^{\infty}(\Cbb^{\Z\times\N^{d}})$. This implies that $\left(\EE_{s},\|\cdot\|_{s}\right)$ is a Banach space.
\end{proof}

This implies immediately the following

\begin{coro}
The space $(\EE,\VERT{\cdot})$ is a Banach space.
\end{coro}


\subsubsection{The spaces \texorpdfstring{$\EE_s$}{EEs} are Banach algebras}


\begin{lemm}\label{1.lemma.algebra}
For all $s\in[0,\und{s})$, for all $\vbf$ and $\w$ in $\EE_{s}$, the product $ \vbf\w$ is in $\EE_{s} $ and we have
\begin{equation} 
\|\vbf \w\|_{s} \leq \|\vbf\|_{s} \, \|\w\|_{s}.
\end{equation}
\end{lemm}

\begin{proof}
Starting with the definition of $\EE_{s}$~\eqref{1.defi.EEs}, we obtain first for all $n\in\Z$ the following
\begin{align*}
(\vbf \w)_{n}(x) &= \sum_{p+q=n} \vbf_p(x) \w_q(x) \\
 &\prec \sum_{p+q=n}
 \|\vbf\|_{s} \frac{\displaystyle c_1}{\displaystyle p^2+1}\exp\Big({}-(M' - \int_{0}^{s} \g(\tau) d\tau)\left< p \right>\Big) \Phi\left(R X+\e\rho s\right) \\
&\times \|\w\|_{s} \frac{\displaystyle c_1}{\displaystyle q^2+1}\exp\Big({}-(M' - \int_{0}^{s} \g(\tau) d\tau)\left< q \right>\Big) \Phi\left(R X+\e\rho s\right)
 \\
 &\prec \|\vbf\|_{s} \, \|\w\|_{s}\,\Phi^2\left(R X+\e\rho s\right) \\
&\mkern 120mu \sum_{p+q=n} \frac{\displaystyle c_1}{\displaystyle p^2+1}\frac{\displaystyle c_1}{\displaystyle q^2+1}\exp\Big({}-(M' - \int_{0}^{s} \g(\tau) d\tau)(\left< p \right>+\left< q \right>)\Big).
\end{align*}

Recalling that $\Phi^2 \prec \Phi$ by Lemma~\ref{1.majoring.properties}, we have
\begin{align*}
&(\vbf \w)_{n}(x) \prec \|\vbf\|_{s} \, \|\w\|_{s}\,\Phi\left(R X+\e\rho s\right)\\
&\mkern 160mu \sum_{p+q=n} \frac{\displaystyle c_1}{\displaystyle p^2+1}\frac{\displaystyle c_1}{\displaystyle q^2+1}\exp\Big({}-(M' - \int_{0}^{s} \g(\tau) d\tau)(\left< p \right>+\left< q \right>)\Big) \\
& \prec \|\vbf\|_{s} \, \|\w\|_{s} \Phi\left(R X+\e\rho s\right) \exp\Big({}-(M' - \int_{0}^{s} \g(\tau) d\tau)\left< n \right>\Big) \sum_{p+q=n} \frac{\displaystyle c_1}{\displaystyle p^2+1}\frac{\displaystyle c_1}{\displaystyle q^2+1}
\end{align*}
because $\left< p \right>+\left< q \right> \geq \left< p+q\right> = \left<n\right>$ and $ M' - \int_{0}^{s} \g(\tau) d\tau $ is positive for all $s < \und{s}$, and $\g$ is defined in~\eqref{1.def.g}. And by definition~\eqref{1.definition.c_1} of $c_1$ we have finally
\[
(\vbf \w)_{n}(x) \prec \|\vbf\|_{s} \, \|\w\|_{s}\frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-(M' - \int_{0}^{s} \g(\tau) d\tau)\left< n \right>\Big) \Phi\left(R X+\e\rho s\right)
\]
which implies the result.
\end{proof}
\goodbreak
This implies immediately the following

\begin{coro}\label{1.coro.algebra}
The space $\EE$ is an algebra, and the norm $\VERT{\cdot}$ is an algebra norm.
\end{coro}


\subsubsection{Action of holomorphic functions}


\begin{lemm}\label{1.lemma.holomorphic}
Let $H(t,x,u)$ be a holomorphic function on a neighborhood of $(0,0,0)\in\R_{t}\times\R_{x}^{d}\times\R_{u}^{N}$. Then for $\e$ small enough there are constants $C_H$, $R_H$ and $\rho_H$ which depend only on $H$ and $c_0$, such that for all $R\geq R_H$ and $\rho\geq\rho_H$,
\begin{equation}\label{1.esti.holomorphic}
\forall \,\ubf\in B_{\EE(R,\rho)}(0,1): \quad \VERT{\Hbf(\ubf)}\leq C_{H}2^{N}
\end{equation}
where $\Hbf$ is defined by~\eqref{1.def.mathbf.H} and $\VERT{\cdot}$ is defined by~\eqref{1.defi.norm}.
\end{lemm}

\begin{proof}
Thanks to Lemma~\ref{1.lemma.H} we have
\[
H(t,x,u) \prec C_H \Phi(R_H X + \rho_H t) \prod_{j=1}^{N}\frac{\dsp 1}{1 - a_H u_j}
\]


Let $\ubf$ be in $ B_{\EE}(0,1)$ with $\EE=\EE(R,\rho)$ for $R \geq R_H$ and $\rho \geq \rho_H$. For $\e$ small enough we have $ \e a_{H} < 1/2 $ so that $ \VERT {\e a_H \ubf} \leq 1/2 $. We now prove that $\Hbf(s,x,\ubf)$ is indeed in $\EE$. By Lemma~\ref{1.lemma.H} it suffices to prove that
\[
(s,x,\theta) \mapsto C_H \Phi(R_H X + \e\rho_H s) \prod_{j=1}^{N}\frac{\dsp 1}{1 - \e a_H \ubf_j(s,x,\theta)}
\]
is in $\EE$. Because $\EE$ is a Banach algebra (Corollary~\ref{1.coro.algebra}) and $\e a_{H} < 1/2$, the operator
\[
\ubf \mapsto \prod_{j=1}^{N}\left(1 - \e a_H \ubf_j\right)^{-1}
\]
is a bounded operator and we have
\[
\left|\mkern -1.6mu\left|\mkern -1.6mu\left|\prod_{j=1}^{N}\frac{\dsp 1}{1 - \e a_H \ubf_j(s,x,\theta)}\right|\mkern -1.6mu\right|\mkern -1.6mu\right|
\leq
\prod_{j=1}^{N}\frac{\dsp 1}{1 - \e a_H \VERT{\ubf}} \leq \left(\frac{\dsp 1}{1 - 1/2}\right)^{N} = 2^{N}
\]


By~\eqref{1.R_0.R}, we have $\Phi(R_H X + \e\rho_H s) \prec_{s} \Phi(R X+\e\rho s)$ for all $R\geq R_H$ and $\rho\geq\rho_H$, so that
\begin{align*}
\Phi(R_H X + \e\rho_H s) \Phi(R X+\e\rho s) & \prec_{s} \Phi(R X+\e\rho s)^2 \\
& \prec_{s} \Phi(R X+\e\rho s)
\end{align*}
by~\eqref{1.Phi2}. Hence $(s,x,\theta) \mapsto C_H \Phi(R_H X + \e\rho_H s) \prod_{j=1}^{N}(1 - \e a_H \ubf_j(s,x,\theta))^{-1}$ is in $\EE$, and then for all $\ubf\in\EE$ in the ball $B_{\EE}(0,1)$ the bound~\eqref{1.esti.holomorphic} holds.
\end{proof}

In the operators $T^{[\theta]}$, $T^{[x]} $ and $T^{[\ubf]} $ defined by~\eqref{1.def.T_theta}, \eqref{1.def.T_x} and~\eqref{1.def.T_u}, there appear $A$, $\und{A}$, $A_{j}$ and $F$. In Corollary~\ref{1.reg_dtheta.bis}, there will appear also $A_{u_j}$, all of which are analytic functions in variables $(t,x,u)\in\R\times\R^{d}\times\R^{N}$ in a neighborhood of $(0,0,0) \in \R_{t}\times\R_{x}^{d}\times\R_{u}^{N}$. The previous Lemma~\ref{1.lemma.holomorphic} applies:

\begin{coro}\label{1.coro.norms}
There are constants $R_0$ and $\rho_0$ such that for all $R\geq R_0$, $\rho \geq \rho_0$ and $\e$ small enough:
\begin{equation}\label{1.action.A}
\forall \,\ubf\in B_{\EE(R,\rho)}(0,1): \quad \VERT{\Hbf(\ubf)} \lesssim 1
\end{equation}
with $H$ equals to $A$, $\und{A}$, $A_j$, $F$, or $A_{u_j}$.
\end{coro}



\subsection{Action of \texorpdfstring{$U(s',s)$}{U(s',s)} on \texorpdfstring{$\EE$}{EE}}

Recall the growth of the Fourier modes of the propagator as showed in Lemma~\ref{1.lemma.growth.propa}
\[
|U_n(s',s,x)| \lesssim \omega^{-(m-1)}\,\exp\left(|n| \int_{s'}^{s} \g^{\sharp}(\tau) d\tau \right).
\]

Here, as opposed to~\cite{metivier2005remarks}, the propagator $U_n$ does depend on $x$. As $U_n(s',s,x)$ is the solution of the differential equation~\eqref{1.equation.propagator} and as $\und{A}(t,x)$ is analytic in $x$, so is $U_n(s',s,x)$. Using the Cauchy inequalities as in the proof of Lemma~\ref{1.lemma.H}, we can prove in particular that
\begin{equation}\label{1.esti.prec.propa}
U_n(s',s,x) \prec_{s',s} \omega^{-(m-1)}\,\exp\left(|n| \int_{s'}^{s} \g^{\sharp}(\tau) d\tau \right) \Phi(R_0 X)
\end{equation}
for $R_0$ determined in Corollary~\ref{1.coro.norms}. We use this result to determine precisely the action of the propagator on $\EE$.

\begin{lemm}\label{1.lemma.action.U}
Given $\ubf$ in $\EE = \EE(R,\rho,M',\beta)$ then for all $n\in\Z$ and\linebreak$0 \leq s' \leq s < \und{s}$ there holds
\begin{multline}\label{1.precise.bound.action.U}
U_n(s',s)\ubf_n(s',x)
\prec_{s',s} C_n(s',s) \,\omega^{-(m-1)}\,\|\ubf(s')\|_{s'}\\ \frac{\displaystyle c_1}{\displaystyle n^2+1} \, e^{{}-(M'-\int_{0}^{s}\g(\tau) d\tau)\left< n \right>} \Phi\left(R X+\e\rho s\right)
\end{multline}
\begin{equation}\label{1.defi.Cn}
\text{with} \quad C_n(s',s) = \exp\left({}- \left<n\right> \beta \,(s-s') \right) \leq 1.
\end{equation}

In particular we have
\begin{equation}\label{1.bound.lemma.action.U}
\|U(s',s)\ubf(s')\|_s \leq \omega^{-(m-1)}\, \|\ubf(s')\|_{s'} \,, \quad \forall \, 0 \leq s' \leq s < \und{s}.
\end{equation}
\end{lemm}

\begin{proof}
By the estimate~\eqref{1.esti.u} for $\ubf\in\EE$ we have
\[
\ubf_n(s',x)
\prec_{s'} \|\ubf(s')\|_{s'} \frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-(M' - \int_{0}^{s'} \g(\tau)d\tau)\left< n \right>\Big) \Phi\left(R X+\e\rho s'\right)
\]
where $\g$ is defined in~\eqref{1.def.g}. By estimate~\eqref{1.esti.prec.propa} and the multiplicative property of $\prec$ there holds
\begin{multline*}
U_n(s',s)\ubf_n(s',x) \prec_{s',s} \omega^{-(m-1)}\,\exp\left(|n| \int_{s'}^{s} \g^{\sharp}(\tau) d\tau \right) \\
\begin{aligned}
& \times\|\ubf(s')\|_{s'} \frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-(M'- \int_{0}^{s'} \g(\tau)d\tau)\left< n \right>\Big) \Phi\left(R X+\e\rho s'\right) \\
& \prec_{s',s} \omega^{-(m-1)}\,\|\ubf(s')\|_{s'} \frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-(M' - \int_{0}^{s} \g(\tau)d\tau)\left< n \right>\Big) \Phi\left(R X+\e\rho s\right) \\
& \times \exp\left({}- \left<n\right> \int_{s'}^{s} \left(\g(\tau) - \g^{\sharp}(\tau) \right) d\tau \right)
\end{aligned}
\end{multline*}
because $\Phi(R X + \e\rho s') \prec_{s',s} \Phi(R X + \e\rho s)$ for $s' \leq s < \und{s}$ by~\eqref{1.t'.t}. This gives us exactly~\eqref{1.precise.bound.action.U} using~\eqref{1.def.g}, and then~\eqref{1.bound.lemma.action.U}.
\end{proof}

\begin{rema}
The estimate~\eqref{1.bound.lemma.action.U} is not precise enough to show that $T$ is a contraction in $\EE$. The more precise estimate~\eqref{1.precise.bound.action.U} is very important for the estimate~\eqref{1.esti.reg_dtheta} below.
\end{rema}



\subsection{Norm of the free solution}

\begin{lemm}[Norm of the free solution]\label{1.lemma.norm.free.solution}
The free solution $\f$ defined by~\eqref{1.def.initial.data} satisfies
\begin{equation}\label{1.norm.free.solution}
\VERT{\f} \lesssim \omega^{-(m-1)}\,e^{M'-M(\e)}.
\end{equation}
\end{lemm}

\begin{proof}
The Fourier decomposition of $\f_{\e}$ is given by $\f_{\e} = \f_{+1}e^{-i\theta} + \f_{-1}e^{i\theta}$ with $ \f_{\pm 1}(s,x) = U_{\mp}(0,s,x)\vec{e}_{\pm} $. The Fourier coefficients $\f_{\pm 1}$ satisfy thanks to~\eqref{1.esti.prec.propa} the estimate
\begin{equation}\label{1.estimate.f.pm1}
\f_{\pm 1}(s) \prec_{s} \omega^{-(m-1)}\,e^{-M(\e)}e^{ \int_{0}^{s} \g^{\sharp}(\tau) d\tau } \Phi(R_0 X).
\end{equation}


Then by definition of $\VERT{\cdot}$ given by~\eqref{1.defi.norm}, and by definition~\eqref{1.def.g} of $\g$, there holds
\begin{align*}
\VERT{\f_{\pm 1}} & = \frac{\dsp 2}{\dsp c_0c_1} \omega^{-(m-1)}\,e^{M'-M(\e)} \max_{[0,\und{s})} e^{ \int_{0}^{s} \g^{\sharp}(\tau) d\tau}e^{- \int_{0}^{s} \g(\tau) d\tau} \\
& = \frac{\dsp 2}{\dsp c_0c_1} \omega^{-(m-1)}\,e^{M'-M(\e)} \max_{[0,\und{s})} e^{- \int_{0}^{s} \beta d\tau} \\
& \lesssim \omega^{-(m-1)}\,e^{M'-M(\e)}
\end{align*}
which ends the proof of Lemma~\ref{1.lemma.norm.free.solution}.
\end{proof}



\section{Regularization by integration in time and contraction estimates}\label{1.section.contraction}

In this section we prove estimates in spaces $\EE$ for the three operators $T^{[\theta]}$, $T^{[x]}$ and $T^{[\ubf]}$ defined respectively by~\eqref{1.def.T_theta}, \eqref{1.def.T_x} and~\eqref{1.def.T_u}. Note that in the first two operators there appear derivation operators $\dptl_{\theta}$ and $\dptl_{x_j}$. As we will see in the next subsection, these are not bounded operators in $\EE$. But thanks to some smoothing effect of the time-integration, as used in~\cite{metivier2005remarks}, we will show that operators $T^{[\theta]}$, $T^{[x]}$ and $T^{[\ubf]}$ are in fact bounded in $\EE$. We will follow in this section the work of~\cite{ukai2001boltzmann}.



\subsection{Lack of boundedness of derivation operators}


In the following we make precise how the derivation operators $\dptl_{x_j}$ and $\dptl_{\theta}$ act on $\EE$.

\begin{lemm}[Estimates for the derivation operators]\label{1.lemma.derivation}
For any $\ubf$ in $\EE$, we have the following estimates
\begin{align}
(\dptl_{\theta}\ubf)_n(s,x) & \prec_{s} |n| \,\VERT{\ubf} \frac{\dsp c_1}{\dsp n^2+1} e^{{}-(M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right) \label{1.action.dtheta}
\\
(\dptl_{x_j}\ubf)_n(s,x) & \prec_{s} \,R \,\VERT{\ubf} \frac{\dsp c_1}{\dsp n^2+1} e^{{}-(M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi'\left(R X+\e\rho s\right) \label{1.action.dy}
\end{align}
for all $n\in\Z$ and $s\in[0,\und{s})$.
\end{lemm}

\begin{proof}
The estimates~\eqref{1.action.dtheta} and~\eqref{1.action.dy} are straightforward. Indeed $(\dptl_{\theta}\ubf)_n = n \ubf_n $ for all $n\in\Z $ which implies~\eqref{1.action.dtheta}. For~\eqref{1.action.dy} there holds $(\dptl_{x_j}\ubf)_n = \dptl_{x_j} \ubf_n $ for all $ n\in\Z $ and we get~\eqref{1.action.dy} thanks to the relation~\eqref{1.dx.Phi}.
\end{proof}

\begin{rema}[Lack of boundedness of derivation operators]
Lemma~\ref{1.lemma.derivation} does not prove directly that the $\dptl_{x_j}$ and $\dptl_{\theta}$ are not bounded operators on $\EE$. But let us consider the function in $\EE$ defined by its Fourier modes
\begin{align*}
\ubf_n(s,x) &= \frac{\dsp c_1}{\dsp n^2+1} e^{{}-(M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right) \, \forall n\in\Z\\
\intertext{Then}
\left(\dptl_{\theta}\ubf\right)_n(s,x) &= \frac{\dsp c_1 n}{\dsp n^2+1} e^{{}-(M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right)
\end{align*}
and $\dptl_{\theta}\ubf$ is not in $\EE$ as we may not bound $ \frac{|n|}{n^2+1}$ by $ \frac{1}{n^2+1} $. Since $\Phi' \prec \Phi$ does not hold, the applications $\dptl_{x_j} \ubf$ are not in $\EE$ either. Hence the derivation operators $\dptl_{x_j}$ and $\dptl_{\theta}$ are not bounded operators in $\EE$.
\end{rema}

In the following, we will need exact estimates on terms like $\vbf\dptl_{\theta}\ubf$, or $U(s',s)\dptl_{x_j}\ubf(s')$.

\begin{lemm}[Action of product and $U(s',s)$ on the lack of boundedness]\label{1.lemma.product.lack}

For any $\ubf$ and $\vbf$ in $\EE$, for all $n\in\Z$ and $0\leq s' \leq s <\und{s}$, there holds
\begin{equation}\label{1.action.product.dtheta}
(\vbf\dptl_{\theta}\ubf)_n(s,x) \prec_{s} C|n| \,\VERT{\ubf}\, \VERT{\vbf} \frac{\dsp c_1}{\dsp n^2+1} e^{{}-(M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right) 
\end{equation}

\begin{equation}\label{1.action.product.dy}
(\vbf\dptl_{x_j}\ubf)_n(s,x) \prec_{s} \,C'R\, \VERT{\ubf}\, \VERT{\vbf} \frac{\dsp c_1}{\dsp n^2+1} e^{{}-(M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi'\left(R X+\e\rho s\right) 
\end{equation}
\begin{multline}\label{1.action.U.dy}
(U(s',s,x,\theta)\dptl_{x_j}\ubf(s',x,\theta))_n \\
\prec_{s',s} C_n(s',s)R\, \omega^{-(m-1)}\,\|\ubf(s')\|_{s'} \frac{\dsp c_1}{\dsp n^2+1} e^{{}-(M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi'\left(R X+\e\rho s'\right) 
\end{multline}
for some constants $C>0$ and $C'>0$ independent of all parameters.
\end{lemm}

\begin{proof}
To prove estimate~\eqref{1.action.product.dtheta} it suffices to get back to the proof of Lemma~\ref{1.lemma.algebra}. Following the same computations we get
\begin{multline*}
(\vbf\dptl_{\theta}\ubf)_n(s,x)\\
\prec_{s} \|\ubf\|_{s} \, \|\vbf\|_{s} \Phi\left(R X+\e\rho s\right) \exp\Big({}-(M' - \int_{0}^{s}\g(\tau) d\tau)\left< n \right>\Big) \sum_{p+q=n} \frac{\displaystyle c_1}{\displaystyle p^2+1}\frac{\displaystyle c_1 |q|}{\displaystyle q^2+1}.
\end{multline*}

By adapting the proof of the existence of some $c_1$ satisfying~\eqref{1.definition.c_1} in Lemma~\ref{1.lemma.c_1}, there holds
\[
\sum_{p+q=n} \frac{\dsp c_1}{\dsp p^2+1} \frac{\dsp c_1|q|}{\dsp q^2+1} \lesssim \frac{\dsp c_1 |n|}{\dsp n^2+1} \,, \quad \forall n\in\mathbb{Z}
\]
and then~\eqref{1.action.product.dtheta} holds.

In the same way we have
\begin{multline*}
(\vbf\dptl_{x_j}\ubf)_n(s,x)\\
\prec_{s} \|\ubf\|_{s} \, \|\vbf\|_{s} \frac{\displaystyle c_1}{\displaystyle n^2+1}\exp\Big({}-(M' - \int_{0}^{s}\g(\tau) d\tau)\left< n \right>\Big)\,R\Phi'\left(R X+\e\rho s\right) \Phi\left(R X+\e\rho s\right)
\end{multline*}

Thanks to Lemma~\ref{1.majoring.properties}, we differentiate the inequality $\Phi^2 \prec \Phi$ to get $ 2\Phi \Phi' \prec \Phi' $, hence estimate~\eqref{1.action.product.dy}.

For estimate~\eqref{1.action.U.dy} it suffices to adapt the proof of Lemma~\ref{1.lemma.action.U}, as $U(s',s)$ acts only on the size of the Fourier coefficients $\ubf_n(s,x)$ and not on the coefficients of the series $\ubf_{n,k}(s)$.
\end{proof}



\subsection{Integration in time and regularization of \texorpdfstring{$\dptl_{\theta}$}{d theta}}



\begin{prop}\label{1.reg_dtheta}
For operator $T^{[\theta]}$ defined by~\eqref{1.def.T_theta}, for any $\ubf\in B_{\EE}(0,1)$ there holds
\begin{equation}\label{1.esti.reg_dtheta}
\VERT{T^{[\theta]}(\ubf)} \lesssim \omega^{-(m-1)}\,\beta^{-1} \VERT{({\mathbf A} - \und{{\mathbf A}})(\ubf)}\,\VERT{\ubf}.
\end{equation}
\end{prop}

\begin{proof}
By Lemma~\ref{1.lemma.holomorphic}, the function $(\A - \und{\A})(\cdot,\ubf)$ is in $\EE$. Applying first estimate~\eqref{1.action.product.dtheta} we get
\begin{multline*}
\left((\A - \und{\A})(s',\ubf(s')) \dptl_{\theta}\ubf(s')\right)_n \\
\prec_{s'} |n| \,\VERT{\ubf}\, \VERT
{({\mathbf A} - \und{{\mathbf A}})(\ubf)}
\frac{\dsp c_1}{\dsp n^2+1} e^{{}- (M' - \int_{0}^{s'}\g(\tau) d\tau) \left<n\right>} \Phi\left(R X+\e\rho s'\right)
\end{multline*}
where $\g$ is defined in~\eqref{1.def.g}. Then by~\eqref{1.action.U.dy} there holds
\begin{multline*}
\left(U(s',s) (\A - \und{\A})(s',\ubf(s')) \dptl_{\theta}\ubf(s') \right)_n \prec_{s',s} C_n(s',s) |n| \,\omega^{-(m-1)}\,\VERT{\ubf}\\
\VERT{({\mathbf A} - \und{{\mathbf A}})(\ubf)} \frac{\dsp c_1}{\dsp n^2+1}e^{{}- (M' - \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right).
\end{multline*}


As integration in time and Fourier transform commute, we have
\[
\left(T^{[\theta]}(\ubf)\right)_n(s) = \int_{0}^{s} \left(U(s',s) (\A - \und{\A}) \dptl_{\theta}\ubf(s') \right)_n ds'
\]
and then
\begin{multline*}
\left(T^{[\theta]}(\ubf)\right)_n(s)
\prec_{s} \int_{0}^{s} C_n(s',s) |n| \,\omega^{-(m-1)}\,\VERT{\ubf} \\
\begin{aligned}
&\VERT{({\mathbf A} - \und{{\mathbf A}})(\ubf)} \,\frac{\dsp c_1}{\dsp n^2+1} e^{{}- (M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right) ds'\prec_{s} \omega^{-(m-1)}\,\VERT{\ubf}\\
& \VERT{({\mathbf A} - \und{{\mathbf A}})(\ubf)} \,\frac{\dsp c_1}{\dsp n^2+1}e^{{}- (M'- \int_{0}^{s}\g(\tau) d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right) \int_{0}^{s} C_n(s',s) |n| \, ds'.
\end{aligned}
\end{multline*}

To end the proof, we prove a uniform bound independent of $n$ for the integral term $ \int_{0}^{s} C_n(s',s) |n| \, ds'$. Recalling first the definition~\eqref{1.defi.Cn}:
\[
C_n(s',s) = \exp\left(- \beta\, (s-s') \left<n\right> \right)
\]
there holds
\begin{align*}
\int_{0}^{s} C_n(s',s) |n| \, ds' & = \int_{0}^{s} \exp\left(- \beta\, (s-s') \left<n\right> \right) |n| ds' \\
& = \exp\left({} - \beta\, s\left<n\right> \right) \int_{0}^{s} \exp\left(\beta\, s' \left<n\right> \right) |n| ds' \\
& \leq \beta^{-1}
\end{align*}
which ends the proof.
\end{proof}

Thanks to the definition~\eqref{1.def.undA} of $\und{A}$ and an expansion formula we make the previous result more precise:

\begin{coro}\label{1.reg_dtheta.bis}
For operator $T^{[\theta]}$ defined by~\eqref{1.def.T_theta}, for any $\ubf\in B_{\EE}(0,1)$ there holds
\begin{equation}\label{1.esti.reg_dtheta.bis}
\VERT{T^{[\theta]}(\ubf)} \lesssim \omega^{-(m-1)}\,\beta^{-1}\, \e \,\VERT{\ubf}^2.
\end{equation}
\end{coro}

\begin{proof}
By analyticity of $A(t,x,u)$ there are a family of matrices $A_{u_j}(t,x,u)$ depending analytically on $(t,x,u)$ such that
\[
A(t,x,u) - \und{A}(t,x) = \sum_j A_{u_j} u_j.
\]


This implies that
\[
\VERT{({\mathbf A} - \und{{\mathbf A}})(\ubf)} \leq \e \VERT{\ubf}
\]
by definition of notation~\eqref{1.def.mathbf.H}.
\end{proof}



\subsection{Integration in time and regularization of \texorpdfstring{$\dptl_{x_j}$}{dxj}}



After managing to deal with unbounded term $\dptl_{\theta}\ubf$ we consider the other unbounded terms $\dptl_{x_j}\ubf$. We consider then the operator $T^{[x]}$:

\begin{prop}\label{1.reg_dx}
For operator $T^{[x]}$ defined by~\eqref{1.def.T_x} and any $\ubf\in B_{\EE}(0,1)$, there holds
\begin{equation}\label{1.esti.reg_dx}
\VERT{T^{[x]}(\ubf)} \lesssim \omega^{-(m-1)}\,R\rho^{-1}\,\VERT{\ubf}.
\end{equation}
\end{prop}

\begin{proof}
By Lemma~\ref{1.lemma.holomorphic}, functions ${\mathbf A}_j(\cdot,\cdot, \ubf(\cdot))$ are in $\EE$. Applying first estimate~\eqref{1.action.product.dy} we get
\begin{align*}
& \left(\A_j(s',\ubf(s')) \dptl_{x_j}\ubf(s')\right)_n \\
& \prec_{s'} R \,\VERT{\ubf}\, \VERT{\A_j(\ubf)}
\frac{\dsp c_1}{\dsp n^2+1} e^{{}- (M'- \int_{0}^{s'}\g(\tau)d\tau) \left<n\right>} \Phi'\left(R X+\e\rho s'\right)
\end{align*}
where we denote $\VERT{\A_j(\ubf)}$ for $\VERT{\A_j(\cdot,\cdot, \ubf(\cdot))}$. Then by Lemma~\ref{1.lemma.action.U} there holds
\begin{align*}
& \left(\sum_j U(s',s) \A_j(s',\ubf(s')) \dptl_{x_j}\ubf(s') \right)_n \\
& \prec_{s',s} C_n(s',s) R \,\omega^{-(m-1)}\,\VERT{\ubf}\, \sum_j\VERT{\A_j(\ubf)} \frac{\dsp c_1}{\dsp n^2+1}e^{{}- (M'- \int_{0}^{s}\g(\tau)d\tau) \left<n\right>} \Phi'\left(R X+\e\rho s'\right) \\
& \prec_{s',s} R \,\omega^{-(m-1)}\,\VERT{\ubf}\, \sum_j\VERT{\A_j(\ubf)} \frac{\dsp c_1}{\dsp n^2+1}e^{{}- (M'- \int_{0}^{s}\g(\tau)d\tau) \left<n\right>} \Phi'\left(R X+\e\rho s'\right)
\end{align*}
as $C_n(s',s) \leq 1$. As integration in time and Fourier transform commute, we have
\[
\left(T^{[x]}(\ubf)\right)_n(s) = \int_{0}^{s} \left(U(s',s) \e \sum_j\A_j(s', \ubf(s')) \dptl_{x_j}\ubf(s')\right)_n ds'
\]
and then
\begin{align*}
& \left(T^{[x]}(\ubf)\right)_n \\
& \prec_{s} \int_{0}^{s} \e R \,\omega^{-(m-1)}\,\VERT{\ubf}\, \sum_j\VERT{\A_j(\ubf)} \frac{\dsp c_1}{\dsp n^2+1}e^{{}- (M'- \int_{0}^{s}\g(\tau)d\tau) \left<n\right>} \Phi'\left(R X+\e\rho s'\right) ds' \\
& \prec_{s} \e R\omega^{-(m-1)}\,\VERT{\ubf}\, \sum_j\VERT{\A_j(\ubf)} \frac{\dsp c_1}{\dsp n^2+1}e^{{}- (M'- \int_{0}^{s}\g(\tau)d\tau) \left<n\right>} \int_{0}^{s} \Phi'\left(R X+\e\rho s'\right) \, ds'.
\end{align*}

By term-wise integration of the series, we have
\begin{align*}
\int_{0}^{s} \Phi'\left(RX+\e\rho s'\right) ds' & = \int_{0}^{s} (\e\rho)^{-1} \dptl_{s'} \left(\Phi\left(R X+\e\rho s'\right) \right)ds' \\
& \prec_{s} (\e\rho)^{-1} \Phi\left(R X+\e\rho s\right)
\end{align*}
which suffices to end the proof of Proposition~\ref{1.reg_dx}.
\end{proof}



\subsection{Integration in time and product}

As $\EE$ is an algebra the operator $T^{[\ubf]}$ is directly bounded, with no need of a regularization by time result, unlike the operators $T^{[\theta]}$ and $T^{[x]}$. The following proposition makes this precise:

\begin{prop}\label{1.reg_u}
For the operator $T^{[\ubf]}$ defined by~\eqref{1.def.T_u}, for any $\ubf\in B_{\EE}(0,1)$ there holds
\begin{equation}\label{1.esti.reg_u}
\VERT{T^{[\ubf]}(\ubf)} \lesssim \omega^{-(m-1)}\,\beta^{-1}\, \e \, \VERT{\F(\ubf)} \, \VERT{\ubf}.
\end{equation}
\end{prop}

\begin{proof}
As in the proof of Proposition~\ref{1.reg_dtheta} we have
\begin{align*}
& \left(T^{[\ubf]}(\ubf)\right)_n(s) \\
& \prec_{s} \int_{0}^{s} C_n^{\eta}(s',s) \,\omega^{-(m-1)}\,\VERT{\ubf}\, \e\VERT{\F(\ubf)} \frac{\dsp c_1}{\dsp n^2+1}e^{{}- (M'- \int_{0}^{s} \g(\tau)d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right) ds' \\
& \prec_{s} \e \omega^{-(m-1)}\,\VERT{\ubf}\, \VERT{\F(\ubf)} \frac{\dsp c_1}{\dsp n^2+1}e^{{}- (M'- \int_{0}^{s} \g(\tau)d\tau) \left<n\right>} \Phi\left(R X+\e\rho s\right) \int_{0}^{s} C_n(s',s) |n| \, ds'
\end{align*}
and as
\[
\int_{0}^{s} C_n(s',s) |n| \, ds' \lesssim \beta^{-1} \,, \quad \forall n\in\Z,\; \forall 0 \leq s <\und{s}
\]
we get~\eqref{1.esti.reg_u}.
\end{proof}

Using Assumption~\ref{1.hypo.4}, we have in fact a more precise estimate:

\begin{coro}\label{1.coro.ref.du}
Under Assumption~\ref{1.hypo.4}, operator $T^{[\ubf]}$ defined by~\eqref{1.def.T_u} satisfies for any $\ubf\in B_{\EE}(0,1)$ the following bound
\begin{equation}\label{1.esti.reg_u.bis}
\VERT{T^{[\ubf]}(\ubf)} \lesssim \omega^{-(m-1)}\, \beta^{-1} \e \, \VERT{\ubf}^2.
\end{equation}
\end{coro}



\subsection{Contraction estimates}



The three previous subsections give us some precious estimates on operators $T^{[\theta]}$, $T^{[x]}$ and $T^{[\ubf]}$ in $\EE$. In the perspective of using a fixed point theorem on the Banach space $\EE$, we prove now estimates on the differences $T^{[\theta]}(\ubf) - T^{[\theta]}(\vbf)$, $T^{[x]}(\ubf) - T^{[x]}(\vbf)$ and $T^{[\ubf]}(\ubf) - T^{[\ubf]}(\vbf)$ for $\ubf$ and $\vbf$ in the ball $B_{\EE}(0,1)$.

\begin{prop}[Contraction estimates in $\EE$]\label{1.prop.estimates}
There are $R_0$, $\rho_0>0$ such that for all $R\geq R_0$, $\rho>\rho_0$ and $\e \in (0,1)$, we get the following estimates for all $\ubf$ and $\vbf$ in $B_{\EE}(0,1)$:
\begin{equation}\label{1.esti.T}
\VERT{T(\ubf)} \lesssim \omega^{-(m-1)}\,\left(\beta^{-1} \left(\e\VERT{\F(\ubf)} + \VERT{\mathbf{A}(\ubf) - \und{\A}(\ubf)} \right) + R\rho^{-1}\right) \VERT{\ubf} 
\end{equation}
\begin{multline}\label{1.esti.TT}
\VERT{T(\ubf) - T(\vbf)} \\
\lesssim \omega^{-(m-1)}\,\left(\beta^{-1} \left(\e \VERT{\F(\ubf)} + \VERT{\mathbf{A}(\ubf) - \und{\A}(\ubf)} \right) + R\rho^{-1}\right) \VERT{\ubf - \vbf}
\end{multline}
\end{prop}

\begin{proof}
Recalling that $T = T^{[\theta]} + T^{[x]} + T^{[\ubf]}$, we can apply directly Propositions~\ref{1.reg_dtheta}, \ref{1.reg_dx} and~\ref{1.reg_u} to get~\eqref{1.esti.T}.

To prove the contraction estimate~\eqref{1.esti.TT}, we write for all $\ubf$ and $\vbf$ in $B_{\EE}(0,1)$ the following
\[
T(\ubf) - T(\vbf) = \left(T^{[\theta]}(\ubf)-T^{[\theta]}(\vbf)\right) + \left(T^{[x]}(\ubf)-T^{[x]}(\vbf)\right) + \left(T^{[\ubf]}(\ubf)- T^{[\ubf]}(\vbf)\right)
\]

To get estimates on those three terms we first introduce some notations:
\begin{align*}
T_{H}^{[\theta]}(s,\ubf) & = \int_{0}^{s} U(s',s) \,\Hbf(\ubf(s')) \,\dptl_{\theta} \ubf(s') ds' \\
T_{H}^{[x_j]}(s,\ubf) & = \int_{0}^{s} U(s',s)\, \Hbf(\ubf(s')) \, \dptl_{x_j} \ubf(s') ds' \\
T_{H}^{[\ubf]}(s,\ubf) & = \int_{0}^{s} U(s',s) \, \Hbf(\ubf(s')) \, \ubf(s') ds'
\end{align*}
with $H(t,x,u)$ holomorphic on the neighborhood of $(0,0,0)\in\R_{t}\times\R_{x}^{d}\times\R_{u}^{N}$, and using notation~\eqref{1.def.mathbf.H}. For example,
\begin{equation}\label{1.T_H_theta}
T^{[\theta]}(s,\ubf) = T_{H}^{[\theta]}(s,\ubf) \quad \text{with } H = A - \und{A}
\end{equation}


Differences like $T^{[\theta]}(s,\ubf) - T^{[\theta]}(s,\vbf)$ are now easier to write. For example
\begin{equation}\label{1.one}
\begin{aligned}
T_{H}^{[\theta]}(s,\ubf) - T_{H}^{[\theta]}(s,\vbf) & = \int_{0}^{s} U(s',s) \,\left(\Hbf(\ubf(s')) \,\dptl_{\theta} \ubf(s') - \Hbf(\vbf(s')) \,\dptl_{\theta} \vbf(s') \right) ds' \\
& = \phantom{+}\int_{0}^{s} U(s',s) \,\left(\Hbf(\ubf(s')) - \Hbf(\vbf(s'))\right) \,\dptl_{\theta} \vbf(s') ds' 
\\
& \mkern 120mu+ \int_{0}^{s} U(s',s) \,\Hbf(\ubf(s')) \,\dptl_{\theta} (\ubf-\vbf)(s') ds'
\end{aligned}
\end{equation}
and these two terms are very similar to $T_{H}^{[\theta]}$. The same proof as Proposition~\ref{1.reg_dtheta} gives then directly
\[
\left|\mkern -1.6mu\left|\mkern -1.6mu\left| \int_{0}^{s} U(s',s) \,\Hbf(\ubf(s')) \,\dptl_{\theta} (\ubf-\vbf)(s') ds' \right|\mkern -1.6mu\right|\mkern -1.6mu\right| \lesssim \beta^{-1} \,\VERT{\Hbf(\ubf)}\, \VERT{\ubf - \vbf}
\]


For the other term~\eqref{1.one} we first note that for all $(t,x,u)$ and $(t,x,v)$ close to the distinguished point $(0,0,0)\in\R\times\R^{d}\times\R^{N}$, with $u-v$ small enough, there holds
\begin{align*}
H(t,x,u) - H(t,x,v) &= (u-v)\,\widetilde{H}(t,x,u,v)
\intertext{with}
\widetilde{H}(t,x,u,v) &= \int_{0}^{1} \dptl_{u} H(t,x,v + y (u-v)) dy.
\end{align*}


Note that $\widetilde{H}$ is an analytic function of $(t,x,u,v)$ near $(0,0,0,0)$. Hence an adaptation of the proof of Proposition~\ref{1.reg_dtheta} gives
\begin{align*}
&\left|\mkern -1.6mu\left|\mkern -1.6mu\left| \int_{0}^{s} U(s',s) \,\left(\Hbf(\ubf(s')) - \Hbf(\vbf(s'))\right) \,\dptl_{\theta} \vbf ds' \right|\mkern -1.6mu\right|\mkern -1.6mu\right| \\
& \mkern200mu\lesssim \omega^{-(m-1)}\, \beta^{-1}\e \,\VERT{\ubf - \vbf}\,\VERT{\widetilde{\Hbf}(\ubf,\vbf)}\, \VERT{\vbf} \\
& \mkern200mu\lesssim \omega^{-(m-1)}\, \beta^{-1}\e \,\VERT{\ubf - \vbf}\,\VERT{\widetilde{\Hbf}(\ubf,\vbf)}
\end{align*}
as $\vbf\in B_{\EE}(0,1)$, and recalling the prefactor $\e$ in notation~\eqref{1.def.mathbf.H}. In particular, for $H = A - \und{A} $ we have, for all $\ubf$ and $\vbf$ in $B_{\EE}(0,1)$, both
\[
\VERT{\Hbf(\ubf)} \lesssim \VERT{\mathbf{A}(\ubf) - \und{\A}(\ubf)} \quad \text{and} \quad \VERT{\widetilde{\Hbf}(\ubf,\vbf)} \lesssim 1
\]
thanks to Lemma~\ref{1.lemma.holomorphic}. Finally there holds for all $\ubf$ and $\vbf$ in $B_{\EE}(0,1)$:
\[
\VERT{T^{[\theta]}(\ubf)-T^{[\theta]}(\vbf)} \lesssim \omega^{-(m-1)}\, \beta^{-1} \left(\VERT{\mathbf{A}(\ubf) - \und{\A}(\ubf)} + \e \right) \VERT{\ubf - \vbf}.
\]

For both $T^{[x]}(\ubf) - T^{[x]}(\vbf)$ and $T^{[\ubf]}(\ubf) - T^{[\ubf]}(\vbf)$ we do the same to finally get
\begin{align*}
\VERT{T^{[x]}(\ubf) - T^{[x]}(\vbf)} & \lesssim \omega^{-(m-1)}\,R\rho^{-1} \VERT{\ubf - \vbf} \\
\VERT{ T^{[\ubf]}(\ubf) - T^{[\ubf]}(\vbf)}& \lesssim \omega^{-(m-1)}\, \beta^{-1}\e \VERT{\ubf - \vbf}
\end{align*}
as $\e$ is small.
\end{proof}
\goodbreak
Thanks to Corollary~\ref{1.reg_dtheta.bis}, we have a finer version of the contraction estimates:
\begin{coro}[Finer contraction estimates in $\EE$]\label{1.prop.estimates.bis}
There are $R_0$, $\rho_0>0$ such that for all $ \beta >0$, $R\geq R_0$, $\rho>\rho_0$ and $\e \in (0,1)$, we get the following estimates for all $\ubf$ and $\vbf$ in $B_{\EE}(0,1)$:
\begin{align}
 \VERT{T(\ubf)} &\lesssim \omega^{-(m-1)}\,\left(\beta^{-1}\, \e\VERT{\ubf} + R\rho^{-1}\right) \VERT{\ubf}\label{1.esti.T.bis} \\
 \VERT{T(\ubf) - T(\vbf)} &\lesssim \omega^{-(m-1)}\,\left(\beta^{-1} \, \e\VERT{\ubf} + R\rho^{-1}\right) \VERT{\ubf - \vbf}.\label{1.esti.TT.bis}
\end{align}
\end{coro}

\section{Existence of solutions and estimates from below}\label{1.section.existence}



\subsection{Existence of solutions}

Thanks to the Corollary~\ref{1.prop.estimates.bis}, we can now solve the fixed point equation~\eqref{1.fixed.point.equation} in the ball $B_{\EE}\left(0,\VERT{\f_{\e}}\right)$, provided that $\VERT{\f_{\e}} \leq 1/2$:

\begin{coro}[Existence of solutions]\label{1.coro.fixedpoint}
Let $R(\e) > R_0$, $\rho(\e) > \rho_0$, $\beta(\e) >0$ and $\und{s}(\e)$ be such that
\begin{equation}\label{1.hypo.coro}
\lim_{\e\to 0} \omega^{-(m-1)}\,\left(\beta^{-1} \e\VERT{\f_{\e}} + R\rho^{-1}\right) = 0.
\end{equation}

Then for any $\e$ small enough, the fixed point equation~\eqref{1.fixed.point.equation}, with $\f_{\e}$ defined by~\eqref{1.free.solution}, has a unique solution $\ubf_{\e}$ in $B_{\EE(R,\rho)}\left(0,2\VERT{\f_{\e}}\right)$. This solution satisfies
\begin{equation}\label{1.esti.solution}
\VERT{\ubf_{\e} - \f_{\e}} \lesssim \omega^{-(m-1)}\,\left(\beta^{-1} \e \VERT{\f_{\e}} + R\rho^{-1}\right)\VERT{\f_{\e}}.
\end{equation}
\end{coro}

The proof of the Corollary~\ref{1.coro.fixedpoint} is straightforward using the estimates of Corollary~\ref{1.prop.estimates.bis}, under the condition of smallness~\eqref{1.hypo.coro}. For convenience we introduce
\begin{equation}\label{1.def.K_epsilon}
K(\e) = \omega^{-(m-1)}\left(\beta^{-1} \, \e \VERT{\f_{\e}} + R\rho^{-1}\right).
\end{equation}



\subsection{Bounds from below for the solutions}\label{1.subsection.below}

Recall that in Section~\ref{1.subsection.sketch}, we explained that to prove Hadamard instability, we prove first that the solution $\ubf_{\e}$ of~\eqref{1.fixed.point.equation} has the same growth as $\f_{\e}$ given by Lemma~\ref{1.lemma.growth.free.solution}. That is, the goal is to prove
\begin{multline}\label{1.growth.solution}
|\ubf_{\e}(s,x,\theta)| \gtrsim \omega^{-(m-1)}\,e^{-M} \exp \left(\int_{0}^{s} \g^{\flat}(\tau \,;r) d\tau \right)\,,\\
\forall\,(s,x,\theta) \in (\und{s} - 1, \und{s})\times B_{r}(0) \times\mathbb{T}
\end{multline}
with $\g^{\flat}$ given by either~\eqref{1.bound.g.flat} (under Assumption~\ref{1.hypo.1}), \eqref{1.bound.g.flat.bis} (under Assumption~\ref{1.hypo.2}) or~\eqref{1.bound.g.flat.max} (under Assumptions~\ref{1.hypo.2} and~\ref{1.hypo.3}). It is indeed sufficient to prove this kind of estimate only on a small neighborhood of $(\und{s},0)\times\mathbb{T}$, and not on all the domain $\Omega_{R,\e\rho}(0)\times\mathbb{T} $. To this effect in view of Lemma~\ref{1.lemma.growth.free.solution} it suffices to prove that
\begin{equation}\label{1.esti.Ceps}
|(\ubf_{\e}-\f_{\e})(s,x,\theta)| \lesssim C(\e)\omega^{-(m-1)}\,e^{-M(\e)} \exp \left(\int_{0}^{s} \g^{\flat}(\tau \,;r) d\tau \right)
\end{equation}
for some constant $C(\e)$ such that $C(\e) \to 0$ as $\e \to 0$. The constant $C(\e)$ will depend on the parameters $M'$, $R$, $\rho$, $\beta$ and $\omega$. Finding suitable parameters such that $C(\e) \to 0$ as $\e \to 0$ will depend on under which Assumption we work, as it is specified in Propositions~\ref{1.prop.below}, \ref{1.prop.below.bis} and~\ref{1.prop.below.max}.

First, we decompose $\ubf_{\e}-\f_{\e}$ with its Fourier modes
\[
(\ubf_{\e}-\f_{\e})(s,x,\theta) = \sum_{n\in\Z} (\ubf_{\e}-\f_{\e})_n(s,x) e^{in\theta}.
\]


Thanks to the first property of Lemma~\ref{1.majoring.properties} and estimate~\eqref{1.esti.u}, for all $(s,x,\theta)\in\Omega_{R,\e\rho}(0)\times\mathbb{T}$ there holds
\begin{align*}
|(\ubf_{\e}-\f_{\e})(s,x,\theta)| \leq &\sum_{n\in\Z} |(\ubf_{\e}-\f_{\e})_n|(s,x) \\
\leq \dsp \VERT{\ubf_{\e}-\f_{\e}} &\sum_{n\in\Z} \frac{\dsp c_1}{\dsp n^2+1} \exp\left(-\left(M'- \int_{0}^{s} \g(\tau) d\tau \right) \left<n\right> \right) \Phi\left(R|x|_1 + \e\rho s\right)
\end{align*}
where $\g$ is defined in~\eqref{1.def.g}. Then, as $M'- \int_{0}^{s} \g(\tau) d\tau >0$ for any $s\in[0,\und{s})$ (recall definition~\eqref{1.def.und.s.1} of $\und{s}_1$ and definition~\eqref{1.finaltime} of $\und{s}$) and $\left<n\right> \geq 1$ for all $n$, we have
\begin{align*}
|(\ubf_{\e}-\f_{\e})(s,x,\theta)| & \leq \dsp \VERT{\ubf_{\e} - \f_{\e}}\,\exp\left(-\left(M'- \int_{0}^{s} \g(\tau) d\tau \right) \left<n\right> \right)\\
& \mkern263mu \sum_{n\in\Z} \frac{\dsp c_1}{\dsp n^2+1} \,\Phi\left(R|x|_1 + \e\rho s\right) \\
& \leq \dsp \VERT{\ubf_{\e} - \f_{\e}}\,\exp\left(-\left(M'- \int_{0}^{s} \g(\tau) d\tau \right) \left<n\right> \right) \sum_{n\in\Z} \frac{\dsp c_1}{\dsp n^2+1} \,\Phi(1)
\end{align*}
and the last inequality holds because the series $\Phi$ converges at $1$. As the series of the right-hand side of the previous inequality is convergent, there holds
\[
|(\ubf_{\e}-\f_{\e})(s,x,\theta)| \lesssim \dsp \VERT{\ubf_{\e} - \f_{\e}}\,\exp\left(-\left(M'- \int_{0}^{s} \g(\tau) d\tau \right) \left<n\right> \right)
\]
for all $(s,x,\theta)\in\Omega_{R,\e\rho}(0)\times\mathbb{T}$.

Next, by Lemma~\ref{1.lemma.norm.free.solution}, estimate~\eqref{1.hypo.coro} of Corollary~\ref{1.coro.fixedpoint} and notation~\eqref{1.def.K_epsilon}, we have successively
\begin{equation}
\begin{aligned}\label{1.esti.local.2}
|(\ubf_{\e}-\f_{\e})(s,x,\theta)| & \lesssim K(\e)\,\VERT{\f_{\e}}\,\exp\left(-\left(M'- \int_{0}^{s} \g(\tau) d\tau \right) \left<n\right> \right)\\
& \lesssim K(\e)\, \omega^{-(m-1)} e^{M'-M(\e)}\,\exp\left(-\left(M'- \int_{0}^{s} \g(\tau) d\tau \right) \left<n\right> \right)\\
& \lesssim K(\e)\, \omega^{-(m-1)} e^{-M(\e)} \exp\left(\int_{0}^{s} \g(\tau) d\tau \right) 
\end{aligned}
\end{equation}
using $\langle n \rangle \geq 1$ for all $n\in\Z$. Note that estimate~\eqref{1.esti.local.2} holds pointwise for all $(s,x,\theta)\in\Omega_{R,\e\rho}(0)\times\mathbb{T} $. Now we focus our analysis to the smaller domain $(\und{s} - 1, \und{s}) \times B_{r}(0) \times \mathbb{T}$. Having~\eqref{1.esti.Ceps} in mind, we rewrite~\eqref{1.esti.local.2} to get
\begin{multline}\label{1.esti.local}
|(\ubf_{\e}-\f_{\e})(s,x,\theta)| \\
\begin{aligned}
& \lesssim K(\e)\,\exp\left(\int_{0}^{s} (\g(\tau) - \g^{\flat}(\tau\,;r)) d\tau \right) \,\omega^{-(m-1)}\,e^{-M} \exp \left(\int_{0}^{s} \g^{\flat}(\tau\,;r) d\tau \right) \\
& \lesssim K(\e)\,\exp\left(\int_{0}^{\und{s}} (\g(\tau) - \g^{\flat}(\tau\,;r)) d\tau \right) \,\omega^{-(m-1)}\,e^{-M} \exp \left(\int_{0}^{s} \g^{\flat}(\tau\,;r) d\tau \right) \\
& \lesssim K(\e)\,\exp\left(\und{s}\,\beta + \int_{0}^{\und{s}} (\g^{\sharp}(\tau\,;R, \omega) - \g^{\flat}(\tau\,;r, \omega)) d\tau \right) \\
& \mkern 295mu \omega^{-(m-1)}\,e^{-M} \exp \left(\int_{0}^{s} \g^{\flat}(\tau\,;r, \omega) d\tau \right) 
\end{aligned}
\end{multline}
by definition~\eqref{1.finaltime} of $\und{s}$ and definition~\eqref{1.def.g} of $\g$. So to get~\eqref{1.esti.Ceps} we need
\[
\lim_{\e\to0} \,K(\e)\,\exp\left(\und{s}\,\beta + \int_{0}^{\und{s}} (\g^{\sharp}(\tau\,;R, \omega) - \g^{\flat}(\tau\,;r, \omega)) d\tau \right) = 0.
\]


If $K(\e) \to 0$ as in~\eqref{1.hypo.coro}, and as $\omega(\e)$ is a small parameter, it suffices then to have
\begin{equation}\label{1.first.constraint}
\lim_{\e\to0} \,\exp\left(\und{s}\,\beta + \int_{0}^{\und{s}} (\g^{\sharp}(\tau\,;R, \omega) - \g^{\flat}(\tau\,;r, \omega)) d\tau \right) = 0
\end{equation}
which brings another constraint on the parameters, after~\eqref{1.hypo.coro}.

We recall also the constraint on the parameters $M'$ and $\rho$ coming from the competition between the growth time $\und{s}_1$ defined in~\eqref{1.def.und.s.1} and the regularity time $(\e\rho)^{-1}$. To see the growth of the solution, we need it to exist on a sufficiently large time compared to the growth time, that is we need $\und{s} = \und{s}_1$. This is equivalent to
\begin{equation}\label{1.second.constraint}
\lim_{\e\to0} \,\und{s}_1\e\rho =0.
\end{equation}

A last constraint on the parameters comes from the smallness of the norm of the free solution, that is
\begin{equation}\label{1.third.constraint}
\lim_{\e \to0} \omega^{-(m-1)}\,e^{M' - M} = 0
\end{equation}
following Lemma~\ref{1.lemma.norm.free.solution}.

In constraint~\eqref{1.first.constraint}, recall that bound $\g^{\sharp}(\tau\,;R,\omega)$ is defined in Lemma~\ref{1.lemma.growth.propa}. Under Assumption~\ref{1.hypo.1}, the bound $\g^{\sharp}$ is given by~\eqref{1.bound.g.sharp}; under Assumption~\ref{1.hypo.2}, by~\eqref{1.bound.g.sharp.bis}; and under Assumptions~\ref{1.hypo.2} and~\ref{1.hypo.3}, by~\eqref{1.bound.g.sharp.max}. Similarly, recall that bound $\g^{\flat}(\tau\,;r,\omega)$ is defined in Lemma~\ref{1.lemma.growth.free.solution}. Under Assumption~\ref{1.hypo.1}, the bound $\g^{\flat}$ is given by~\eqref{1.bound.g.flat}; under Assumption~\ref{1.hypo.2}, by~\eqref{1.bound.g.flat.bis}; and under Assumptions~\ref{1.hypo.2} and~\ref{1.hypo.3}, by~\eqref{1.bound.g.flat.max}. In each case, we combine altogether constraints~\eqref{1.hypo.coro}, \eqref{1.first.constraint}, \eqref{1.second.constraint} and~\eqref{1.third.constraint}, and we give in the following three Propositions a choice of parameters satisfying those constraints.

\begin{prop}\label{1.prop.below}
Under Assumption~\ref{1.hypo.1}, with the following choice of parameters
\begin{equation}\label{1.good.parameters}
\begin{aligned}
\omega &= \e^{\delta}, \quad \beta = \e^{\delta}, \quad R^{-1} = \e^{\delta}, \quad \rho^{-1} = \e^{(1 + (m-1)\delta)/2},\\
M' &= M(\e) - \min\{ 0, 1 - (2m-1)\delta \}|\ln(\e)|
\end{aligned}
\end{equation}
and the limitation on the Gevrey index
\[
\sigma < \delta <1/(m+1)
\]
where $m$ is the algebraic multiplicity of $\lambda_0$, the fixed point equation~\eqref{1.fixed.point.equation} has a unique solution $\ubf_{\e}$ in $\EE$ which satisfies
\begin{multline}
|\ubf_{\e}(s,x,\theta)| \gtrsim \e^{-\delta(m-1)} e^{-M(\e)} \exp \left(\int_{0}^{s} \g^{\flat}(\tau\,; r, \omega) d\tau \right) \,,\\
\forall\,(s,x,\theta)\in(\und{s} - 1, \und{s}) \times B_r(0)\times\mathbb{T}
\end{multline}
for any $r \lesssim \e^{\delta}$. Another consequence of~\eqref{1.good.parameters} is
\begin{equation}\label{1.final.und.s}
\und{s} \approx \e^{-\delta}.
\end{equation}
\end{prop}

\begin{proof}
It is straightforward to verify that parameters given by~\eqref{1.good.parameters} satisfy the four constraints~\eqref{1.hypo.coro}, \eqref{1.first.constraint}, \eqref{1.second.constraint} and~\eqref{1.third.constraint}. The aim of the proof is to show that those parameters are optimal, in some sense. For that, we assume that the constraints are satisfied and we get constraints directly on $M'$, $\rho$, $R$, $\omega$ and $\beta$.

First, \eqref{1.second.constraint} being satisfied the final time is
\[
\und{s} = \und{s}_1
\]
defined by~\eqref{1.def.und.s.1}. In the asymptotic $\e \to 0$ there holds
\begin{align*}
\int_{0}^{\und{s}_1} \g(\tau) d\tau & \sim \und{s}_1 \g(\und{s}_1) \\
& \approx \g_0 \und{s}_1
\intertext{which implies that}
\und{s}_1 &\approx \frac{M'}{\g_0}.
\end{align*}

Constraint~\eqref{1.third.constraint} implies that $M' - M = {}- c(\e) + (m-1)\ln\omega$ with $\lim_{\e\to 0} c(\e)\linebreak= +\infty$. We assume that $c(\e) = o\left(\e^{-\delta}\right)$ to get $M' \sim M$, hence
\[
\und{s} \approx M = \e^{-\delta}.
\]

We also rewrite~\eqref{1.second.constraint} as
\begin{equation}\label{1.local.1}
\lim_{\e\to0}\e^{1-\delta} \rho = 0.
\end{equation}

Second, we focus on~\eqref{1.first.constraint}. By definitions~\eqref{1.bound.g.sharp} and~\eqref{1.bound.g.flat} we have
\[
\int_{0}^{\und{s}} (\g^{\sharp}(\tau\, ; R,\omega) - \g^{\flat}(\tau\, ; r,\omega)) d\tau \lesssim \und{s}\left(\e \und{s} + R^{-1} + r + \omega \right).
\]


As $\und{s} \approx \e^{-\delta}$, for~\eqref{1.first.constraint} to be satisfied we need $ \und{s}\left(\beta + \e \und{s} + R^{-1} + r + \omega \right) $ to be bounded, hence the choices
\[
\beta = \e^{\delta} \,, \quad r = \e^{\delta} \,, \quad \omega = \e^{\delta}
\]
and the constraints
\begin{equation}\label{1.local.cons}
\e \und{s}^2 \lesssim 1 \,, \quad R^{-1} \lesssim \e^{\delta}.
\end{equation}


The first one implies in particular
\[
\delta < 1/2.
\]

The constraint~\eqref{1.hypo.coro} is now
\[
\lim_{\e\to0} \e^{-\delta(m-1)} \left(\e^{1-\delta} \e^{-\delta(m-1)} e^{M'-M} + R\rho^{-1} \right) = 0
\]
using~\eqref{1.norm.free.solution}, and that is equivalent to both
\[
\lim_{\e\to0} e^{M'-M} \e^{1- \delta(2m-1)} = 0 \quad \text{and} \quad \lim_{\e\to0} \e^{-\delta(m-1)} R\rho^{-1} = 0.
\]


The first limit leads to the choice
\[
M' = M - \min\{ 0, 1 - (2m-1)\delta \}|\ln(\e)|
\]
recalling that $\delta\in(0,1/m)$. The second limit, combined with~\eqref{1.local.1}, gives us
\begin{equation}\label{1.local.voila.2}
\e^{1-\delta} \ll \rho^{-1} \ll \e^{\delta(m-1)} R^{-1}
\end{equation}
using notation~\eqref{1.notation.ll}. We note then that in particular, $R^{-1}$ has to be greater than $\e^{1-m\delta}$. As $R^{-1}$ has to be also smaller than $\e^{\delta}$, it implies the limitation
\begin{equation}\label{1.local.voila}
\e^{1-\delta} \ll \e^{\delta(m-1)} \e^{\delta}
\end{equation}
which is equivalent to
\[
\delta <1/(m+1),
\]
compatible with the previous limitation $\delta <1/2$ as $m \geq 1$.
\end{proof}


\begin{prop}\label{1.prop.below.bis}
Under Assumption~\ref{1.hypo.2}, with the following choice of parameters
\begin{equation}\label{1.good.parameters.bis}
\omega = 0, \quad \beta = \e^{\delta}, \quad R^{-1} = \e, \quad \rho^{-1} = \e^{1-\delta/2}, \quad M' = M(\e) - (1-\delta)|\ln(\e)|
\end{equation}
and the limitation on the Gevrey index
\[
\sigma < \delta <1/2
\]
the fixed point equation~\eqref{1.fixed.point.equation} has a unique solution $\ubf_{\e}$ in $\EE$ which satisfies
\begin{equation}
|\ubf_{\e}(s,x,\theta)| \gtrsim e^{-M(\e)} \exp \left(\int_{0}^{s} \g^{\flat}(\tau\,; r, \omega) d\tau \right) \,, \quad \forall\,(s,x,\theta)\in(\und{s} - 1, \und{s}) \times B_r(0)\times\mathbb{T}
\end{equation}
for any $r \lesssim \e^{\delta}$. Another consequence of~\eqref{1.good.parameters.bis} is
\begin{equation}
\label{1.final.und.s.bis}
\und{s} \approx \e^{-\delta}.
\end{equation}
\end{prop}

\begin{proof}
The proof is the same as the one of Proposition~\ref{1.prop.below}, with the difference that with Assumption~\ref{1.hypo.2}, estimate~\eqref{1.local.voila.2} is replaced by $\e^{1-\delta} \ll \rho^{-1} \ll R^{-1}$ as $m=1$. Hence constraint~\eqref{1.local.voila} is now $ \e^{1-\delta} \ll \e^{\delta(m-1)} \e^{\delta} $ which is equivalent to $\delta <1/2$.
\end{proof}

\begin{prop}\label{1.prop.below.max}
Under Assumptions~\ref{1.hypo.2} and~\ref{1.hypo.3}, with the following choice of parameters
\begin{equation}
\label{1.good.parameters.max}
\omega = 1, \quad \beta = \e^{\delta}, \quad R^{-1} = \e, \quad \rho^{-1} = \e^{1-\delta/2}, \quad M' = M(\e) - (1-\delta)|\ln(\e)|
\end{equation}
and the limitation on the Gevrey index
\[
\sigma < \delta <2/3
\]
the fixed point equation~\eqref{1.fixed.point.equation} has a unique solution $\ubf_{\e}$ in $\EE$ which satisfies
\begin{multline}
|\ubf_{\e}(s,x,\theta)| \gtrsim e^{-M(\e)} \exp \left(\int_{0}^{s} \g^{\flat}(\tau\,; r, \omega) d\tau \right) \,, \\
\forall\,(s,x,\theta)\in(\und{s} - 1, \und{s}) \times B_r(0)\times\mathbb{T}
\end{multline}
for any $r \lesssim \e^{\delta}$. Another consequence of~\eqref{1.good.parameters.max} is
\begin{equation}
\label{1.final.und.s.max}
\und{s} \approx \e^{-\delta}.
\end{equation}
\end{prop}

\begin{proof}
The proof is the same as the one of Proposition~\ref{1.prop.below}, with the difference that with Assumption~\ref{1.hypo.2}, the bounds~\eqref{1.bound.g.sharp} and~\eqref{1.bound.g.flat} are replaced by the sharper bounds~\eqref{1.bound.g.sharp.max} and~\eqref{1.bound.g.flat.max}, respectively. First, note that the parameter of trigonalization $\omega$ does not appear anymore, and is then taken equal to one. Second, thanks to Assumption~\ref{1.hypo.2}, the difference $\g^{\sharp} - \g^{\flat}$ is improved:
\begin{equation}
\label{1.proof.local.cool.2}
\g^{\sharp}(\tau\, ; R,\omega) - \g^{\flat}(\tau\, ; r,\omega) \lesssim \e^{2}\und{s}^{2} + r
\end{equation}

This implies in particular that
\[
\int_{0}^{\und{s}} (\g^{\sharp}(\tau\, ; R,\omega) - \g^{\flat}(\tau\, ; r,\omega)) d\tau \lesssim \und{s}\left(r + \e^{2}\und{s}^2 \right)
\]
which no longer implies constraints~\eqref{1.local.cons}. It suffices then to follow the rest of the proof of Proposition~\ref{1.prop.below}.
\end{proof}

\begin{rema}\label{1.remark.amelioration}

Estimate~\eqref{1.proof.local.cool.2} in the previous proof shows that the limiting Gevrey index increases as $\g^{\sharp} - \g^{\flat}$ decreases (with $\g^{\sharp}$ and $\g^{\flat}$ the upper and lower rates of growth introduced in Lemmas~\ref{1.lemma.growth.propa} and~\ref{1.lemma.growth.free.solution}). In particular, if the distinguished eigenvalue $\lambda$ is very flat at the distinguished point $(0,x_0)$, then the limiting Gevrey index is close to $1$, as claimed in Remark~\ref{1.remark.aprestheo3}.
\end{rema}

\section{Conclusion: Hadamard instability in Gevrey spaces}\label{1.section.Hadamard}

To close the proofs of Theorems~\ref{1.theorem.2}, \ref{1.theorem.3} and~\ref{1.theorem.4} we now have to estimate the ratio
\[
\frac{\dsp \|u_{\e}\|_{L^2(\Omega_{R,\rho}(0))}}{\dsp \|h_{\e}\|^{\alf}_{\sigma,c,B_{r_0}(0)}}
\]


The previous sections show the existence of a family of solutions $\ubf$ of the fixed point equation~\eqref{1.fixed.point.equation} starting from $\f_{\e}$. Thanks to the ansatz~\eqref{1.ansatz} which we recall here
\[
u_{\e}(t,x) = \e\ubf(\e^{-1}\,t,x,x\cdot\xi_0/\e)
\]
we have then a family of solutions $u_{\e}$ existing in domains $\Omega_{R,\rho}(0)$, with $R$ and $\rho$ given by~\eqref{1.good.parameters}. As $\und{s} < (\e\rho)^{-1}$ the domain of regularity $\Omega_{R,\rho}(0)$ for $\ubf$ contains the cube of size $\e$
\[
C_{\e} = \{ (t,x) \,|\, \e\und{s} - \e < t <\e\und{s},\quad |x| < \e \}
\]


On the one hand, thanks to estimate~\eqref{1.growth.solution} with $r=\e$ there holds
\begin{align*}
\|u_{\e}\|_{L^2(\Omega_{R,\rho})} & \geq \|u_{\e}\|_{L^2(C_{\e})} \\
& \gtrsim \inf_{\e\und{s} - \e < t <\e\und{s}} \left(\e^{-\delta(m-1)} e^{-M(\e)} \exp \left(\int_{0}^{t/\e} \g^{\flat}(\tau/\e) d\tau \right) \right) \,\|1\|_{L^2(C_{\e})} \\
& \gtrsim \e^{-\delta(m-1)} e^{-M(\e)} \exp \left((\und{s}-1) \left(\g_0 - \e\und{s} - r - \omega \right) \right) \, \e^{(d+1)/2} \\
& \gtrsim \e^{-\delta(m-1)} e^{-M(\e)} e^{\g_0\und{s}}\, \e^{(d+1)/2}
\end{align*}


 Next, by choice of $M' = M -(m\delta-1)|\ln(\e)|$ we get
\[
\|u_{\e}\|_{L^2(\Omega_{R,\rho})} \gtrsim \e^{-\delta(2m+1)+1} e^{-M'(\e)} e^{\g_0\und{s}} \, \e^{1+(d+1)/2}.
\]
As
\[
M' = \und{s} \g = \und{s}\g_0 (1+ 2\e^{\delta})
\]
this implies that
\begin{align*}
\|u_{\e}\|_{L^2(\Omega_{R,\rho})} & \gtrsim e^{ - \und{s}\g_0(1 + 2\e^{\delta}) + \g_0 \und{s}} \, \e^{1+(d+1)/2-\delta(2m+1)} \\
& \gtrsim \e^{1+(d+1)/2-\delta(2m+1)}
\end{align*}
as $\und{s}\e^{\delta} \approx 1$.

On the other hand, by Lemma~\ref{1.size.gevrey.exp} and definition~\eqref{1.size.gevrey} of $M$ there holds
\[
\|h_{\e}\|_{\sigma,c,B_{r_0}(0)} \lesssim \e\,\exp\left(- M(\e) + \frac{\e^{-\sigma}}{\sigma c^{\sigma}}\right)
\]
which is small as soon as $\sigma < \delta$. Combining those two estimates we have then
\[
\frac{\dsp \|u_{\e}\|_{L^2(\Omega_{R,\rho})}}{\dsp \|h_{\e}\|^{\alf}_{\sigma,c,K}}
\gtrsim
\e^{1+(d+1)/2-\delta(2m+1)-\alf} \exp\left(-\alf \frac{\e^{-\sigma}}{\sigma c^{\sigma}} + \alf \e^{-\delta}\right)
\]
which tends to $+\infty$ as $\e\to0$ because $\sigma < \delta$, no matter whether $1+(d+1)/2\linebreak-\delta(2m+1)-\alf$ is positive or negative; this ends the proof of Theorem~\ref{1.theorem.2}.

The proofs of Theorems~\ref{1.theorem.3} and~\ref{1.theorem.4} rely on the exact same computations, using Proposition~\ref{1.prop.below.bis} and Proposition~\ref{1.prop.below.max} respectively, instead of Proposition~\ref{1.prop.below}.



\bibliography{AHL_morisse}
\end{document}
