%~Mouliné par MaN_auto v.0.27.3 2023-08-09 10:03:21
\documentclass[AHL,Unicode,longabstracts,published]{cedram}



%
% (Re)define \editors — presumably a hook of the cedram class; TODO confirm.
% #1 is the list of handling editors. The macro stores the names in
% \editor@name and builds \editor@string, the "Recommended by ..." sentence,
% in French when the class flag @francais is set and in English otherwise.
\makeatletter
\def\editors#1{%
\def\editor@name{#1}
\if@francais
\def\editor@string{Recommand\'e par les \'editeurs \editor@name.}
\else
\def\editor@string{Recommended by Editors \editor@name.}
\fi}   
\makeatother


% Shorthand macros for the math alphabets used throughout the paper:
% \bfun is the bold indicator symbol; \bb* are blackboard-bold letters
% (\bbE expectation, \bbP probability, \bbR reals, \bbZ integers, ...);
% \cl* are calligraphic letters (sigma-algebras and classes of functions);
% \rmI is an upright roman I.
\newcommand{\bfun}{\mathbf{1}}
\newcommand{\bbE}{\mathbb{E}}
\newcommand{\bbZ}{\mathbb{Z}}
\newcommand{\bbN}{\mathbb{N}}
\newcommand{\clF}{\mathcal{F}}
\newcommand{\clN}{\mathcal{N}}
\newcommand{\clL}{\mathcal{L}}
\newcommand{\clB}{\mathcal{B}}
\newcommand{\clG}{\mathcal{G}}
\newcommand{\clC}{\mathcal{C}}
\newcommand{\clM}{\mathcal{M}}
\newcommand{\clA}{\mathcal{A}}
\newcommand{\clU}{\mathcal{U}}
\newcommand{\clV}{\mathcal{V}}
\newcommand{\bbP}{\mathbb{P}}
\newcommand{\bbR}{\mathbb{R}}
\newcommand{\bbI}{\mathbb{I}}
\newcommand{\bbL}{\mathbb{L}}
\newcommand{\rmI}{\mathrm{I}}
% Upright operator names. \DeclareMathOperator (amsmath, loaded by the
% cedram class) typesets the same upright text as the previous
% \newcommand{..}{\mathrm{..}} definitions but additionally inserts the
% correct operator spacing, e.g. in $\Cov(X_0,X_k)$ or $\Var S_n$.
\DeclareMathOperator{\Cov}{Cov}
\DeclareMathOperator{\Var}{Var}
\DeclareMathOperator{\covv}{cov}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\graphicspath{{./figures/}}

% Fine math-spacing helpers: negative and positive math kerns of 1mu / 2mu,
% used for manual adjustment of symbol spacing in formulas.
\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}

%\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}

% Switch the label style of the first-level enumerate counter (enumi)
% to lowercase roman, uppercase Roman, lowercase alphabetic, or
% uppercase alphabetic numbering, respectively.
\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}}
% Redefine \tilde and \hat so that the wide accents (\widetilde, \widehat)
% are used in display and text style, while the original narrow accents are
% kept in script and scriptscript style; \mathchoice selects per math style.
\let\oldtilde\tilde
\renewcommand*{\tilde}[1]{\mathchoice{\widetilde{#1}}{\widetilde{#1}}{\oldtilde{#1}}{\oldtilde{#1}}}
\let\oldhat\hat
\renewcommand*{\hat}[1]{\mathchoice{\widehat{#1}}{\widehat{#1}}{\oldhat{#1}}{\oldhat{#1}}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\title[Quadratic transportation cost in the conditional CLT]{Quadratic transportation cost in the conditional central limit theorem for dependent sequences}

\alttitle{Coût de transport quadratique dans le théorème limite central conditionnel pour des suites dépendantes}

\subjclass{60F05, 60F25, 60E15, 37E05}

\keywords{Quadratic transportation cost, conditional central limit theorem, Wasserstein distance, Minimal distance, strong mixing, stationary sequences, weak dependence, rates of convergence}

\author[\initial{J.} \lastname{Dedecker}]{\firstname{Jérôme} \lastname{Dedecker}}
\address{Université Paris Cité,\\
Laboratoire Map5,\\
UMR 8145 CNRS,\\
45 rue des Saints-Pères,\\
75006 Paris France}
\email{jerome.dedecker@u-paris.fr}


\author[\initial{F.} \lastname{Merlevède}]{\firstname{Florence} \lastname{Merlevède}}
\address{Univ Gustave Eiffel,\\
Univ Paris Est Créteil,\\
LAMA, UMR 8050 CNRS,\\
F-77454 Marne-La-Vallée, France}
\email{florence.merlevede@univ-eiffel.fr}

\author[\initial{E.} \lastname{Rio}]{\firstname{Emmanuel} \lastname{Rio}}
\address{Université de Versailles,\\
LMV, UMR 8100 CNRS,\\
45 avenue des Etats-Unis,\\
F-78035 Versailles France}
\email{emmanuel.rio@uvsq.fr}



\begin{abstract}
In this paper, we give estimates of the quadratic transportation cost in the conditional central limit theorem for a large class of dependent sequences. Applications to irreducible Markov chains, dynamical systems generated by intermittent maps and $\tau$-mixing sequences are given.
\end{abstract}

\begin{altabstract}
Dans cet article, nous obtenons des estimations du coût de transport quadratique dans le théorème central limite conditionnel pour une vaste classe de suites dépendantes. Nous donnons des applications aux chaînes de Markov irréductibles, aux systèmes dynamiques engendrés par des transformations intermittentes, ainsi qu'aux suites $\tau$-mélangeantes.
\end{altabstract}


\datereceived{2021-11-25}
\daterevised{2023-04-11}
\dateaccepted{2023-05-15}

\editors{S. Gou\"ezel and A. Philippe}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\dateposted{2023-10-02}
\begin{document}
\maketitle

\section{Introduction}

Let $(X_i)_{i\,\in\,\bbZ}$ be a strictly stationary sequence of real-valued random variables (r.v.) with mean zero and finite variance. Set $S_n = X_1 + X_2 + \cdots + X_n$. By $P_{n^{-1/2}S_n}$ we denote the law of $n^{-1/2}S_{n}$ and by $G_{\sigma^2}$ the normal distribution $ N(0, \sigma^2)$. In this paper, we assume furthermore that the series $\sigma^2=\sum_{k\,\in\,\bbZ} \Cov (X_0,X_k)$ is convergent (under this assumption $\lim_n n^{-1} \Var S_n = \sigma^2$) and we shall give quantitative estimates of the approximation of $P_{n^{-1/2}S_n}$ by $G_{\sigma^2}$ in terms of the quadratic cost, which is the square of the $\bbL^2$-minimal distance. With this aim, we first recall the definition of the $\bbL^p$-minimal metrics.

Let $\clL(\mu, \nu)$ be the set of probability laws on $\bbR^2$ with marginals $\mu$ and $\nu$. For $p\geq 1$, let
\[
W_p(\mu, \nu) =  \inf \left\{ \left (\int |x-y|^p P(dx, dy) \right)^{1/p} : P \in \clL(\mu, \nu) \right \} \,.
\]
$W_p$ is usually called the $\bbL^p$-minimal distance, and sometimes the Wasserstein distance of order $p$. It is well known that for probability laws $\mu$ and $\nu$ on $\bbR$ with respective distributions functions (d.f.) $F$ and $G$,
\begin{equation}\label{def2wasser}
W_p(\mu, \nu) = \left(\int_0^1 |F^{-1}(u) - G^{-1}(u) |^p du\right)^{1/p} \,,
\end{equation}
where $F^{-1}$ and $G^{-1}$ denote respectively the generalized inverse functions of $F$ and $G$. We refer to Villani~\cite[Chapter~6]{Vi2009} for the properties of this metric.

For $(X_i)_{i\,\in\,\bbZ}$ a sequence of independent and identically distributed (iid) centered real valued random variables in $\bbL^4$, with variance $\sigma^2$,  in~\cite[inequality (1.7)]{Rio09} Rio states that there exists a universal constant $c$ such that for any positive integer $n$
\begin{equation}\label{ineRioUB}
n W_2^2 \left(P_{S_n / \sqrt n}, G_{\sigma^2}\right) \leq c \, \sigma^{-2} \Vert X_1 \Vert_4^4 \,.
\end{equation}
In addition, it is also shown in the same paper that this upper bound is optimal. More precisely, for any $\kappa \geq 1$, let $\clM(4, \kappa)$ be the class of the probability measures $\mu$ on the real line such that $\int x d \mu (x) = 0$, $\int x^2 d \mu (x) = 1$ and $\int x^4 d \mu (x) = \kappa$. In case where $(X_i)_{i\,\in\,\bbZ}$ is a sequence of iid random variables with common law $\mu $ in $\clM(4, \kappa)$, Theorem~5.1 in~\cite{Rio09} asserts that
\begin{equation}\label{ineRioLB}
\sup_{\mu\,\in\,\clM(4, \kappa)} \liminf_{n\,\rightarrow\,\infty} n W_2^2 \left(P_{S_n / {\sqrt n} }, G_{1}\right) \geq \kappa /12 \,.
\end{equation}

We refer to Bobkov~\cite{Bob13} for another proof of~\eqref{ineRioUB} based on relative entropy and Talagrand's entropy-transport inequality. Actually, the following more general result holds: for any $p \geq 1$, there exists a universal constant $c_p$ such that for any positive integer $n$,
\[
n^{p/2} W_p^p \left(P_{S_n / \sqrt n}, G_{\sigma^2}\right) \leq c_p \, \sigma^{-2} \Vert X_1 \Vert_{p+2}^{p+2} \,.
\]
(see Rio~\cite{Rio09} for $p \in [1,2]$ and Bobkov~\cite{Bob18} for $p>2$). Extensions to random vectors in $\bbR^d$ are given in Bonis~\cite{Bo20}. We also mention the extensions of the upper bound~\eqref{ineRioUB} to the $m$-dependent case and to $U$-statistics obtained by Fang~\cite{Fa19}.

In this paper, one of our motivations is to relax the independence assumption and to find sufficient conditions in case of dependent sequences ensuring that
\begin{equation}\label{conj1}
W_2 \left(P_{S_n / \sqrt n }, G_{\sigma^2}\right) = O (n^{-1/2}) \,.
\end{equation}
In the dependent setting, a well known class is the class of irreducible aperiodic and positively recurrent Markov chains $(\xi_n)$ with an atom denoted by $A$ (see the definition~\cite[p.~286]{Bo82}). Let $\pi $ be the unique invariant distribution of the Markov chain. From now on, $(\xi_n)$ will be the Markov chain starting from $\pi$. Let us then consider the strictly stationary sequence $(X_k) $ defined by $X_k =f (\xi_k)$ with $f$ a bounded function such that $\pi(f) =0$. In view of the regeneration scheme and the upper bound~\eqref{ineRioUB}, one can conjecture that~\eqref{conj1} holds for $S_n = \sum_{k=1}^n X_k$ provided that $\bbE_A (\tau_A^4) < \infty$ where $\tau_A$ is the first return time in $A$ and $\bbE_A$ stands for the expectation under $\bbP_x$ for $x \in A$. Next, from~\cite[Lemma~3]{Bo82} and~\cite[p.~165]{Rio17}, it is known that $\bbE_A (\tau_A^4) < \infty$ is equivalent to
\begin{equation}\label{condalpha4}
\sum_{n\,>\,0} n^{2} \alpha_n < \infty \,,
\end{equation}
where $
\alpha_n = { \textstyle \frac{1}{4} } \sup_{\Vert f \Vert_{\infty}\,\leq\,1} \Vert \bbE (f(\xi_n) | \xi_0) - \bbE (f (\xi_n)) \Vert_1 $.

In this paper we shall prove that~\eqref{conj1} holds true for any stationary sequence $(X_k)_{k \,\in\,\bbZ}$ of bounded real-valued random variables satisfying~\eqref{condalpha4} for the sequence $(\alpha_n)_{n\,\geq\,0}$ of strong mixing coefficients in the sense of Rosenblatt (see for instance \cite[Section~5.1.1.]{MPU} for a definition of these coefficients in the general case), which includes the case of Markov chains described above. This will be a consequence of a more general result also valid for a class of weakly dependent sequences, which may fail to be strongly mixing. In order to give more precise statements of our results, let us now introduce the dependence coefficients that we will use in this paper.

\begin{defi} \label{deftheta}
Let $(X_i)_{i\,\in\,\bbZ}$ be a stationary sequence of bounded real-valued random variables and $\clF_0 = \sigma (X_i, i \leq 0)$. Let
$ \Gamma_{p,q} = \{ (a_i)_{1\,\leq\,i \,\leq\,p} \in \bbN^p \, : \, a_1 \geq 1$ and  $\sum_{i=1}^p a_i \leq q \} $, for $p$ and $q$ positive integers. For $k\geq 0$, set
\[
\theta_{X,p,q} (k) = \sup_{\substack{ k_p\,>\,k_{p-1}\,>\,\ldots\,>\,k_2\,>\,k_1\,\geq\,k \\ (a_1,\,\dots,\,a_p)\,\in\,\Gamma_{p,q}}} \left \Vert \bbE \left(\prod_{i=1}^p X_{k_i}^{a_i} | {\clF_0 } \right) - \bbE \left (\prod_{i=1}^p X_{k_i}^{a_i} \right) \right\Vert_1 \,.
\]
\end{defi}
As a consequence of our Theorem~\ref{thW2}, we will obtain that if
\begin{equation}\label{condtheta4}
\sum_{k\,\geq\,1} k^2 \theta_{X,4,4} (k) < \infty \,,
\end{equation}
then~\eqref{conj1} holds, which immediately implies that~\eqref{conj1} holds for additive bounded functionals of a Markov chain satisfying~\eqref{condalpha4}. In fact we shall give a conditional version of~\eqref{conj1} and show that when $(X_k)_{k\,\in\,\bbZ}$ is a stationary sequence of centered and bounded real-valued random variables satisfying~\eqref{condtheta4} then
\begin{equation}\label{condW2res}
\bbE \left(W_2^2 \left(P_{S_n / {\sqrt n} |\clF_0 }, G_{\sigma^2}\right) \right) = O (n^{-1}) \,.
\end{equation}
Note that in case of bounded functions of a Markov chain $(\xi_k)_k$ satisfying $\bbE_A (\tau_A^4) < \infty$, with invariant distribution $\pi$, the Schwarz inequality together with~\eqref{condW2res} imply that
\[
\bbE_\mu \left(W_2 \left(P_{S_n / {\sqrt n} |{\xi}_0 }, G_{\sigma^2}\right) \right) = O (n^{-1/2})
\]
for any positive measure $\mu$ such that $d \mu = f d \pi $ with $\int f^2 d \pi < \infty$. Above $ \bbE_\mu$ stands for the expectation of the chain under the initial law $\mu$.

%\smallskip

It is noteworthy to indicate that~\eqref{condW2res} implies~\eqref{conj1}. Indeed the following fact is valid.


\begin{enonce}
{Fact}\label{CompQCCondQC}
Let $X$ and $Y$ be two random variables defined on $(\Omega,\clA, \bbP)$ and $\clF$ be a sub $\sigma$-algebra of $\clA$. Then $W_2^2 (P_{X }, P_{Y }) \leq \bbE (W_2^2 (P_{X | \clF}, P_{Y | \clF}))$.
\end{enonce}

To see this, let $U$ be a random variable with uniform distribution over $[0,1]$, independent of $\clF$, and let $F_{ X| \clF }$ and $F_{ Y| \clF }$ denote respectively the conditional distribution functions of $X$ and $Y$ given $\clF$. Set $X^* = F_{ X| \clF }^{-1} (U)$ and $Y^* = F_{Y| \clF }^{-1} (U)$. Then $X^*$ has the law $P_X$, $Y^*$ has the law $P_Y$ and, by~\eqref{def2wasser}, $W_2^2 (P_{X | \clF}, P_{Y | \clF}) = \bbE (|X^* - Y^*|^2 | \clF)$. Taking the expectation, it implies the above fact, since $W_2^2$ is the minimal quadratic cost.


%\medskip

To prove Theorem~\ref{thW2}, we shall apply Lindeberg's method, which was used by Billingsley~\cite{Bil61} and Ibragimov~\cite{Ibr63} in the case of martingales with stationary differences to prove the central limit theorem (we also consider this particular case in our Theorem~\ref{thW2M}). Note that this method was adapted to a large class of dependent sequences (not necessarily martingale differences) to evaluate the $\bbL^1$-minimal distance between $P_{S_n / {\sqrt n}}$ and $G_{\sigma^2}$, by Pène~\cite{Pe05} in the bounded multidimensional case, and next by Dedecker and Rio~\cite{DR08} in the unbounded case (under conditions involving some coefficients similar to $\theta_{X,4,3}$, or weak mixing coefficients such as those described in Definition~\ref{defalpha} below). Recently, estimates of the $\bbL^1$-minimal distance between $P_{S_n / {\sqrt n}}$ and $G_{\sigma^2}$ when the underlying process is a function of iid random variables are given in~\cite[Theorem~3.1]{JWZ}. Their conditions are expressed in terms of some coupling coefficients.



Our paper is organized as follows. Section~\ref{section2} is devoted to the statements of upper bounds concerning the quadratic transportation cost in the conditional central limit theorem and their applications to pointwise estimates for the distribution function of the normalized sums and its generalized inverse. Applications to $\alpha$-dependent sequences, $\tau$-mixing sequences and symmetric random walk in the circle are given in Section~\ref{section3}. The proofs are postponed to Section~\ref{section4}. Links between $ | F_{S_n/\sigma_n}^{-1}(u) - \Phi^{-1} (u) | $ and $W_p (P_{S_n / \sigma_n}, G_1)$, for any $p \geq 1$, are given in Proposition~\ref{PropVaR}, where $\sigma_n = \sqrt{ \Var S_n}$, $\Phi^{-1}$ is the inverse of the distribution function of the standard normal distribution and $F_{S_n/\sigma_n}^{-1}$ is the generalized inverse of the distribution function of $S_n/\sigma_n$. In particular, rates of convergence for the quadratic cost provide rates of convergence for $ | F_{S_n/\sigma_n}^{-1}(u) - \Phi^{-1} (u) | $ (see Corollary~\ref{AppliVaR}).

%\medskip

In the rest of the paper, we shall use the following notation: for two sequences $(a_n)_{n\,\geq\,1}$ and $(b_n)_{n\,\geq\,1}$ of positive reals, $a_n \ll b_n$ means there exists a positive constant $C$ not depending on $n$ such that $a_n \leq C b_n$ for any $n\geq 1$. Moreover, for a real-valued random variable $X$ in $\bbL^1$, the notation $X^{(0)}$ means $X - \bbE (X)$.



\section{Quadratic cost in the conditional CLT} \label{section2}
%\setcounter{equation}{0}

The main result of this paper is Theorem~\ref{thW2} below.

\begin{theo} \label{thW2}
Assume that $\Vert X_0 \Vert_{\infty} \leq M$ and that $\sum_{k\,\geq\,1} \theta_{X,2,2} (k) < \infty$. Then $\sigma^2 = \bbE (X_0^2) + 2 \sum_{k\,\geq\,1}\bbE (X_0X_k)$ converges and
\begin{align}
\tag{$\rm a$}\bbE \left(W_2^2 \left(P_{S_n / {\sqrt n} |\clF_0 }, G_{\sigma^2}\right) \right) \ll n^{-1/2} \left(1 + \sum_{k\,\geq\,1} \left(k \wedge \sqrt{n}\right) \theta_{X,2,2} (k) \right) \,.\label{theo2.1.a}
\\
\intertext{If furthermore $ \sum_{k\,\geq\,1} k \theta_{X,4,4} (k) < \infty$, then}
\tag{$\rm b$}\bbE \left(W_2^2 \left(P_{S_n / {\sqrt n} |\clF_0 }, G_{\sigma^2}\right) \right) \ll n^{-1} \left(1 + \sum_{k\,\geq\,1} k \left(k \wedge \sqrt{n}\right) \theta_{X,4,4} (k) \right)\,.\label{theo2.1.b}
\end{align}
\end{theo}

\begin{enonce}{Comment}\label{comm2.2}
Item~\eqref{theo2.1.a} provides a rate in the CLT for the $W_2$-metric as soon as $\sum_{k\,\geq\,1} \theta_{X,2,2} (k) < \infty$. In addition, if $ \sum_{k\geq 1} k \theta_{X,2,2} (k) < \infty$, then the rate in the $W_2$-metric is of order $n^{-1/4}$. Furthermore, by Item~\eqref{theo2.1.b}, if $ \sum_{k\,\geq\,1} k \theta_{X,4,4} (k) < \infty$, then the rate in the CLT for the $W_2$-metric is $o (n^{-1/4})$. For example, if $\theta_{X,4,4} (k) = O (k^{-a}) $ with $ a \in]1,3[$ and $a \neq 2$, Theorem~\ref{thW2} implies that $W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-(a-1)/4} $. Moreover $ W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$ as soon as $ \sum_{k\,\geq\,1} k^2 \theta_{X,4,4} (k) < \infty$.
\end{enonce}

\begin{enonce}{Comment} \label{commentaftermainth}
Assume $\sigma >0$. Set $\sigma_n= \sqrt{ \Var S_n }$; then $\sigma_n>0$ for any positive $n$. Set
$\kappa_2 = \bbE (W_2^2 (P_{S_n / \sigma {\sqrt n} |\clF_0 }, P_{S_n / \sigma_n |\clF_0 }))$:
\[
\kappa_2 =
\left(\frac{\sigma_n}{\sigma \sqrt n} - 1 \right)^2 \leq \left (\frac{\sigma_n^2}{n\sigma^2} - 1 \right)^2 = \left(n\sigma^2\right)^{-1} \left| \frac{\sigma_n^2}{n\sigma^2} - 1 \right| \, \left| 2 \sum_{k\,\geq\,1} (k \wedge n) \Cov (X_0,X_k)\right|.
\]
Now, from the definition of the coefficients $ \theta_{X,1,1} (k)$,
\begin{equation}\label{UpperBoundCov1}
\sum_{k\, \geq\,1} (k \wedge n) | \Cov (X_0,X_k) |
\leq \Vert X_0 \Vert_{\infty} \sum_{k\,\geq\,1} (k \wedge n) \theta_{X,1, 1} (k).
\end{equation}
Therefore, if in addition $\Vert X_0 \Vert_{\infty} \leq M$, $\kappa_2 \ll n^{-1} M \sum_{k\,\geq\,1} (k \wedge n) \theta_{X,1, 1} (k)$, which is always of a smaller order than the upper bounds~\eqref{theo2.1.a} and~\eqref{theo2.1.b}. Hence Theorem~\ref{thW2} also holds for $\bbE (W_2^2 (P_{S_n / \sigma_n |\clF_0 }, G_{1}) ) $.
\end{enonce}

We now give applications of Theorem~\ref{thW2} to pointwise estimates. We start by Berry--Esseen type estimates. Arguing for instance as in~\cite[Remark~2.4]{DMR09}, Theorem~\ref{thW2} together with Comment~\ref{comm2.2} imply the following upper bound.

\begin{coro}\label{AppliBE}
Assume that $\sigma>0$, $\Vert X_0 \Vert_{\infty} \leq M$ and $\sum_{k\, \geq\,1} k^2 \theta_{X,4,4} (k) < \infty$. Then
\[
\Delta_{n} = \sup_{x\,\in\,\bbR}\left| \bbP (S_n/\sigma_n \leq x) - \Phi(x)\right|\ll n^{-1/3} \,.
\]
\end{coro}

We now give applications of our main result to estimates of the quantiles and the superquantiles of $S_n/\sigma_n$ in the nondegenerate case. Define the $1$-risk $Q_{1,X}$ of $X$, as in Pinelis~\cite{Pi14}, by
\begin{equation}\label{DefSuperQuantile}
Q_{1,X} (u) = \frac 1 u \int_0^u F_X^{-1} (1-t) dt.
\end{equation}
Then $Q_{1,X} (u)$ is the value of the superquantile of $X$ at point $(1-u)$. The corollary below, which is a consequence of Theorem~\ref{thW2} and Proposition~\ref{PropVaR} provides estimates of the accuracy in the central limit theorem for $F^{-1}_{S_n/\sigma_n}$ and $Q_{1,S_n/\sigma_n}$. Its proof is given in Section~\ref{Annex}.

\begin{coro}\label{AppliVaR}
Assume that $\Vert X_0 \Vert_{\infty} \leq M$, $\sum_{k\,\geq\,1} k^2
\theta_{X,4,4} (k) < \infty$ and $\sigma^2 >0$. Let $Y$ be a standard normal. Then there exists some constant $C>0$ such that, for any $n\geq 1$ and any $u$ in $(0,1)$,
\begin{multline}\label{coro2.5.a}
\tag{$\rm a$} \left| F_{S_n/\sigma_n}^{-1} (u) - \Phi^{-1} (u) \right| \\ \leq C \max \bigl((nu(1-u))^{-1/2}, (nu(1-u))^{-1/3} |\log (u(1-u)) |^{-1/6} \bigr)
\end{multline}
and
\begin{equation}\label{coro2.5.b}
\tag{$\rm b$}\left| Q_{1, S_n/\sigma_n} (u) - Q_{1,Y} (u)\right| \leq C (nu)^{-1/2} \sqrt{1-u}.
\end{equation}
\end{coro}

\begin{enonce}{Comment}
From Corollary~\ref{AppliVaR}(a), for any sequence $(\varepsilon_n)_n$ of reals in $(0,1/2)$
such that $\lim_n \varepsilon_n= 0$ and $\lim_n n\varepsilon_n = \infty$,
\[
\lim_{n\,\rightarrow\,\infty}
\sup_{u\,\in\,\left[\varepsilon_n,1-\varepsilon_n\right]} \left| F_{S_n/\sigma_n}^{-1} (u) - \Phi^{-1} (u) \right| = 0,
\]
which cannot be deduced from a Berry--Esseen type bound with the rate $n^{-1/2}$. Indeed,
if $\Delta_n$ is defined as in Corollary~\ref{AppliBE}, one can only get that
\[
\left| F_{S_n/\sigma_n}^{-1} (u) - \Phi^{-1} (u)\right| \leq \Phi^{-1} (\min (1, u + \Delta_n)) - \Phi^{-1} (u)
\]
for $u\geq 1/2$, which is of interest only if $u < 1 - \Delta_n$.
\end{enonce}

If furthermore the sequence $(X_i)_{i \,\in\,\bbZ}$ is a sequence of martingale differences, then the conditions on the dependence coefficients can be weakened as follows (the proof being less intricate is left to the reader).

\begin{theo} \label{thW2M}
Assume that $(X_i)_{i\,\in\,\bbZ}$ is a sequence of martingale differences such that $\Vert X_0 \Vert_{\infty} \leq M$ and $\bbE (X_0^2) = \sigma^2$. Then
\begin{align}
\tag{$\rm a$}\bbE \left(W_2^2 \left(P_{S_n / {\sqrt n} |\clF_0 }, G_{\sigma^2}\right) \right) \ll n^{-1/2}
\left(1 + \sum_{k=1}^ {[\sqrt n]} \theta_{X,1,2} (k) \right) \,. \label{theo2.7.a}
\\
\intertext{If furthermore $ \sum_{k\geq 1} \theta_{X,3,4} (k) < \infty$, then}
\tag{$\rm b$}\bbE \left(W_2^2 \left(P_{S_n / {\sqrt n} |\clF_0 }, G_{\sigma^2}\right) \right) \ll n^{-1} \left(1 + \sum_{k\,\geq\,1} \left(k \wedge \sqrt{n}\right) \theta_{X,3,4} (k) \right)\,. \label{theo2.7.b}
\end{align}
\end{theo}

\begin{enonce}{Comment}\label{comm2.8}
Item~\eqref{theo2.7.a} provides a rate in the CLT as soon as $\theta_{X,1,2} (k) = o (1)$. If $\theta_{X,1,2} (k) = O (k^{-a}) $ with $a$ in $(0,1) $, \eqref{theo2.7.a} ensures that $W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-a/4} $. If $ \sum_{k\geq 1} \theta_{X,1,2} (k) < \infty$, then the rate is of order $n^{-1/4}$.
Item~\eqref{theo2.7.b} provides faster rates under the condition $ \sum_{k\,\geq\,1} \theta_{X,3,4} (k) < \infty$. Indeed the rate of convergence under this condition is $o (n^{-1/4})$. If $\theta_{X,3,4} (k) = O (k^{-a}) $ with $a$ in $(1,2)$, \eqref{theo2.7.b} ensures that $W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-a/4} $. Moreover $ W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$ as soon as $ \sum_{k\,\geq\,1} k \theta_{X,3,4} (k) < \infty$.
\end{enonce}

\section{Examples} \label{section3}

%\setcounter{equation}{0}

\subsection{\texorpdfstring{$\alpha$}{alpha}-mixing sequences}\label{alphamixing}
Let $(\Omega, \clA, \bbP)$ be a probability space and let $ \clU$ and $ \clV$ be two $\sigma$-algebras of $ \clA$. The strong mixing coefficient $ \alpha (\clU,\clV) $ between these $\sigma$-algebras is defined as follows:
\[
\alpha (\clU,\clV) =\sup \left\{\left| \bbP (U\cap V) - \bbP (U) \bbP (V)\right|: U \in \clU, V\in
\clV \right\} \,.
\]
Next, for a stationary sequence $(Y_i)_{i\,\in\,\bbZ}$ of random variables with values in a Polish space $S$, define its strong mixing (or $\alpha$-mixing) coefficients of order $4$ as follows: Let
\[
\alpha_{\infty, 4}(n) = \sup_{i_4\,> \,i_3\,>\,i_2\,>\,i_1\,\geq\,n} \alpha \left(\clF_{0}, \sigma (Y_{i_1}, Y_{i_2}, Y_{i_3}, Y_{i_4})\right) \,,
\]
where $\clF_0= \sigma(Y_i, i \leq 0)$. As noted in~\cite[p.~146]{MPU}, these coefficients can be rewritten in the following form: Let $B_1$ be the class of measurable functions from $S^4$ to $\bbR $ and bounded by one. Then
\[
\alpha_{\infty, 4}(n) = \frac{1}{4} \sup_{f\,\in\,B_1} \sup_{i_4\,>\,i_3 \,>\,i_2\,>\,i_1\,\geq\,n} \big \Vert \bbE (f(Y_{i_1}, Y_{i_2}, Y_{i_3}, Y_{i_4}) | \clF_0) - \bbE (f(Y_{i_1}, Y_{i_2}, Y_{i_3}, Y_{i_4})) \big \Vert_1 \,.
\]
Hence, an application of Theorem~\ref{thW2}$\MK$\eqref{theo2.1.b} provides the following result.

\begin{coro}\label{coralphamixing}
Let $(Y_k)_{k\,\in\,\bbZ}$ be a stationary sequence of random variables with values in a Polish space and such that $\sum_{k\,\geq\,1} k^2 \alpha_{\infty, 4} (k) < \infty$. Let $f $ be a bounded measurable numerical function and $X_k= f(Y_k) - \bbE (f(Y_k))$. Set $S_n = \sum_{k=1}^n X_k$. Then $ W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$.
\end{coro}

As mentioned in the introduction, this result applies to the class of irreducible aperiodic and positively recurrent Markov chains $(\xi_n)$ with an atom denoted by $A$, under the condition $\bbE _A (\tau_A^4) < \infty$. Here $\tau_A$ is the first return time in $A$ and $\bbE _A$ stands for the expectation under $\bbP _x$ for $x \in A$.

\subsection{\texorpdfstring{$\alpha$}{alpha}-dependent sequences and \texorpdfstring{$\tau$}{tau}-mixing sequences}\label{taualpha}

We start by recalling the definition of the $\alpha$-dependence coefficients as considered in~\cite{DGM10}.
\begin{defi}\label{defalpha}
For any random variable $Y=(Y_1, \cdots, Y_k)$ with values in $\bbR^k$ and any $\sigma$-algebra $\clF$, let
\[
\alpha(\clF, Y)= \sup_{(x_1,\, \ldots,\,x_k)\,\in\,\bbR^k}
\left \| \bbE \left(\prod_{j=1}^k \left({ 1\hspace{-1.2mm}\rmI}_{Y_j\, \leq\,x_j}\right)^{(0)} \,\middle|\,\clF \right)^{(0)} \right\|_1.
\]
For the sequence ${\bf Y}=(Y_i)_{i\,\in\,\bbZ}$, let
\begin{equation*}
\alpha_{k, {\bf Y}}(0) =1 \text{ and }\alpha_{k, {\bf Y}}(n) = \max_{1\,\leq\,l\,\leq\,k} \ \sup_{n\,\leq\, i_1\,\leq\,\ldots\,\leq\,i_l} \alpha(\clF_0, (Y_{i_1}, \ldots, Y_{i_l})) \text{ for }n>0\,,
\end{equation*}
where $\clF_0 = \sigma (Y_i, i \leq 0)$.
\end{defi}
Theorem~\ref{thW2}$\MK$\eqref{theo2.1.b} together with~\cite[equality~(A.4)]{DR08} (with $\bbE (\cdot | \clF_0)$ instead of $\bbE$) provide the following result.

\begin{coro}\label{coralpha}
Let $f $ be a bounded variation (BV) function and $X_k= f(Y_k) - \bbE (f(Y_k))$ where $(Y_k)_{k\,\in\,\bbZ}$ is a stationary sequence of real-valued random variables. Let $S_n = \sum_{k=1}^n X_k$. If $\sum_{k\,\geq\,1} k^2 \alpha_{4, {\bf Y}} (k) < \infty$, then $ W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$.
\end{coro}

From this result, we can derive rates in the CLT for the partial sums associated with BV observables of the LSV map. More precisely, for $\gamma \in]0,1[$, let $T_\gamma$ be the map defined from $[0,1]$ to $[0,1]$ by
\begin{equation*}
T_\gamma(x)=
\begin{cases}
x(1+ 2^{\gamma}x^{\gamma}) & \text{if } x \in [0, 1/2[ \\
2x-1 & \text{if } x \in [1/2, 1] \,.
\end{cases}
\end{equation*}
This is the so-called LSV~\cite{LSV} map with parameter $\gamma$. Recall that there exists a unique $T_\gamma$-invariant measure $\nu_\gamma$ on $[0, 1]$, which is absolutely continuous with respect to the Lebesgue measure with positive density denoted by $h_\gamma$. From Corollary~\ref{coralpha} above and~\cite[Prop.~1.17]{DGM10}, we derive that $ W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$ for any $\gamma < 1/4$, where $f$ is a bounded variation function and $S_n = \sum_{k=1}^n (f(T^k_{\gamma}) -\nu_\gamma (f))$.


We now apply Theorem~\ref{thW2} to functions of $\tau$-dependent sequences. Before stating the result, some definitions are needed.

\begin{defi}
Let $\eta \in]0, 1]$, $\ell$ be a positive integer and let $\Lambda_\eta(\bbR^\ell)$ be the set of functions $f$ from $\bbR^\ell$ to $\bbR$ such that for $x = (x_1,\,\dots,\,x_\ell)$ and $y = (y_1,\,\dots,\,y_\ell)$,
\[
\left|f(x)-f(y)\right|\leq \frac{1}{\ell} \sum_{i=1}^{\ell} |x_i-y_i|^{\eta} \,.
\]
Define the dependence coefficients $(\tau_{\eta, \ell,{\bf Y}} (k))_{k\, \geq\,1}$ of the sequence $(Y_i)_{i\,\in\,\bbZ}$ by
\begin{multline*}
\tau_{\eta,\ell,{\bf Y}} (k) =\\
\max_{1 \,\leq\,j\,\leq\,\ell}\; \sup_{i_j\,>\,\ldots\,>\,i_1\,\geq\,k} \left \| \sup_{f\,\in\,\Lambda_\eta(\bbR^j)}\Big |\bbE \left(f(Y_{i_1},\,\dots,\,Y_{i_j}) \,\middle|\,\clF_0\right)-\bbE\left(f(Y_{i_1}, \,\dots,\,Y_{i_j})\right)\Big | \right\|_1 \,.
\end{multline*}
\end{defi}

Examples of $\tau_\eta$-dependent sequences are given in~\cite{DP05}.

%\medskip

Let $(Y_k)_{k\,\in\,\bbZ}$ be a stationary sequence of real-valued random variables and $f$ be a bounded and $\eta$-H\"older function, with $\eta \in]0,1]$. Define $X_k = f(Y_k) - \bbE (f(Y_k))$. Then, for any positive integers $p,q$ and $k$, $\theta_{X, p,q } (k) \leq C \tau_{\eta, p,{\bf Y}} (k) $ where $C$ is a positive constant depending only on $p$, $q$ and $\Vert f \Vert_{\infty}$. Hence the following result holds.

\begin{coro}\label{cortaufirst}
Let $f$ be a bounded and $\eta$-H\"older function with $\eta \in]0,1]$ and $X_k= f(Y_k) - \bbE (f(Y_k))$ where $(Y_k)_{k\,\in\,\bbZ}$ is a stationary sequence of real-valued random variables. Let $S_n = \sum_{k=1}^n X_k$. If $\sum_{k\,\geq\,1} k^2 \tau_{\eta, 4,{\bf Y}} (k) < \infty$, then $ W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$.
\end{coro}

From this result, we can derive rates in the CLT for the partial sums associated with H\"older functions of the LSV map above. Starting from Corollary~\ref{cortaufirst} and taking into account~\cite[Prop.~5.3 and Inequality~(4.2)]{DM15}, we derive that if $ \gamma <1/4$, then $ W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$, where $S_n = \sum_{k=1}^n (f(T^k_{\gamma}) -\nu_\gamma (f)) $ and $f$ is an $\eta$-H\"older observable with $\eta \in]0,1]$.

%\medskip

We now define another class of functions which are well adapted to $\tau$-dependence.



\begin{defi}
Let $c$ be any concave function from $\bbR^+$ to $\bbR^+$, with $c(0)=0$. Let $\clL_c$ be the set of functions $g$ from $\bbR$ to $\bbR$ such that
\[
|g(x)-g(y)| \leq K c(|x-y|), \quad \text{for some positive $K$.}
\]
\end{defi}


Let $g \in \clL_c$ and $X_k= g(Y_k) - \bbE (g(Y_k))$ where $(Y_k)_{k\,\in\,\bbZ}$ is a stationary sequence of bounded real-valued random variables. Then, for any positive integers $\ell$ and $k$, $\tau_{1,\ell,{\bf X}} (k) \leq K c (\tau_{1,\ell,{\bf Y}} (k)) $. As a consequence of Corollary~\ref{cortaufirst}, the following result holds:



\begin{coro}\label{cortau}
Let $g \in \clL_c$ and $X_k= g(Y_k) - \bbE (g(Y_k))$ where $(Y_k)_{k\,\in\,\bbZ}$ is a stationary sequence of bounded real-valued random variables such that $ \tau_{1,4,{\bf Y}} (k) = O (\rho^k)$ for some $\rho$ in $]0,1[$. Let $S_n = \sum_{i=1}^n X_i$. If
\[
\int_0^1 \frac{ (\log t)^2}{t} c(t) dt < \infty \,,
\]
then $W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$.
\end{coro}

Corollary~\ref{cortau} applies in particular to $X_k= g (T^k) - \nu(g) $ where $T$ is a map from $[0,1]$ to $[0,1]$ that can be modelled by a Young tower with exponential tails of the return times and $\nu$ is the usual invariant measure (see~\cite[Section~4]{DM15} adapted to the case of exponential tails of the return times).

\subsection{Symmetric random walk on the circle} \label{sectcircle}

Let $K$ be the Markov kernel defined by $Kf (x) =(f (x+a) + f (x-a))/2$ on the torus $\bbR/\bbZ$, with $a$ irrational in $[0,1]$. The Lebesgue--Haar measure $m$ is the unique probability which is invariant by $K$. Let $(\xi_i)_{i\,\in\,\bbZ}$ be the stationary Markov chain with transition kernel $K$ and invariant distribution $m$. For $f \in \bbL^2(m)$, let
\begin{equation}\label{defSnf}
X_k=f(\xi_k)-m(f) \,.
\end{equation}
This example has been considered by Derriennic and Lin~\cite{DL01} who showed that the central limit theorem holds with the normalization $\sqrt{n}$ as soon as
\begin{equation}\label{Paroux}
\sum_{k\,\in\,{\bbZ}^*} \frac{|\hat f (k)|^2}{d (ka, {\bbZ})^2} < \infty
\,,
\end{equation}
where $\hat f(k)$ are the Fourier coefficients of $f$ and $d (ka, \bbZ)=\min_{i\, \in\,\bbZ} |ka-i|$. The aim in this section is to give additional conditions on $f$ and on the properties of the irrational number $a$ ensuring rates of convergence in the CLT. Let us then introduce the following definition: $a$ is said to be \emph{badly approximable in the weak sense} by rationals if for any positive $\varepsilon$,
\begin{multline}\label{badlyweak}
\text{ the inequality } d(ka, \bbZ) < |k|^{-1-\varepsilon} \\
\text{has only finitely many
solutions for }k \in \bbZ^*.
\end{multline}
From Roth's theorem the algebraic numbers are badly approximable in the weak sense (cf. Schmidt~\cite{Sc80}). Note also that the set of badly approximable numbers in $[0,1]$ has Lebesgue measure $1$.


An application of Theorem~\ref{thW2} together with~\cite[Lemma~5.2]{DR08} and their inequality (5.18) gives the following corollary.



\begin{coro}\label{circle}
Let $X_k$ be defined by~\eqref{defSnf}. Suppose that the irrational number $a$ satisfies~\eqref{badlyweak}. Assume that for some positive $\varepsilon$,
\begin{equation*}\label{condirra}
\sup_{k\not= 0} |k|^{ 6 +\varepsilon} \left|\hat f (k)\right| < \infty \,.
\end{equation*}
Then $ W_2 (P_{S_n / {\sqrt n} }, G_{\sigma^2}) \ll n^{-1/2}$.
\end{coro}

\section{Proofs} \label{section4}

%\setcounter{equation}{0}

\subsection{Proof of Theorem~\ref{thW2}} \label{subproofTh}
It is based on the Lindeberg method, which naturally extends to the dependent case. Let us start by giving an overview of the proof in a simplified framework.

\subsubsection{Outline of the proof in a simplified framework}


Assume in this subsection that $(X_k)_{k\,\in\,\bbZ}$ is a strictly stationary sequence of random variables such that $\bbE (X_k | \clF_{k-1}) =0$, $\bbE (X^2_k | \clF_{k-1}) =\sigma^2$, $\bbE (X^3_k | \clF_{k-1}) =0$, $\bbE(X^4_k | \clF_{k-1}) =\beta_4$ and $\bbE (|X_k|^5) \leq M$. In this context let us show that $W_2 (P_{S_n}, G_{n \sigma^2}) = O (1)$.

Let $(Y_k)_{k\,\in\,\bbZ}$ be a sequence of iid random variables with $\clN (0, \sigma^2)$ distribution, independent of $(X_k)_{k\,\in\,\bbZ}$. Let also $Z$ be a random variable with $\clN (0,\sigma^2)$ distribution, independent of $(X_k,Y_k)_{k\,\in\,\bbZ}$. Let $T_n=Y_1 + \cdots +Y_n$. We first note that, by the triangle inequality,
\[
W_2  \left(P_{S_n}, G_{n \sigma^2}\right)\leq W_2 \left(P_{S_n+Z}, P_{T_n+Z}\right) + 2 \sigma \,.
\]
It remains to prove that $W_2 (P_{S_n+Z}, P_{T_n+Z})= O (1)$.

By~\cite[Theorem~3.1]{Rio09}, if $\mu$ and $\nu$ are two probability laws on the real line,
\[
W^2_2 (\mu, \nu) \leq 4 \zeta_2 (\mu, \nu) \text{ where } \zeta_2 (\mu, \nu) = \sup \left\{ \int f d \mu - \int f d \nu \, : \, f \in \clC^2, \Vert f'' \Vert_{\infty} \leq 1\right\}.
\]
To control $ \zeta_2 (P_{S_n+Z}, P_{T_n+Z}) $ we apply the Lindeberg method. Let $T_{n,k} = \sum_{i=k+1}^n Y_i$, and let $f$ in $ \clC^2$ such that $ \Vert f'' \Vert_{\infty} \leq 1$. We have
\begin{align*}
\Delta & = \bbE \big(f (X_1+\cdots + X_n + Z)\big) - \bbE \big(f (Y_1+\cdots + Y_n + Z)\big) \\
& = \sum_{k=1}^n \Big(\bbE \big(f (S_{k-1} + X_k + T_{n,k} + Z)\big) - \bbE \big(f (S_{k-1} + Y_k + T_{n,k} + Z)\big) \Big) \,.
\end{align*}
Hence, by independence between sequences,
\[
\Delta = \sum_{k=1}^n \underbrace{ \Big (\bbE \big(f _{n-k}(S_{k-1} + X_k)\big) - \bbE \big(f _{n-k} (S_{k-1} + Y_k)\big) \Big)}_{ \Delta_{n,k} }
\]
where $f_{n-k} (x) =\bbE (f (x+ T_{n,k} + Z))$. The functions $f_{n-k}$ are $ \clC^{\infty}$ and satisfy $
\Vert f^{(\ell)}_{n-k} \Vert_{\infty} \leq c_{\ell} (n-k+1)^{-(\ell -2) /2 } $ for $\ell \geq 2$ (see Item~\eqref{lemm4.4.1} of the next Lemma~\ref{lmacrucial}). By Taylor's expansion at order $5$,
\[
\Delta_{n,k} = \sum_{\ell =1}^4 \frac{1 }{\ell !} \bbE \left (f^{(\ell)}_{n-k}(S_{k-1}) \left(X_k^{\ell} - Y_k^{\ell}\right) \right) +R_{n,k} \,,
\]
where the remainder term $R_{n,k}$ satisfies $|R_{n,k}| \leq \Vert f^{(5)}_{n-k} \Vert_{\infty} (M + \bbE (|Y_k|^5))/5! $. Since $\Vert f^{(5)}_{n-k} \Vert_{\infty} \leq c_{5} (n-k+1)^{-3 /2 } $, we get that $\sum_{k=1}^n |R_{n,k}| = O (1) $. On the other hand, for any positive integer $\ell \leq 3$, we deduce from the assumptions on the conditional moments that $\bbE  (f^{(\ell)}_{n-k}(S_{k-1}) (X_k^{\ell} - Y_k^{\ell}) ) =0$. Recall that $\bbE(X^4_k | \clF_{k-1}) =\beta_4$ and that $ \bbE (Y_k^{4}) = 3 \sigma^4$. Hence
\[
I^{(4)}_{n,k} := \bbE \left(f^{(4)}_{n-k}(S_{k-1}) \left(X_k^{4} - Y_k^{4}\right) \right) = \left(\beta_4- 3 \sigma^4\right) \bbE \left(f^{(4)}_{n-k}(S_{k-1}) \right) \,.
\]
Clearly if we use the bound $| \bbE (f^{(4)}_{n-k}(S_{k-1})) | \leq \Vert f^{(4)}_{n-k} \Vert_{\infty} \leq c_4 (n-k+1)^{-1}$, we will get a bound of order $O (\log n)$ for $ \zeta_2 (P_{S_n+Z}, P_{T_n+Z}) $. To get the bound $O(1)$, an additional trick is needed. Actually this additional trick is the content of Item~\eqref{lemm4.4.2} of the next Lemma~\ref{lmacrucial}. Indeed by the assumptions on the conditional moments, we get that $\theta_{X,3,4} (k) =0$ for $k \geq 1$. Therefore Item~\eqref{lemm4.4.2} of Lemma~\ref{lmacrucial} entails that there exists a constant $c>0$ such that
\[
\left| \bbE \left(f_{n-k}^{(4) } ({ S}_{k-1})\right) \right | \leq c (n-k+1)^{-3/2} + c n^{ -1} \,.
\]
So, overall, $\sum_{k=1}^n |I^{(4)}_{n,k}| = O(1)$. This ends the proof of the theorem in this simplified framework of constant conditional moments up to order $4$, with $\bbE (X^3_k | \clF_{k-1}) =0$.

Clearly in the more general framework of Theorem~\ref{thW2} much work remains to be done. Indeed $(X_k)_{k \,\in\,\bbZ} $ does not necessarily form a sequence of martingale differences and we do not assume that $\bbE (X_k^3) =0$. To solve the latter problem, we will introduce another sequence of random variables which, in the context of independent random variables, have the same first three moments as the initial random variables.

\subsubsection{Detailed proof in the general setting of Theorem~\ref{thW2}}



Assume first that $\sigma^2 =0$. In this case $G_{\sigma^2} = \delta_0$ and
\[
\bbE \left(W_2^2 \left(P_{S_n / {\sqrt n} |\clF_0 }, \delta_0\right) \right) = n^{-1} \bbE\left(S_n^2\right) -\sigma^2 = - 2 n^{-1} \sum_{k\,\geq\,1} (k \wedge n) \Cov (X_0,X_k),
\]
which, combined with~\eqref{UpperBoundCov1}, shows that the upper bounds~\eqref{theo2.1.a} and~\eqref{theo2.1.b} hold.


%\smallskip
We turn now to the case $\sigma^2 >0$. Let $\delta$ be a random variable with uniform distribution over $[0,1]$ independent of $(X_k)_{k\,\in\,\bbZ}$. Define $\clG_\ell = \sigma ((X_i)_{i\,\leq\,\ell}, \delta) $ and $\clG_\infty = \sigma ((X_{i })_{i\,\in\,\bbZ}, \delta) $. Define also the conditional expectation operator $\bbE_0$ by $\bbE_0 (\cdot)= \bbE (\cdot | {\clG_0 }) $.

%\smallskip

In what follows $(Y_k)_{k\,\geq\,1}$ will be a sequence of iid random variables independent of $\clG_\infty$. In case of Item~\eqref{theo2.1.a}, their common law will be the normal law $ \clN(0, \sigma^2)$ whereas in case of Item~\eqref{theo2.1.b}, we will have to prescribe also their third moment as described below.

Let $\beta_3$ be a fixed real number. Let $Z$ be a r.v. with distribution $ \clN(0, \sigma^2/2)$. There exists a random variable $B$ independent of $Z$, taking only 2 values and such that $Y= Z+B$ satisfies
\begin{equation}\label{momentY}
\bbE(Y)=0, \; \bbE(Y^2)= \sigma^2\quad \text{and}\quad \bbE(Y^3)=\beta_3 \,.
\end{equation}
We refer to~\cite[Lemma~4.1]{DR08} for more details. For the proof of Item~\eqref{theo2.1.b}, we take
\begin{equation}\label{defbeta3}
\beta_3= \bbE (X_0^3) + 3 \sum_{i\,\geq\,1 } \left\{ \bbE \left(X_0^2 X_i\right) + \bbE \left(X_0 X^2_i\right) \right\} + 6 \sum_{u\,\geq\,1} \sum_{v\,\geq\,u+1} \bbE \left(X_0 X_uX_{v}\right) \,,
\end{equation}
which is the limit of $n^{-1}\bbE (S_n^3) $, as $n \rightarrow \infty$, under the conditions of Theorem~\ref{thW2}$\MK$\eqref{theo2.1.b}.

Let $(Z_k)_{k\,\geq\,1}$ be a sequence of independent r.v.'s distributed as $Z$ and let $(B_k)_{k\,\geq\,1}$ be a sequence of independent r.v.'s distributed as $B$ and independent of $(Z_k)_{k\,\geq\,1}$. Suppose furthermore that the sequence $(Z_k,B_k)_{k\,\geq\,1}$ is independent of $\clG_\infty$. For any $k \geq 1$, set $Y_k = Z_k+B_k$.

Next, in case of both items, we define $T_n = Y_1 + Y_2 + \cdots + Y_n$. Note that
\[
W_2 \left(P_{S_n / {\sqrt n}|\clF_0 }, G_{\sigma^2}\right) \leq W_2 \left(P_{S_n / {\sqrt n}|\clF_0 }, P_{T_n / {\sqrt n}}\right) + W_2 \left(P_{T_n / {\sqrt n}}, G_{\sigma^2}\right) \,.
\]
According to~\cite[Theorem~4.1]{Rio09}, since $Y \in \bbL^4$, $W_2 (P_{T_n / {\sqrt n}}, G_{\sigma^2}) \ll n^{-1/2}$. Since $P_{S_n / {\sqrt n}|\clF_0} = P_{S_n / {\sqrt n}|\clG_0}$, the theorem will follow if one can prove that the upper bounds (a) and (b) still hold with $P_{S_n / {\sqrt n}|\clG_0 }$ replacing $P_{S_n / {\sqrt n}|\clF_0 }$. With this aim, we shall apply~\cite[Lemma~A.1]{MR12}. We start by introducing some notations. Let $W= ((X_i)_{i\,\in\,{\bbZ}^-}, \delta)$ and $E = {\bbR}^{{\bbZ}^-} \times [0,1]$. Let $\Lambda_2$ be the class of real functions $f$ which are continuously differentiable and such that $|f'(x) - f'(y) |\leq | x - y |$ for any $(x,y) \in \bbR \times \bbR$. Let also $\Lambda_2 (E)$ be the set of measurable functions $f:\bbR
\times E \rightarrow \bbR$ wrt the $\sigma$-fields $\clL(\bbR \times E) $ and $\clB (\bbR)$, such that $f(\cdot, w) \in \Lambda_2$ and $f(0,w)=f'(0,w)=0$ for any $w \in E$. According to~\cite[Lemma~A.1]{MR12}, and denoting by $N$ a $\clN(0,\sigma^2)$-distributed random variable, independent of all the above sequences (so independent of $(X_k,Y_k)_k$), the upper bound~\eqref{theo2.1.a} will follow if one can prove that
\begin{multline}\label{W2condp2-Itema}
\sup_{f\,\in\,\Lambda_2 (E)} \bbE \big(f (S_n + N, W) - f (T_n + N, W) \big)\\
\ll \sqrt{n} \left(1 + \sum_{k\,\geq\,1} \left(k \wedge \sqrt{n}\right) \theta_{X,2,2} (k) \right) \,,
\end{multline}
whereas the upper bound~\eqref{theo2.1.b} will follow if
\begin{equation}\label{W2condp2}
\sup_{f\,\in\,\Lambda_2 (E) } \bbE \big (f (S_n + N, W) - f (T_n + N, W) \big) \ll 1 + \sum_{k\,\geq\,1} k \left(k \wedge \sqrt{n}\right) \theta_{X,4,4} (k) \,.
\end{equation}
In what follows, to shorten the notations, we omit the subscripts for the coefficients $\theta (k)$.

%\smallskip


\begin{proof}[{Proof of Theorem~\ref{thW2}$\MK$\eqref{theo2.1.a}}] 
We shall apply the Lindeberg method. Let us first introduce some notations.
\begin{Notation}\label{not21-a}
Set $f_{n-k} (x) = \bbE_0 (f (x + N + T_n - T_k, W))$.
\end{Notation} Notice that
\[
f_{n-k} (x) = \int_\bbR f (x - t, W) \varphi_{\sigma^2 (n-k+1)} (t) dt \,,
\]
where $\varphi_{t^2}$ is the density of a $\clN(0,t^2)$. Hence, according to~\cite[Lemma~6.1]{DMR09},
\begin{equation}\label{boundderiv}
\left\Vert f_{n-k}^{(i) }\right\Vert_{\infty} := b_i \ll (n-k+1)^{(2-i)/2} \,.
\end{equation}
Since the sequence $(N,(Y_i)_{i\,\geq\,1})$ is independent of the sequence $((X_i)_{i\,\in\,\bbZ},W)$,
\begin{equation}\label{sumdeltaalter}
\bbE \big(f(S_n + N, W) -f (T_n + N,W)\big) = \sum_{k=1}^n \bbE \big(f_{n-k} (S_{k-1} +X_k) - f_{n-k} (S_{k-1} +Y_k)\big) \,.
\end{equation}
By the Taylor formula at order 3 and using~\eqref{boundderiv}, we get
\begin{equation}\label{sumdelta-1}
\left| \bbE \left(f_{n-k} (S_{k-1} + Y_k) -f_{n-k} (S_{k-1}) - \frac{\sigma^2}{2} f''_{n-k} (S_{k-1}) \right) \right | \leq C (n-k+1)^{-1/2} \,.
\end{equation}
Similarly
\begin{multline}\label{sumdelta-2}
\left | \bbE \left(f_{n-k}(S_k) -f_{n-k} (S_{k-1}) - f_{n-k}^\prime (S_{k-1}) X_k - \frac{1}{2}f_{n-k}'' (S_{k-1}) X_k^2\right) \right| \\
\leq C (n-k+1)^{-1/2} \,.
\end{multline}
Now we control the second order term. Let
\begin{equation}\label{defgamma}
\Gamma_{k}(k,i) = f_{n-k}'' (S_{k-i}) -f_{n-k}'' (S_{k-i-1}) \,.
\end{equation}
Clearly
\[
f_{n-k}'' (S_{k-1}) X_k^2 = \sum_{i=1}^{[\sqrt{k}]-1}
\Gamma_{k}(k,i) X_k^2 +f_{n-k}'' \left(S_{k-[\sqrt k]}\right)X_k^2 \,.
\]
Since $|\Gamma_{k}(k,i) | \leq b_3 |X_{k-i}|$, by stationarity we get that for any $i \leq k-1$,
\[
\left|\covv\left(\Gamma_{k}(k,i), X^2_k\right) \right| \leq b_3 \left\Vert X_0 \left(\bbE_0 (X^2_i)- \bbE (X^2_i) \right) \right\Vert_1 \ll (n-k+1)^{-1/2} \theta(i)\,.
\]
Since $\Vert f_{n-k}'' \Vert_{\infty} \leq b_2 $ a.s., we also get by stationarity that
\[
\left|\covv \left(f_{n-k}'' \left(S_{k-[\sqrt k]}\right), X^2_k\right) \right| \leq b_2 \Vert \bbE_0 \left(X_{[\sqrt k]}^2\right)- \bbE \left(X_{[\sqrt k]}^2\right)\Vert_1 \ll \theta\left([\sqrt k]\right) \,.
\]
Starting from~\eqref{sumdelta-2}, it follows that
\begin{multline}\label{dt5}
\left| \bbE \big(f_{n-k}(S_k) -f_{n-k} (S_{k-1}) - f_{n-k}^\prime (S_{k-1}) X_k\big) - \frac{1}{2} \bbE \big(f_{n-k}'' (S_{k-1})\big) \bbE (X_k^2) \right | \\ \ll \theta\left([\sqrt k]\right) + (n-k+1)^{-1/2} \left(1+ \sum_{i=1}^{[\sqrt{k}]}\theta(i) \right)
\,.
\end{multline}
Starting from~\eqref{sumdeltaalter} and taking into account~\eqref{sumdelta-1} and~\eqref{dt5} we derive that
\begin{multline}\label{sumdelta-3}
\big | \bbE (f(S_n + N, W) -f (T_n + N,W)) \big |
\ll \sqrt{n} \left(1 + \sum_{i=1}^{[\sqrt n]} \theta (i) \right) \\
 + \left | \sum_{k=1}^n \left\{ \bbE \left(f'_{n-k} (S_{k-1}) X_k\right) - \bbE \left(f_{n-k}'' (S_{k-1})\right) \sum_{j\,\geq\,1} \bbE(X_0 X_j) \right\} \right |
\,.
\end{multline}
To give now an estimate of $\bbE (f'_{n-k} (S_{k-1})X_k)$, we write
\[
f'_{n-k} (S_{k-1}) = f'_{n-k} (0) + \sum_{i=1}^{k-1} \big(f'_{n-k}(S_{k-i}) - f'_{n-k}(S_{k-i-1})\big) \,.
\]
Hence
\begin{multline}\label{dt6}
\bbE \big(f'_{n-k} (S_{k-1})X_k\big) = \\
\sum_{i=1}^{k-1} \Cov \big (f'_{n-k}(S_{k-i}) - f'_{n-k}(S_{k-i-1}), X_k \big) + \bbE \left(f'_{n-k} (0) X_k\right) \,.
\end{multline}
Now $f'_{n-k} (0) $ is a $\clG_0$-measurable random variable. Since $f \in \Lambda_2(E) $ then $f'(0,w)=0$ and $f' (\cdot, w) $ is $1$-Lipschitz. Therefore
\[
\left|f'_{n-k} (0)\right| \leq \int_\bbR \left| f'(u,W) -f'(0,W)\right| \varphi_{\sigma^2 (n-k+1) } (-u) du \leq \sigma \sqrt{ n-k+1 } \, \text{ a.s.}
\]
It follows that
\begin{equation}\label{dt7}
\sum_{k=1}^n \left| \bbE \left(f'_{n-k} (0) X_k\right) \right| \ll \sum_{k=1}^n \sqrt{ n-k+1} \Vert \bbE_0 (X_k) \Vert_1 \ll \sqrt{n} \sum_{k=1}^n \theta(k) \,.
\end{equation}
We give now an estimate of $\sum_{i=1}^{k-1} \Cov (f'_{n-k}(S_{k-i}) - f'_{n-k}(S_{k-i-1}), X_k )$. Using the stationarity and noting that $|f_{n-k}^\prime (S_{k-i}) -f_{n-k}^\prime (S_{k-i-1})| \leq b_2|X_{k-i}|$, we have
\[
\left|\covv \left(f_{n-k}^\prime (S_{k-i}) -f_{n-k}^\prime (S_{k-i-1}), X_k\right)\right| \leq b_2 M \Vert \bbE_0 (X_i) \Vert_1 \ll \theta(i) \,.
\]
Hence
\begin{multline}\label{dt8}
\sum_{k=1}^n\;\sum_{i=[\sqrt k]}^k \left|\covv \left(f_{n-k}^\prime (S_{k-i}) -f_{n-k}^\prime (S_{k-i-1}), X_k\right)\right|\\
 \ll \sum_{i =1}^n \left(i \wedge \sqrt{n}\right)^2\theta(i)
\ll \sqrt{n} \sum_{i\,\geq\,1} \left(i \wedge \sqrt{n}\right)\theta(i) \,.
\end{multline}
From now on, we assume that $i< [{\sqrt k}]$. We first write
\[
f_{n-k}^\prime (S_{k-i}) -f_{n-k}^\prime (S_{k-i-1}) =f_{n-k}'' (S_{k-i-1}) X_{k-i} + R_{k,i} \,,
\]
where $R_{k,i}$ is $\clF_{k-i}$-measurable and $|R_{k,i}| \leq b_3 X_{k-i}^2 /2$. Hence, by stationarity,
\[
|\covv (R_{k,i}, X_k) | \leq b_3 \left\Vert X^2_0 \bbE_0 (X_i) \right\Vert_1 /2 \ll (n-k+1)^{-1/2} \theta(i) \,,
\]
implying that
\begin{equation}\label{dt10}
\sum_{k=1}^n \sum_{i=1}^{[\sqrt k]}|\covv (R_{k,i}, X_k) | \ll \sqrt{n} \sum_{i = 1}^{[\sqrt n]} \theta(i) \,.
\end{equation}
In order to estimate the term $\bbE (f''_{n-k} (S_{k-i-1})X_{k-i} X_k)$, we introduce the decomposition below:
\[
f''_{n-k} (S_{k-i-1}) = \sum_{\ell=1}^{i-1} \left(f''_{n-k} (S_{k-i-\ell}) - f''_{n-k} (S_{k-i-\ell-1})\right) + f''_{n-k} (S_{k-2i}) \,,
\]
where by convention we set $S_p = 0$ if $p \leq 0$. For any $\ell \in \{ 1, \cdots, i-1 \}$, by using the notation~\eqref{defgamma} and the stationarity, we get that
\[
\big|\covv \big(\Gamma_{k}(k,\ell+i) X_{k-i}, X_k\big)\big| \leq b_3 \Vert X_{- \ell} X_0 \bbE_0 (X_i) \Vert_1 \ll (n-k+1)^{-1/2} \theta(i) \,.
\]
Hence
\begin{equation}\label{dt11}
\sum_{k=1}^n \sum_{i=1}^{[\sqrt k]} \sum_{\ell =1}^{i-1} \left|\covv \big(\Gamma_{k}(k,\ell+i) X_{k-i}, X_k\big)\right| \ll \sqrt n\sum_{i=1}^{[\sqrt n]} i \theta(i) \,.
\end{equation}
As a second step, we bound up $|\covv (f_{n-k}'' (S_{k-2i}), X_{k-i} X_k)|$. Clearly,
\[
f_{n-k}'' (S_{k-2i}) = \sum_{\ell =i}^{k-i -1} \Gamma_{k}(k, \ell+i) + f_{n-k}'' (0) \,.
\]
Now for any $\ell \in \{ i, \cdots, (k-i-1) \}$, by stationarity,
\begin{align*}
\left|\covv \big(\Gamma_{k}(k, \ell+i), X_{k-i} X_k\big)\right| &\leq b_3 \left\Vert X_{- \ell} \big (\bbE_{- \ell} (X_0 X_i) - \bbE (X_0 X_i) \big) \right\Vert_1\\
&\ll (n-k+1)^{-1/2} \theta(\ell) \,.
\end{align*}
Hence
\begin{equation}\label{dt12}
\sum_{k=1}^n \sum_{i=1}^{[\sqrt k]} \sum_{\ell=i}^{k-i -1} \big|\covv \big(\Gamma_{k}(k, \ell +i), X_{k-i} X_k\big)\big| \ll \sqrt n \sum_{\ell=1}^n (\ell \wedge \sqrt{n})\theta(\ell) \,.
\end{equation}
Next, note that
\[
\big|\covv \left(f_{n-k}'' (0), X_{k-i} X_k\right)\big| \ll b_2 \min (\theta (k-i), \theta(i)) \ll \theta([k/2]) \,,
\]
implying that
\begin{equation}\label{dt13second}
\sum_{k=1}^n \sum_{i=1}^{[\sqrt k]} \left|\covv \left(f''_{n-k}(0), X_{k-i} X_k\right)\right| \ll \sum_{k=1}^n \sum_{i=1}^{[\sqrt k]} \theta([k/2]) \ll \sqrt n \sum_{k=1}^n\theta(k) \,.
\end{equation}
Taking into account the inequalities~\eqref{dt7}-\eqref{dt13second}, and using that $\sum_{k\,\geq\,1}\theta(k) < \infty$, we get
\begin{multline}\label{dt14}
\sum_{k=1}^n \left| \bbE \left(f^\prime_{n-k} (S_{k-1}) X_k\right) - \sum_{i=1}^{ [\sqrt k] } \bbE \left(f_{n-k}'' (S_{k-2i})\right) \bbE (X_{k-i} X_k) \right |\\ \ll \sqrt n \left(1 + \sum_{\ell\,\geq\,1} \left(\ell \wedge \sqrt{n}\right)\theta(\ell) \right) \,.
\end{multline}
We handle now the quantity
\[
A_k:= \sum_{i=1}^{ [\sqrt k] } \bbE \left(f''_{n-k}(S_{k-2i})\right) \bbE (X_{k-i} X_k) - \sum_{i=1}^{\infty} \bbE \left(f''_{n-k} (S_{k-1})\right) \bbE (X_{k-i}X_k) \,.
\]
We first note that by stationarity,
\[
\sum_{i\,\geq\,[\sqrt k] +1}\left| \bbE \left(f''_{n-k}(S_{k-1})\right) \bbE (X_{k-i}X_k) \right|
\leq b_2 \sum_{i\,\geq\,[\sqrt k] +1} | \bbE(X_0 \bbE_0(X_i)) | \ll\sum_{i\,\geq\,[\sqrt k] +1} \theta(i) \,.
\]
Hence
\begin{multline}\label{dt14-2}
\sum_{k=1}^n \sum_{i\,\geq\,[\sqrt k] +1}\left| \bbE \left(f''_{n-k}(S_{k-1})\right) \bbE (X_{k-i}X_k) \right|\\ 
\ll \sum_{i\,\geq\,1} \left(i \wedge \sqrt n\right)^2 \theta(i) \ll \sqrt n \sum_{i\,\geq\,1} (i \wedge \sqrt n) \theta(i) \,.
\end{multline}
On the other hand, we write
\[
\bbE \left(f''_{n-k}(S_{k-1}) -f_{n-k}'' (S_{k-2i})\right) \bbE (X_{k-i} X_k) = \sum_{\ell =1}^{2i-1} \bbE \left(\Gamma_{k}(k,\ell)\right) \bbE (X_0 \bbE_0(X_i))\,.
\]
Therefore
\[
\sum_{i=1}^{[\sqrt k]}\left| \bbE \left(f''_{n-k}(S_{k-1}) -f_{n-k}'' (S_{k-2i})\right) \bbE (X_{k-i} X_k) \right|
\leq (n-k+1)^{-1/2} \sum_{i=1}^{[\sqrt k]} i \theta(i) \,,
\]
implying that
\begin{equation}\label{dt18}
\sum_{k=1}^n \sum_{i=1}^{[\sqrt k]}\left| \bbE \left(f''_{n-k}(S_{k-1}) -f_{n-k}'' (S_{k-2i})\right) \bbE (X_{k-i} X_k)\right | \ll {\sqrt n}
\sum_{i=1}^{[\sqrt n]} i \theta(i) \,.
\end{equation}
Hence~\eqref{dt14-2} and~\eqref{dt18} entail that
\begin{equation}\label{dt19}
\sum_{k=1}^n |A_k| \ll \sqrt n \sum_{i\,\geq\,1} \left(i \wedge \sqrt n\right) \theta(i) \,.
\end{equation}
The estimates~\eqref{dt14} and~\eqref{dt19} yield to
\begin{multline}\label{dt20}
\sum_{k=1}^n \left|\bbE \left(f^\prime_{n-k} (S_{k-1}) X_k\right) - \sum_{i=1}^{\infty} \bbE \left(f''_{n-k} (S_{k-1})\right) \bbE (X_{0}X_i) \right|\\ \ll \sqrt n \left(1 + \sum_{\ell\,\geq\,1} \left(\ell \wedge {\sqrt n}\right)\theta(\ell) \right) \,.
\end{multline}
Taking into account the estimates~\eqref{sumdelta-3} and~\eqref{dt20}, Theorem~\ref{thW2}$\MK$\eqref{theo2.1.a} follows.
\end{proof}

\begin{proof}[{Proof of Theorem~\ref{thW2}$\MK$\eqref{theo2.1.b}}] 
Recall that in this case the iid random variables $(Y_k)_{k\,\geq\,1}$ have their first three moments defined by~\eqref{momentY} and~\eqref{defbeta3}.

\begin{nota} \label{TildeX}
For any integer $k \geq 0$, let
$\tilde X_k = X_k - \bbE_0 (X_k)$ and $\tilde S_k = S_k - \bbE_0 (S_k)$, with the convention $S_0=0$.
\end{nota}

Note that, since we assume that $ \sum_{j\,\geq\,1} j \theta (j) < \infty$,
\[
\Vert \bbE_0 (S_n) \Vert_2^2 \leq 2 \sum_{i=1}^n \sum_{j =i }^n \big | \bbE  \left(\bbE_0 (X_i) \bbE_0 (X_j)\right) \big | \leq 2 M \sum_{j=1}^n j \theta (j) \ll 1 \,.
\]
Therefore, using that $f'(0,W) =0$ and that $ | f'(x,W) - f'(y,W) | \leq |x-y|$, we infer that to prove~\eqref{W2condp2}, it is enough to show that for any $f \in \Lambda_2(E) $ and any positive $n$,
\begin{equation}\label{aim1thmain}
\sup_{f\,\in\,\Lambda_2(E)} \bbE \big (f \left(\tilde S_n + N, W\right) - f (T_n + N, W) \big) \ll 1 + \sum_{k\,\geq\,1} k \left(k \wedge \sqrt{n}\right) \theta (k) \,.
\end{equation}
This will be done by using again the Lindeberg method. Let us introduce some additional notations.
\begin{nota}\label{not21}
For any positive integer $k$, let $\Delta_{n,k} = f_{n-k} ({\tilde S}_{k-1} + {\tilde X}_k) - f_{n-k} ({\tilde S}_{k-1} + Y_k)$ where $f_{n-k}$ is defined in Notation~\ref{not21-a}.
\end{nota}
All along the proof, the following lemma will be used (the proof is postponed to the Appendix~\ref{Annex} and is based on the fact that the common distribution of the random variables $(Y_k)_{k\,\geq\,1}$ is smooth).

\begin{lemm}\label{lmacrucial}
Let $f \in \Lambda_2(E)$.
\begin{enumerate}
\item\label{lemm4.4.1}For any $i \geq 2$, there exists a positive constant $\kappa_1$ depending on $\sigma^2$ and $i$ and such that $\Vert f_{n-k}^{(i) }\Vert_{\infty} \leq \kappa_1(n-k+1)^{(2-i)/2}$.

\item\label{lemm4.4.2} Assume that $\sum_{k\,\geq\,1} k \theta_{X,3,4} (k) < \infty$. Then, for any $i \geq 2$, there exists a constant $\kappa_2>0$ depending on $\sigma^2$ and $i$ such that, for any integer $\ell >0$,
\[
\left| \bbE \left(f_{n-k}^{(i)} \left({ \tilde S}_{\ell -1}\right)\right) \right| \leq \kappa_2(n-k+1)^{(1-i)/2} +
\kappa_2(n-k+ \ell)^{(2-i)/2} \,.
\]
\end{enumerate}
\end{lemm}

\begin{rema}
If $(X_k)_{k\,\in\,\bbZ}$ is a stationary sequence of martingale differences, Item~\eqref{lemm4.4.2} is valid under the condition $\sum_{k\,\geq\,1} \theta_{X,2,3} (k) < \infty$.
\end{rema}

Since the sequence $(N,(Y_i)_{i\,\geq\,1})$ is independent of $((X_i)_{i\,\in\,\bbZ}, W)$,
\begin{equation}\label{sumdelta}
\bbE \left(f \left({ \tilde S}_n + N,W\right) - f (T_n + N,W)\right) = \sum_{k=1}^n \bbE (\Delta_{n,k}) \,.
\end{equation}
Next the functions $f_{n-k}$ are $\clC^\infty$. Consequently, from the Taylor integral formula at order $5$,
\begin{equation}\label{firstdecomp}
\Delta_{n,k} = \sum_{j=1}^4 \frac{1}{j!} \, f^{(j)}_{n-k} \left({ \tilde S}_{k-1}\right) \left({ \tilde X}_k^j - Y_k^j\right) +R_{n,k} \,,
\end{equation}
with
\begin{multline*}
R_{n,k} = \frac{1}{24} { \tilde X}_k^5 \int_0^1 (1-s)^4 f_{n-k}^{(5)} \left({ \tilde S}_{k-1} + s { \tilde X}_k\right) ds \\
- \frac{1}{24} Y_k^5 \int_0^1 (1-s)^4 f_{n-k}^{(5)} \left({ \tilde S}_{k-1} + s Y_k\right) ds \,.
\end{multline*}
Taking into account the fact that $\Vert X_{k} \Vert_{\infty} \leq M $ and Item~\eqref{lemm4.4.1} of Lemma~\ref{lmacrucial}, we derive that
\[
\Vert R_{n,k} \Vert_1 \ll \left(M^5 +\bbE \left(|Y_1|^5\right)\right) \left\Vert f_{n-k}^{(5) }\right\Vert_{\infty}
\ll (n-k+1)^{-3/2} \,.
\]
Therefore,
\begin{equation}\label{reste1}
\sum_{k\,\in\,[1,n]} \Vert R_{n,k} \Vert_1 \ll 1 \,.
\end{equation}
Let $\beta_2= \sigma^2= \bbE(Y_k^2)$ and $\beta_4= \bbE(Y_k^4)$. Since the sequence $(Y_i)_{i\, \geq\,1}$ is independent of the sequence $(X_i)_{i\,\geq\,1}$,
\begin{multline}\label{linddec1}
\bbE \left(\Delta_{n,k} - R_{n,k}\right)\\
\begin{aligned}
 =\,&\bbE \left(f'_{n-k} \left({\tilde S}_{k-1}\right) \tilde X_k + \sum_{\ell =2}^4 \frac{1}{\ell ! } f^{(\ell)}_{n-k} \left({\tilde S}_{k-1}\right) \left(\tilde X_k^\ell - \beta_\ell\right)\right) \\
 =\,& \bbE \left(f'_{n-k} \left({\tilde S}_{k-1}\right) \tilde X_k + \sum_{\ell =2}^4 \frac{1}{\ell ! } f^{(\ell)}_{n-k} \left({\tilde S}_{k-1}\right) \left(X_k^\ell - \beta_\ell\right) \right) + {\tilde B}_{n,k}
\\
{:=}\,&  \bbE \left(\Delta_{n,k}^{(1)} + { \frac{1}{2} } \Delta_{n,k}^{(2)} + { \frac{1}{6} } \Delta_{n,k}^{(3)} + \frac{1}{24} \Delta_{n,k}^{(4)} \right) + {\tilde B}_{n,k}\,.
\end{aligned}
\end{multline}
Using Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1}, we first notice that
\begin{equation}\label{tildeB}
\sum_{k\,\in\,[1,n]} \left\vert {\tilde B}_{n,k} \right\vert \ll \sum_{k\,\in\,[1,n]} \Vert \bbE_0 (X_k) \Vert_1 \ll 1.
\end{equation}
Next we develop the first four terms in the right-hand side of the decomposition~\eqref{linddec1} with the help of the Lindeberg method. From now on, to soothe the notation, we shall omit most of the time the index $n$ in all the $\Delta_{n,k}^{(i)} $ and the related quantities, and then rather write $\Delta_{k}^{(i)} $. Let us start with the term $ \Delta_{k}^{(4)} $. Using Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2}, note first that
\begin{equation}\label{T4correction1}
\sum_{k=1}^n \left| \bbE \left(f_{n-k}^{(4) } \left({\tilde S}_{k-1}\right)\right) \left(\bbE (X_k^4) + \beta_4\right) \right| \ll \sum_{k=1}^n \left(\frac{1}{(n-k+1)^{3/2}}+ \frac{1}{n} \right) \ll 1 \,.
\end{equation}
Next, we write
\begin{multline*}
f_{n-k}^{(4) } \left({\tilde S}_{k-1}\right) \left(X_k^4 - \bbE (X_k^4)\right)\\ = f_{n-k}^{(4) } (0) \left(X_k^4 - \bbE (X_k^4)\right)  + \sum_{i=1}^{k-1} \left(f_{n-k}^{(4) } \left({\tilde S}_{k-i}\right) - f_{n-k}^{(4) } \left({\tilde S}_{k-i -1 }\right) \right) \left(X_k^4 - \bbE (X_k^4)\right) \,.
\end{multline*}
By Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} we get
\begin{equation}\label{correction3}
\sum_{k=1}^n \left| \Cov \left(f_{n-k}^{(4) } (0), X_k^4\right) \right| \ll
\sum_{k=1}^n (n-k+1)^{-1} \theta(k) \ll \sum_{k=1}^n \theta(k) \,,
\end{equation}
and
\begin{multline}\label{correction1}
\sum_{k=1}^n \sum_{i=1}^{k-1} \left| \Cov \left(f_{n-k}^{(4) } \left({\tilde S}_{k-i}\right) - f_{n-k}^{(4) } \left({\tilde S}_{k-i -1 }\right), X_k^4 \right) \right| \\
\ll M^5 \sum_{k=1}^n (n-k+1)^{-3/2} \sum_{i=1}^k \theta(i) \ll \sum_{i=1}^n \theta(i) \,.
\end{multline}
Taking into account~\eqref{T4correction1}, \eqref{correction3}, \eqref{correction1} and the fact that $\sum_{k\,\geq\,1}\theta (k) < \infty$, it follows that
\begin{equation}\label{newtermorder4}
\sum_{k=1}^n \left| \bbE \left(\Delta_{n, k}^{(4)}\right) \right| \ll 1 \,.
\end{equation}
Now, concerning the first term in the right-hand side of~\eqref{linddec1}, letting ${\ell_k} = [k/2]$, we write
\begin{multline}\label{linddec2tilde}
\bbE \left(\Delta_{k}^{(1)}\right)\\
\begin{aligned}
 & = \bbE \left(f'_{n-k} \left(\tilde S_{k- {\ell_k} -1}\right) \tilde X_k\right) + \sum_{i=1}^{\ell_k} \bbE \left(\left\{ f'_{n-k}\left(\tilde S_{k-i}\right) - f'_{n-k} \left(\tilde S_{k-i- 1}\right) \right\} \tilde X_k \right)\\
& = \bbE \left({\tilde \Delta}_{k,2}^{(1)}\right) + \frac{1}{2} \bbE \left({\tilde \Delta}_{k,3}^{(1)}\right) + \frac{1}{6} \bbE \left({\tilde \Delta}_{k,4}^{(1)}\right) + {\tilde B}_{n,k}^{(1)} \,,
\end{aligned}
\end{multline}
where, for $j=2,3,4$,
\[
{\tilde \Delta}_{k,j}^{(1)} = \sum_{i=1}^{\ell_k} f^{(j)}_{n-k} \left(\tilde S_{k-i- 1}\right) \tilde X^{j-1}_{k-i} \tilde X_k \,,
\]
and
\begin{multline*}
{\tilde B}_{n,k}^{(1)}=\\
 \bbE \left(f'_{n-k} \left(\tilde S_{k- {\ell_k} -1}\right) \tilde X_k\right) + \frac{1}{6} \sum_{i=1}^{\ell_k} \int_0^1 (1-s)^3 \bbE \left(f_{n-k}^{(5)}\left(\tilde S_{k-i-1} + s \tilde X_{k-i}\right) \tilde X_{k-i}^4 \tilde X_k \right) ds.
\end{multline*}
We start by noticing that, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1}, for any $m \geq 2 $ and any $s$ in $[0,1]$,
\begin{multline}\label{compwithtilde}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \left\Vert f^{(m)}_{n-k} \left(\tilde S_{k-i- 1} + s \tilde X_{k-i}\right) \left(\tilde X^{m-1} _{k-i}\tilde X_k - X^{m-1} _{k-i} X_k\right) \right\Vert_1 \\
\ll M^{m-1} \sum_{k=1}^n (n-k+1)^{(2-m)/2}\sum_{i=1}^{\ell_k} \big (\Vert \bbE_0 (X_k) \Vert_1 + \Vert \bbE_0 (X_{k-i}) \Vert_1 \big) \ll \sum_{k \geq 1} k \theta(k) \,.
\end{multline}
On the other hand, since $ f'_{n-k} (0) $ is $\clG_0$-measurable, $\bbE (f'_{n-k} (0) \tilde X_k) =0$. Therefore
\begin{multline*}
\left\vert \bbE \left(f'_{n-k} \left(\tilde S_{k- {\ell_k} -1}\right) \tilde X_k\right) \right\vert = \left\vert \bbE \left(\left\{ f'_{n-k} \left(\tilde S_{k- {\ell_k} -1}\right) - f'_{n-k} (0) \right\} \tilde X_k \right)\right\vert \\
\leq \int_0^1 \left\vert \bbE \left (f''_{n-k} \left(t \tilde S_{k- {\ell_k} -1}\right) \tilde S_{k- {\ell_k} -1} \tilde X_k \right) \right\vert dt \leq 4 M \left\Vert f_{n-k}^{(2)} \right\Vert_{\infty} (k-{\ell_k})\theta({\ell_k}) \,.
\end{multline*}
Since $\Vert f_{n-k}^{(2)} \Vert_{\infty} \ll 1$ and $\sum_{k \,\geq\,1} k \theta(k) < \infty$,
\begin{equation}\label{B1b1}
\sum_{k\,\in\,[1,n]} \left\vert \bbE \left(f'_{n-k} \left(\tilde S_{k- {\ell_k} -1}\right) \tilde X_k\right) \right\vert \ll 1 \,.
\end{equation}
Next, Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} implies that
\[
\left\vert \bbE \left(f_{n-k}^{(5)} \left(\tilde S_{k-i-1} + s \tilde X_{k-i}\right) X_{k-i}^4 X_k \right) \right\vert \leq M^4 \left\Vert f_{n-k}^{(5)} \right\Vert_{\infty} \theta (i) \ll (n-k+1)^{-3/2} \theta(i) \,.
\]
Hence
\begin{equation}\label{B1b3}
\sum_{k =1}^n \sum_{i=1}^{\ell_k} \left\vert \bbE \left(f_{n-k}^{(5)} \left(S_{k-i-1} + s X_{k-i}\right) X_{k-i}^4 X_k \right) \right\vert \ll \sum_{i=1}^{n} \theta(i) \ll 1 \,.
\end{equation}
The upper bounds~\eqref{compwithtilde}, \eqref{B1b1} and~\eqref{B1b3} imply that
\begin{equation}\label{B1tilde}
\sum_{k\,\in\,[1,n]} \left\vert {\tilde B}_{n,k}^{(1)} \right\vert \ll 1 \,.
\end{equation}
Next, taking into account Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2} and the fact that $|\bbE(X^3_{k-i}X_k) | \leq M^3\theta (i) $, we derive that
\[
\left\vert \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-i- 1}\right)\right\} \bbE\left(X^3_{k-i}X_k\right) \right\vert \ll \left((n-k+1)^{-3/2} + (n-i)^{-1}\right) \theta(i) \,.
\]
Therefore
\begin{equation}\label{B1b2}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \left\vert \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-i- 1}\right) \right\} \bbE\left(X^3_{k-i}X_k\right) \right\vert \ll \sum_{i\,\geq\,1}\theta(i) \ll 1 \,.
\end{equation}
So, overall, starting from~\eqref{linddec2tilde} and taking into account~\eqref{compwithtilde}, \eqref{B1tilde} and~\eqref{B1b2} we get
\begin{equation}\label{linddec2}
\bbE \left(\Delta_{k}^{(1)}\right) = \bbE\left(\Delta_{k,2}^{(1)}\right) + \frac{1}{2} \bbE\left(\Delta_{k,3}^{(1)}\right) + \frac{1}{6} \bbE \left(\Delta_{k,4}^{(1)}\right) + A_{k,2}^{(1)} + \frac{1}{2} A_{k,3}^{(1)} + B_{n,k}^{(1)} \,,
\end{equation}
where $B_{n,k}^{(1)}$ is such that
\begin{equation}\label{BoundB1}
\sum_{k\,\in\,[1,n]} \left| B_{n,k}^{(1)}\right| \ll 1 \,,
\end{equation}
and the following notations have been used: for $j= 2,3,4$,
\begin{equation}\label{defdeltank4}
\begin{split}
\Delta_{k,j}^{(1)} &= \sum_{i=1}^{\ell_k} \left\{ f^{(j)}_{n-k} \left(\tilde S_{k-i- 1}\right) \right\} \left(X_{k-i}^{j-1}X_k\right)^{(0)},\\
A_{k,j}^{(1)} &= \sum_{i=1}^{\ell_k} \bbE \left\{ f^{(j)}_{n-k} \left(\tilde S_{k-i- 1}\right) \right\} \bbE\left(X_{k-i}^{j-1}X_k\right).
\end{split}
\end{equation}
Introduce now the following additional notations.
\begin{nota}
Let $ \gamma_i= \bbE (X_0X_i) $ and $ \gamma^{(2)}_i= \bbE (X^2_0X_i) $. Define
$\beta_{2, {\ell_k}} = 2 \sum_{i =1}^{{\ell_k}} \gamma_i$, $ \beta_{2}^{({\ell_k})} = 2 \sum_{i \,\geq\,{\ell_k} +1} \gamma_i$ and $\beta_{3, 1, {\ell_k}}= 3 \sum_{i = 1 }^{{\ell_k}} \ \gamma^{(2)}_i$.
\end{nota}

Next note that, since $\bbE(X_{k-i}X_k) = \gamma_i$,
\begin{multline}\label{dec2bisbis}
\frac{1}{2} \bbE \left\{ f''_{n-k} \left(\tilde S_{k-1}\right) \right\} \beta_{2, {\ell_k}} - A_{k,2}^{(1)}
 = \sum_{i=1}^{\ell_k} \gamma_i \sum_{j=1}^i \bbE \left\{ f''_{n-k} \left(\tilde S_{k-j}\right) - f''_{n-k} \left(\tilde S_{k-j -1}\right) \right\}  \\ 
=\sum_{i=1}^{\ell_k} \gamma_i \sum_{j=1}^i \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) \tilde X_{k-j} \right\}+\sum_{i=1}^{\ell_k} \frac{\gamma_i }{2} \sum_{j=1}^i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \tilde X^2_{k-j} \right\} + r_{n,k,2}^{(1)} \,,
\end{multline}
where
\[
r_{n,k,2}^{(1)} := \frac 12 \int_0^1 (1-t)^2 \sum_{i=1}^{\ell_k} \gamma_i \sum_{j=1}^i \bbE \left\{ f^{(5)}_{n-k} \left(S_{k-j -1} + t \tilde X_{k-j}\right) \tilde X_{k-j}^3 \right\} dt \,.
\]
By Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1}, it follows that
\[
\left| r_{n,k,2}^{(1)}\right| \ll M^4 (n-k+1)^{-3/2} \sum_{i=1}^{\ell_k} i \theta(i) \,.
\]
Since $\sum_{i\,\geq\,1} i \theta(i) < \infty$, this implies that
\begin{equation}\label{Boundr21}
\sum_{k\,\in\,[1,n]} \left\vert r_{n,k,2}^{(1)}\right\vert \ll 1 \,.
\end{equation}
Next, taking into account Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1}, we get
\[
\sum_{i=1}^{\ell_k} \sum_{j=1}^i \Big | \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) \bbE_0 (X_{k-j}) \right\} \gamma_i \Big| \ll (n-k+1)^{-1/2} \theta(k - j) \theta(i) \,.
\]
Hence, since $\ell_k = [k/2]$ and $\sum_{i\,\geq\,1} i \theta(i) < \infty$,
\begin{equation}\label{additional-20-1}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \Big | \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j -1}\right) \bbE_0 \left(X_{k-j}\right) \right\} \gamma_i \Big | \ll \sum_{k=1}^n \theta([k/2]) \sum_{i=1}^{ \ell_k } i \theta(i) \ll 1 \,.
\end{equation}
With similar arguments, we have
\begin{equation}\label{additional-20-2}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \Big | \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \left(\tilde X^2_{k-j} - X^2_{k-j}\right) \right\} \gamma_i \Big | \ll 1 \,.
\end{equation}
In addition, by taking into account Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2}, we get
\[
\Big | \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \bbE \left(X^2_{k-j}\right) \right\} \gamma_i \Big | \ll M^3 \left((n-k+1)^{-3/2} + (n-j)^{-1}\right) \theta (i) \,.
\]
Hence,
\begin{multline}\label{additional-20-3}
\sum_{k=1}^n\sum_{i=1}^{\ell_k} \sum_{j=1}^i \Big| \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \bbE \left(X^2_{k-j}\right) \right\} \gamma_i \Big | \\
\ll \sum_{k=1}^n \left((n-k+1)^{-3/2} + (n-{\ell_k})^{-1}\right) \sum_{i=1}^{\ell_k} i \ \theta(i) \ll \sum_{i=1}^n i \theta(i) \ll 1 \,.
\end{multline}
So overall, starting from~\eqref{dec2bisbis} and taking into account the upper bounds~\eqref{Boundr21}-\eqref{additional-20-3}, we derive that
\begin{multline}\label{dec2bisbis-20sept}
\frac{1}{2} \bbE \left\{ f''_{n-k} \left(\tilde S_{k-1}\right) \right\}
\beta_{2, {\ell_k}} - A_{k,2}^{(1)} =
\\
\sum_{i=1}^{\ell_k} \gamma_i \sum_{j=1}^i \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j -1}\right) X_{k-j} \right\}  +
\sum_{i=1}^{\ell_k} \frac{\gamma_i}{2} \sum_{j=1}^i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \left(X^2_{k-j}\right)^{(0)}\right\} + R_{n,k,2}^{(1)} \,,
\end{multline}
where $ R_{n,k,2}^{(1)} $ is such that
\begin{equation}\label{BoundR21-20sept}
\sum_{k\,\in\,[1,n]} \left\vert R_{n,k,2}^{(1)} \right\vert \ll 1 \,.
\end{equation}

Now, let $ r_{n,k,3}^{(1)} = \frac{1}{3} \bbE \{ f^{(3)}_{n-k} (\tilde S_{k-1}) \} \beta_{3,1, {\ell_k}} - A_{k,3}^{(1)}$. Then, recalling the notation $\gamma_i^{(2)} = \bbE(X_0^2 X_i)$,
\begin{equation}\label{dec2bisbisavec3}
\begin{split}
r_{n,k,3}^{(1)}=&\, \sum_{i=1}^{{\ell_k}} \gamma_i^{(2)} \bbE \left
\{ f^{(3)}_{n-k} \left(\tilde S_{k-1}\right) \right\} - A_{k,3}^{(1)}\\
=&\, \sum_{i=1}^{\ell_k} \gamma_i^{(2)} \sum_{j=1}^i \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j}\right) - f^{(3)}_{n-k} \left(\tilde S_{k-j -1}\right) \right\} \\
=&\, \sum_{i=1}^{\ell_k} \gamma_i^{(2)} \sum_{j=1}^i \left(\bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \tilde X_{k-j} \right\}\right.\\
&\quad\left. + \int_0^1 (1-t) \bbE \left\{ f^{(5)}_{n-k} \left(\tilde S_{k-j -1} + t \tilde X_{k-j}\right) \tilde X^2_{k-j} \right\} dt \right) \\
{:=}&\, r_{n,k,3}^{(1)} (1) + r_{n,k,3}^{(1)} (2) \,.
\end{split}
\end{equation}
Taking into account Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} and the fact that $| \gamma_i^{(2)} | \leq M^2 \theta(i)$ and $ \Vert \tilde X_{k-j} \Vert_{\infty} \leq 2M$, it follows that
\[
\left\vert r_{n,k,3}^{(1)} (2)\right\vert \ll \sum_{i=1}^{\ell_k} \sum_{j=1}^i
\frac{ \theta (i) } { (n-k+1)^{3/2} }
\ll
\frac{ \sum_{i=1}^{\ell_k} i \ \theta (i) } { (n-k+1)^{3/2} } \,.
\]
Therefore, since $\sum_{i\,\geq\,1} i \theta(i) < \infty$,
\begin{equation}\label{Boundr312}
\sum_{k\,\in\,[1,n]} \left\vert r_{n,k,3}^{(1)} (2) \right\vert \ll 1 \,.
\end{equation}
On another hand, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1},
\[
\left| \gamma_i^{(2)} \bbE \big \{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \bbE_0(X_{k-j}) \big \} \right| \ll (n-k+1)^{-1} \theta(k-j) \theta(i) \,.
\]
Hence, since $\sum_{i\,\geq\,1} i \theta(i) < \infty$,
\begin{equation}\label{new-reste132-20sept}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \left| \gamma_i^{(2)} \bbE \big \{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \bbE_0(X_{k-j}) \big \} \right| \ll \sum_{k=1}^n \theta([k/2]) \sum_{i=1}^{n} i \theta(i) \ll 1 \,.
\end{equation}

Starting from~\eqref{firstdecomp} and taking into account~\eqref{reste1}, \eqref{linddec1}, \eqref{tildeB}, \eqref{newtermorder4}, \eqref{linddec2}, \eqref{BoundB1}, \eqref{dec2bisbis-20sept}-\eqref{new-reste132-20sept} and the fact that $\beta_2 = \sigma^2 = \bbE (X_0^2) + \beta_{2, \ell_k} + \beta_{2}^{(\ell_k)}$, we get
\begin{multline}\label{linddec3}
\bbE (\Delta_{n,k})= \\
\begin{aligned}
& \bbE \left(\Delta_{k,2}^{(1)}\right) + {\frac{1}{2}} \bbE 
\left(f''_{n-k} \left(\tilde S_{k-1}\right) \left(X_k^2\right)^{(0)}\right) - { \frac{1}{2} } \bbE \left(f''_{n-k} \left(\tilde S_{k-1}\right) \right) \beta_2^{({\ell_k})} +
\frac{1}{2} \bbE \left(\Delta_{k,3}^{(1)}\right)  \\
 - &\sum_{i=1}^{\ell_k} \gamma_i \sum_{j=1}^i \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j -1}\right) X_{k-j} \right\} - \sum_{i=1}^{\ell_k} \frac{\gamma_i}{2} \sum_{j=1}^i \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-j -1}\right) \left(X^2_{k-j}\right)^{(0)} \right\} \\
 - &\sum_{i=1}^{\ell_k} \frac{\gamma_i^{(2)}}{2} \sum_{j=1}^i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) X_{k-j} \right\} +
\frac{1}{6} \bbE \left(f_{n-k}^{(3)}\! \left(\tilde S_{k-1}\right)\! \left(X_k^3 - \left(\beta_3 - \beta_{3,1, {\ell_k}}\right)\! \right)\! \right)\\& + \frac{1}{6} \bbE \left(\Delta_{k,4}^{(1)}\right) + \Gamma^{(1)}_{n,k} \,,
\end{aligned}
\end{multline}
where $ \Gamma^{(1)}_{n,k} $ satisfies
\begin{equation}\label{boundGamma}
\sum_{k\,\in\,[1,n]} \left\vert \Gamma^{(1)}_{n,k} \right\vert \ll 1 \,.
\end{equation}
Note first that
\begin{equation}\label{B0term1}
\sum_{k=1}^n \left\vert \bbE \left(f''_{n-k} \left(\tilde S_{k-1}\right) \right) \beta_2^{({\ell_k})} \right\vert \ll \sum_{k=1}^n \left\vert \beta_2^{({\ell_k})} \right\vert \ll \sum_{k=1}^n \sum_{i\, \geq\,{\ell_k} +1} \theta (i) \ll
\sum_{i\,\geq\,1} i \theta(i) \ll 1 \,.
\end{equation}
To handle the first two terms in the right hand side of~\eqref{linddec3}, define
\begin{equation}\label{defmki}
\begin{split}
m_k = \left[\sqrt{n-k}\right], \ m_{k,i} &= \min (m_k, k-i-1) \\ \text{and}\ D_{k,i,2}^{(1)} &= \bbE \big \{ f''_{n-k} \left(\tilde S_{k-i- 1}\right) \left(X_{k-i}X_k\right)^{(0)} \big \}.
\end{split}
\end{equation}
Then, for any integer $i$ in $[0, {\ell_k}]$, with the convention that $\tilde S_u =0$ for any $u \leq 0$, we write
\begin{multline*}
D_{k,i,2}^{(1)} \\
= \bbE \left\{ \left(f''_{n-k} \left(\tilde S_{k- i - m_{k,i} -1 }\right) + \sum_{j=i+1}^{i+m_{k,i} } \left(f''_{n-k} \left(\tilde S_{k-j }\right) - f''_{n-k} \left(\tilde S_{k-j -1}\right) \right) \right) \left(X_{k-i}X_k\right)^{(0)} \right\}.
\end{multline*}
Let then, for $\ell =3,4,5$ and $t$ in $[0,1]$,
\begin{equation}\label{defdelta24ibis}
{\tilde \Delta}_{k,i,2}^{(1,\ell)} (t) = \sum_{j=i+1}^{i+m_{k,i} } f^{(\ell)}_{n-k}\left(\tilde S_{k-j -1}+ t\tilde X_{k-j}\right) \tilde X_{k-j}^{\ell -2} (X_{k-i}X_k)^{(0)}.
\end{equation}
By the Taylor integral formula,
\begin{multline}\label{linddec4}
D_{k,i,2}^{(1)} =  \bbE \left\{ f''_{n-k} \left(\tilde S_{k- i - m_{k,i} -1 }\right) \left(X_{k-i}X_k\right)^{(0)} + {\tilde \Delta}_{k,i,2}^{(1,3)} (0) + \frac 12 {\tilde \Delta}_{k,i,2}^{(1,4)} (0) \right \}\\  + \frac{1}{2}
\int_0^1 (1-t)^2 \bbE \left\{ {\tilde \Delta}_{k,i,2}^{(1,5)} (t) \right\} dt.
\end{multline}
But, since $\Vert f''_{n-k} \Vert_{\infty} \ll 1$,
\begin{multline*}
\sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \left\vert \bbE \left\{ f''_{n-k} \left(\tilde S_{k- i - m_{k,i} -1 }\right) \left(X_{k-i}X_k\right)^{(0)} \right\} \right\vert\\
\begin{aligned}
&\ll \sum_{k=1}^n \sum_{i=0}^{{\ell_k}}
\left(\theta (m_k) + \theta (k-i) \right) \wedge \theta (i) \\
&\ll \sum_{k=1}^n \left(m_k
\theta (m_k) + \sum_{i=m_k}^{\ell_k} \theta (i) + k \theta([k/2]) \right)\\
&\ll 1+ \sum_{k=1}^{\left[\sqrt{n}\right]} k^2 \theta(k) + n \sum_{k\,\geq\,\left[\sqrt{n}\right]} \theta(k) \,.
\end{aligned}
\end{multline*}
Hence
\begin{equation}\label{B1term1}
\sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \left\vert \bbE \left\{ f''_{n-k} \left(\tilde S_{k- i - m_{k,i} -1 }\right) \left(X_{k-i}X_k\right)^{(0)} \right\} \right\vert \ll 1+ \sum_{k\,\geq\,1} k \left(k \wedge \sqrt{n}\right) \theta (k) \,.
\end{equation}
On another hand, by using Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1},
\begin{equation}\label{B2term1}
\sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \left| \bbE \left\{ {\tilde \Delta}_{k,i,2}^{(1,5)} (t) \right\} \right|
\ll \sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \sum_{j=i+1}^{i+m_{k,i}}
\frac{\theta (j-i) \wedge \theta (i)}{(n-k+1)^{3/2}}
\ll \sum_{j=1}^nj \theta ([j/2]) \ll 1 \,.
\end{equation}
For $\ell=3,4$, set
\begin{equation}\label{defdelta24i}
\Delta_{k,i,2}^{(1,\ell)} := \sum_{j=i+1}^{i+m_{k,i} } f^{(\ell)}_{n-k}\left(\tilde S_{k-j -1}\right) X_{k-j}^{\ell -2} \left(X_{k-i}X_k\right)^{(0)}.
\end{equation}
Applying Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} and using that $m_{k,i} \leq \sqrt{n-k+1}$, we get
\begin{multline}\label{linddec5ante1}
\sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \left|\bbE \left\{ {\tilde \Delta}_{k,i,2}^{(1,3)} (0) - \Delta_{k,i,2}^{(1,3)} \right\} \right|
\\
\begin{aligned}
&\leq \sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \sum_{j=i+1}^{i+m_{k,i} } \left\Vert \bbE_0 (X_{k-j}) f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) (X_{k-i}X_k)^{(0)} \right\Vert_1 \\
&\ll \sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \sum_{j=i+1}^{i+m_{k,i} }
\frac{ \theta(k-j) \wedge \theta(j-i) \wedge \theta (i) } {(n-k+1)^{1/2} }
\ll \sum_{k=1}^n k \theta ([k/3]) \ll 1 \,.
\end{aligned}
\end{multline}
Similarly, since $\Vert f^{(4)}_{n-k} \Vert_{\infty} \ll (n-k+1)^{-1}$, we derive
\begin{equation}\label{linddec5ante2}
\sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \left|\bbE \left\{ {\tilde \Delta}_{k,i,2}^{(1,4)} (0) - \Delta_{k,i,2}^{(1,4)} \right\} \right| \ll 1 \,.
\end{equation}
Starting from~\eqref{linddec3} and taking into account~\eqref{boundGamma}, \eqref{B0term1}, \eqref{linddec4}, \eqref{B1term1}, \eqref{B2term1}, \eqref{linddec5ante1} and~\eqref{linddec5ante2}, we then derive that
\begin{multline}\label{linddec5}
\bbE (\Delta_{n,k})=\\
\begin{aligned}
& \frac 12 \sum_{i=0}^{\ell_k} \left(1 + \bfun_{\left\{i\,\neq\,0\right\}}\right) \bbE \left\{ { \Delta}_{k,i,2}^{(1,3)} + \frac 12 { \Delta}_{k,i,2}^{(1,4)} \right\} +
\frac{1}{2} \bbE \left(\Delta_{k,3}^{(1)}\right) \\
& - \sum_{i=1}^{\ell_k} \gamma_i \sum_{j=1}^i \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j -1}\right) X_{k-j} \right\} - \sum_{i=1}^{\ell_k} \frac{\gamma_i}{2} \sum_{j=1}^i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \left(X^2_{k-j}\right)^{(0)} \right\}\\
& - \sum_{i=1}^{\ell_k} \frac{\gamma_i^{(2)}}{2} \sum_{j=1}^i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) X_{k-j} \right\} +
\frac{1}{6} \bbE \left(f_{n-k}^{(3)} \left(\tilde S_{k-1}\right) \left(X_k^3 - \left(\beta_3 - \beta_{3,1, {\ell_k}}\right)\right)\right)\\
& + \frac{1}{6} \bbE \left(\Delta_{k,4}^{(1)}\right) + \Gamma^{(2)}_{n,k} \,,
\end{aligned}
\end{multline}
where $ \Gamma^{(2)}_{n,k} $ satisfies $\sum_{k=1}^n \vert \Gamma^{(2)}_{n,k} \vert \ll 1 + \sum_{k\,\geq\,1} k (k \wedge \sqrt{n}) \theta (k) $. Introduce now the follo\-wing notations.
\begin{Notation} Let
\[
\beta_{3, 2, m_k} = 3 \sum_{i = 1 }^{m_{k,0}} \bbE \left(X_0 X^2_i\right) \,, \quad
\beta^*_{3, {\ell_k}, m_k}= 6 \sum_{i= 1}^{{\ell_k} } \sum_{j= i+1}^{i + m_{k,i}} \bbE \left(X_0 X_{j-i}X_{j}\right);
\]
Next, let ${\tilde \beta}_3^{({\ell_k},m_k)} = \beta_3 - \beta_{3,1, {\ell_k}}- \{ \bbE (X_0^3) + \beta_{3,2, m_k} + \beta^*_{3,{\ell_k},m_k} \}$, where we recall that $m_k$ and $m_{k,i}$ have been defined in~\eqref{defmki}.
\end{Notation} Since
\begin{align*}
{\tilde \beta}_3^{({\ell_k},m_k)} = & \quad 3 \sum_{i\,>\,{\ell_k} } \bbE \left(X_0^2 X_i\right) + 3 \sum_{i\,>\,m_{k,0} } \bbE \left(X_0 X^2_i\right) \\
& + 6 \sum_{i= 1}^{{\ell_k}} \sum_{j\,>\,m_{k,i} } \bbE \left(X_0 X_jX_{j+i}\right) + 6 \sum_{i\,>\,{\ell_k} } \sum_{j\,\geq\,1} \bbE \left(X_0 X_jX_{j+i}\right),
\end{align*}
by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1},
\begin{multline*}
\sum_{k =1}^n \left\Vert f^{(3)}_{n-k} \right\Vert_{\infty} \left| {\tilde \beta}_3^{({\ell_k},m_k)} \right| \ll \sum_{k =1}^n (n-k+1)^{-1/2} \sum_{i\,\geq\,{\ell_k} \wedge m_k } \theta (i) \\
+ \sum_{k =1}^n (n-k+1)^{-1/2} \left(\sum_{i= 1}^{{\ell_k}} \sum_{j\,\geq\,m_{k,i}+1} \theta (j) \wedge \theta (i) + \sum_{i\,\geq\,{\ell_k}} \sum_{j\,\geq\,1} \theta (j) \wedge \theta (i) \right) \,.
\end{multline*}
By simple algebra, and since $\sum_{i\,\geq\,1} i\theta(i) < \infty$, we then derive that
\begin{equation}\label{Boundtildebeta}
\sum_{k =1}^n \left\Vert f^{(3)}_{n-k} \right\Vert_{\infty} \left|{\tilde \beta}_3^{({\ell_k},m_k)}\right| \ll 1 + \sum_{i\,\geq\,1} i \left(i \wedge \sqrt{n}\right)\theta(i) \,.
\end{equation}
Next we shall first center the random variables $X_{k-j} (X_{k-i}X_k)^{(0)} $ appearing in the quantity $ \Delta_{k,i,2}^{(1,3)} $. Using that $\bbE  \{ X_{k-j} (X_{k-i}X_k)^{(0)} \} = \bbE \{ X_{k-j} X_{k-i}X_k \} $, an application of Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2} gives
\begin{multline}\label{defJ1k}
J_{1,k}:=\\
\left\vert \frac{1}{6} \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-1}\right) \right\} \beta^*_{3,{\ell_k},m_k} - \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) \right\} \bbE \left\{ X_{k-j} \left(X_{k-i}X_k\right)^{(0)} \right\} \right\vert \\
\ll \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \left\vert \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-1}\right) - f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) \right\} \right \vert \left(\theta(j-i) \wedge \theta(i)\right) \,.
\end{multline}
Let us handle the quantity $\bbE \{ f^{(3)}_{n-k}(\tilde S_{k-1}) - f^{(3)}_{n-k}(\tilde S_{k-j -1})\} $. By Taylor integral formula,
\begin{multline*}
\bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-1}\right) - f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) \right\} = \sum_{\ell=1}^j \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-\ell-1}\right) \tilde X_{k- \ell} \right\} \\
+ \int_0^1 (1-t) \sum_{\ell=1}^j \bbE \left\{ f^{(5)}_{n-k}\left(\tilde S_{k-\ell-1} + t \tilde X_{k- \ell}\right) \tilde X^2_{k- \ell} \right\} dt \,.
\end{multline*}
By using Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} and noticing that $\theta(j-i) \wedge \theta(i) \leq \theta([j/2]) $, we get
\begin{multline}\label{computation1-21}
\sum_{k =1}^n \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{\ell=1}^j \left| \bbE \left\{ f^{(5)}_{n-k}\left(\tilde S_{k-\ell-1} + t \tilde X_{k- \ell}\right) \tilde X^2_{k- \ell} \right\} \left(\theta(j-i) \wedge \theta(i)\right) \right| \\
\begin{aligned}
& \ll \sum_{k =1}^n (n-k+1)^{-3/2} \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } j \theta([j/2])\\
& \ll \sum_{k =1}^n (n-k+1)^{-3/2} \left\{ \sum_{j=1}^{2 \left[\sqrt n\right]} j^2 \theta(j) + m_k \sum_{j\,\geq \left[\sqrt{n}\right]} j \theta(j) \right\}\\ 
&\ll \sum_{i\,\geq\,1} i \left(i \wedge \sqrt{n}\right)\theta(i) \,.
\end{aligned}
\end{multline}
Next, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} again,
\begin{multline}\label{computation2-21}
\sum_{k =1}^n \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{\ell=1}^j \left| \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-\ell-1}\right) \bbE_0(X_{k- \ell}) \right\} \left(\theta(j-i) \wedge \theta(i)\right) \right|\\
\begin{aligned}
& \ll \sum_{k =1}^n (n-k+1)^{-1} \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{\ell=1}^j \theta(k-\ell) \theta([j/2])\\
& \ll \sum_{k =1}^{[n/2]} (n-k+1)^{-1} \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } j \theta([j/2])\\
& \quad + \sum_{k =[n/2] +1}^{n } (n-k+1)^{-1} \sum_{i=1}^{[k/2]} \sum_{j=i+1}^{i+m_k} \sum_{\ell=1}^{[k/2] + m_k} \theta(k-\ell) \theta([j/2]) \,.
\end{aligned}
\end{multline}
With the computations as given in~\eqref{computation1-21} and the fact that
\begin{multline}\label{computation3-21}
\sum_{k =[n/2] +1}^{n } (n-k+1)^{-1} \sum_{i=1}^{[k/2]} \sum_{j=i+1}^{i+m_k} \sum_{\ell=1}^{[k/2] + m_k} \theta(k-\ell) \theta([j/2]) \\
\ll \sum_{k =[n/2] +1}^{n } (n-k+1)^{-1} k \theta([k/4]) \sum_{i=1}^{[k/2]} \sum_{j=i+1}^{i+m_k} \theta([j/2]) \ll \sum_{k\,\geq\,1} k \theta(k) \sum_{i\,\geq\,1} \theta(i) \,,
\end{multline}
we derive, overall, that
\begin{equation}\label{computation4-21}
J_{1,k} \leq \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{\ell=1}^j \left\vert \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-\ell-1}\right) X_{k- \ell} \right\} \right\vert \theta([j/2]) + \Gamma^{(3)}_{n,k} \,,
\end{equation}
where $ \Gamma^{(3)}_{n,k} $ satisfies $ \sum_{k=1}^n \vert \Gamma^{(3)}_{n,k} \vert \ll 1+ \sum_{i\,\geq\,1} i (i \wedge \sqrt{n})\theta(i) $. Next, for $m_{k, \ell}$ defined in~\eqref{defmki}, write
\begin{multline*}
\bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-\ell-1}\right) X_{k- \ell} \right\} \\ = \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-\ell-m_{k,\ell} -1}\right) X_{k- \ell} \right\} + \sum_{u=\ell+1}^{\ell + m_{k,\ell}} \bbE
\left\{ \left(f^{(4)}_{n-k}\left(\tilde S_{k-u}\right) - f^{(4)}_{n-k}\left(\tilde S_{k-u-1}\right)\right) X_{k- \ell} \right\} \,,
\end{multline*}
implying, by using Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1}, that
\begin{multline*}
\left| \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-\ell-1}\right) X_{k- \ell} \right\} \right| \\
\ll (n-k+1)^{-1} \left(\theta(m_k) + \theta (k-\ell)\right) + (n-k+1)^{-3/2} \sum_{u=\ell+1}^{\ell + m_{k,\ell}} \theta (u-\ell) \,.
\end{multline*}
Hence
\begin{multline*}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{\ell =1}^j \left| \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-\ell-1}\right) X_{k- \ell} \right\} \right| \theta([j/2]) \\
\ll \sum_{k =1}^n \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } (n-k+1)^{-1} \theta([j/2]) \left\{ j \theta(m_k) + \sum_{\ell =1}^j \theta (k-\ell) \right\} \\ + \sum_{k =1}^n \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } (n-k+1)^{-3/2} j \theta([j/2]) \sum_{u=1}^{\left[\sqrt n\right]} \theta (u) \,.
\end{multline*}
With the computations as given in~\eqref{computation1-21}-\eqref{computation3-21} together with the fact that
\[
\sum_{k =1}^n (n-k+1)^{-1} m_k \theta(m_k) \ll \sum_{k=1}^n k^{-1/2} \theta\left([\sqrt{k}]\right) \ll \sum_{k=1}^{[\sqrt n]} \theta(k) \,,
\]
it follows that
\begin{equation}\label{computation5-21}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{\ell =1}^j \left| \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-\ell-1}\right) X_{k- \ell} \right\} \right| \theta([j/2]) \ll 1+ \sum_{i\,\geq\,1} i \left(i \wedge \sqrt{n}\right)\theta(i) \,.
\end{equation}
Therefore~\eqref{computation4-21} together with~\eqref{computation5-21} imply
\begin{multline}\label{B1dec5}
\sum_{k=1}^n \left\vert \frac{1}{6} \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-1}\right) \right\} \beta^*_{3,{\ell_k},m_k}\right. \\
-\left.\sum_{i=1}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) \right\} \bbE \left\{ X_{k-j} \left(X_{k-i}X_k\right)^{(0)} \right\} \right \vert
\ll 1+ \sum_{i\,\geq\,1} i \left(i \wedge \sqrt{n}\right)\theta(i) \,.
\end{multline}
With similar arguments, we infer that
\begin{multline}\label{B1dec5i=0}
\sum_{k=1}^n \left\vert \frac{1}{3} \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-1}\right) \right\} \beta_{3,2,m_k} - \sum_{j=1}^{m_{k,0} } \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) \right\} \bbE \left\{ X_{k-j} \left(X^2_k\right)^{(0)} \right\} \right\vert
\\ 
\ll 1+ \sum_{i\,\geq\,1} i \left(i \wedge \sqrt{n}\right)\theta(i) \,.
\end{multline}
Now, for any integer $i \in [0,n]$, let
\[
{ \Delta}_{k,i,2}^{(1,3,0)} := \sum_{j=i+1}^{i+m_{k,i} } \big \{ f^{(3)}_{n-k}\left(\tilde S_{k-j -1}\right) \left(X_{k-j} (X_{k-i}X_k)^{(0)}\right)^{(0)} \big \} \,.
\]
\pagebreak

Starting from~\eqref{linddec5} and taking into account~\eqref{Boundtildebeta}, \eqref{B1dec5} and~\eqref{B1dec5i=0}, we then obtain
\begin{multline}\label{linddec6}
\bbE (\Delta_{n,k}) = \\
\begin{aligned}
&\frac 12 \sum_{i=0}^{\ell_k} \left(1 + \bfun_{\left\{i\,\neq\,0\right\}}\right) \bbE \left\{ { \Delta}_{k,i,2}^{(1,3,0)} \right\} +
\frac{1}{2} \bbE \left(\Delta_{k,3}^{(1)}\right) +
\frac{1}{6} \bbE \left(f_{n-k}^{(3)} \left(\tilde S_{k-1}\right) \left(X_k^3 - \bbE \left(X_0^3\right)\right) \right) \\
&\;- \sum_{i=1}^{\ell_k} \gamma_i \sum_{j=1}^i \bbE \big \{ f^{(3)}_{n-k} \left(\tilde S_{k-j -1}\right) X_{k-j} \big \} - \sum_{i=1}^{\ell_k} \frac{\gamma_i}{2} \sum_{j=1}^i \bbE \big \{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) \left(X^2_{k-j}\right)^{(0)} \big \}\\
&\; - \sum_{i=1}^{\ell_k} \frac{\gamma_i^{(2)}}{2} \sum_{j=1}^i \bbE \big \{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) X_{k-j} \big \} + \frac 14 \sum_{i=0}^{\ell_k} \left(1 + \bfun_{\{i\,\neq\,0 \}}\right) \bbE \left\{ { \Delta}_{k,i,2}^{(1,4)} \right\} \\
&\qquad + \frac{1}{6} \bbE \left(\Delta_{k,4}^{(1)}\right) + \Gamma^{(4)}_{n,k} \,,
\end{aligned}
\end{multline}
where $ \Gamma^{(4)}_{n,k} $ satisfies $\sum_{k=1}^n \vert \Gamma^{(4)}_{n,k} \vert \ll 1 + \sum_{i\,\geq\,1} i (i \wedge \sqrt{n})\theta(i) $.
\vspace*{4pt}

In what follows we continue the estimation of each term in the right-hand side of~\eqref{linddec6} and show that the sum over $k $ from $1$ to $n$ of their absolute values is bounded by a constant times $ \{1 + \sum_{i\,\geq\,1} i (i \wedge \sqrt{n})\theta(i) \}$. Let us start by dealing with the quantities $ { \Delta}_{k,i,2}^{(1,3,0)}$. With this aim, note first that for $m_{k,j}$ defined in~\eqref{defmki},
\begin{multline*}
\left|\bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-j -m_{k,j} -1 }\right) \left(X_{k-j} \left(X_{k-i}X_k\right)^{(0)}\right)^{(0)} \right\} \right| \\
\begin{aligned}
&\ll \left\Vert f^{(3)}_{n-k} \right\Vert_{\infty} \big (\theta (m_{k,j}) \wedge \theta (j-i) \wedge \theta (i) \big) \\
&\ll \left\Vert f^{(3)}_{n-k} \right\Vert_{\infty} \big (\theta (m_{k}) \wedge \theta (j-i) \wedge \theta (i) + \theta (k-j) \wedge \theta (j-i) \wedge \theta (i) \big) \,.
\end{aligned}
\end{multline*}
Hence, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} and the fact that $m_k\leq \sqrt{n-k+1}$,
\begin{multline}\label{B1dec6}
\sum_{k=1}^n \sum_{i = 0}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \left| \bbE \left\{ f^{(3)}_{n-k}\left(\tilde S_{k-j -m_{k,j} -1 }\right) (X_{k-j} \left(X_{k-i}X_k)^{(0)}\right)^{(0)} \right\} \right|\\
\begin{aligned}
&\ll \sum_{k=1}^n \frac{m_k}{ \sqrt{n-k+1}}
\left(m_k \theta (m_k) + \sum_{i\,\geq\,m_k} \theta (i) + \ell_k \theta ([k/3])\right) \\
&\ll \sum_{k=1}^n \sqrt{k} \theta \left([\sqrt k]\right) + \sum_{k=1}^n \sum_{i\,\geq\,[\sqrt k]} \theta (i) + \sum_{k=1}^n k \theta (k) \ll 1 + \sum_{i\,\geq\,1} i \left(i \wedge \sqrt{n}\right)\theta(i) \,.
\end{aligned}
\end{multline}
On another hand, by the Taylor integral formula,
\begin{multline*}
\bbE \left\{ \left(f^{(3)}_{n-k}\left(\tilde S_{k-j -1 }\right) - f^{(3)}_{n-k}\left(\tilde S_{k-j -m_{k,j} -1 }\right) \right) \left(X_{k-j} \left(X_{k-i}X_k\right)^{(0)}\right)^{(0)} \right\} \\
\begin{aligned}
&= \sum_{u=1}^{m_{k,j}} \bbE \left\{ \left(f^{(3)}_{n-k}\left(\tilde S_{k-j -u }\right) - f^{(3)}_{n-k}\left(\tilde S_{k-j -u -1 }\right) \right) \left(X_{k-j} \left(X_{k-i}X_k\right)^{(0)}\right)^{(0)} \right\} \\
&= \sum_{u=1}^{m_{k,j}} \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k-j -u -1 }\right) \tilde X_{k-j -u } \left(X_{k-j} \left(X_{k-i}X_k\right)^{(0)}\right)^{(0)} \right\} \\
& \quad + \sum_{u=1}^{m_{k,j}} \int_0^1 (1-t) \bbE \left\{ f^{(5)}_{n-k}\left(\tilde S_{k-j -u -1 } +t \tilde X_{k-j -u }\right) \tilde X^2_{k-j -u } \left(X_{k-j} \left(X_{k-i}X_k\right)^{(0)}\right)^{(0)} \right\} dt \,.
\end{aligned}
\end{multline*}
According to Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1},
\begin{multline}\label{B2dec6}
\sum_{k=1}^n \sum_{i = 0}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{u=1}^{m_{k,j}}\\
\left| \bbE \left\{ f^{(5)}_{n-k}\left(\tilde S_{k-j -u -1 } +t \tilde X_{k-j -u }\right) \tilde X^2_{k-j -u } \left(X_{k-j} \left(X_{k-i}X_k\right)^{(0)}\right)^{(0)} \right\} \right|\\
\begin{aligned}
& \ll \sum_{k=1}^n\left\Vert f^{(5)}_{n-k} \right\Vert_{\infty} \sum_{i = 0}^{\ell_k} \sum_{j=1}^{m_k } \sum_{u=1}^{m_k} \left(\theta (u) \wedge \theta (j) \wedge \theta (i) \right)\\
& \ll \sum_{k=1}^n (n-k+1)^{-3/2} \left\{ \sum_{u =1}^{[\sqrt n]} u^2 \theta (u) + m_k^2 \sum_{i\,\geq\,m_k+1} \theta (i) \right\}\\
& \ll \sum_{u =1}^{[\sqrt n]} u^2 \theta (u) + \sum_{i\,\geq \,1} i \theta (i) \ll 1 + \sum_{i\,\geq\,1} i \left(i \wedge \sqrt{n}\right)\theta(i) \,.
\end{aligned}
\end{multline}
Next, let $Z_{k,j,u,i}:= \tilde X_{k-u } (X_{k-j} (X_{k-i}X_k)^{(0)})^{(0)}$ and $Z^{(0)}_{k,j,u,i}:=Z_{k,j,u,i}-\bbE (Z_{k,j,u,i})$. Since
\[
\left\vert \bbE (Z_{k,j,u,i}) \right\vert \ll
\bigl(\theta (u-j) + \theta (k-j) \bigr) \wedge \theta (j-i) \wedge \theta (i) \,,
\]
by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2},
\begin{multline}\label{B3dec6ante}
 \sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_{k,i}} \sum_{u= j+1}^{j+m_{k,j}} \left| \bbE \left\{ f^{(4)}_{n-k}\left(\tilde S_{k -u -1 }\right) \right\} \bbE\left(Z_{k,j,u,i}\right) \right|\\
\begin{aligned}
&\ll \sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_{k,i}} \sum_{u=j+1}^{j+m_{k,j}}
\frac{ \left(\left(\theta (u-j) + \theta (k-j) \right) \wedge \theta (j-i) \wedge \theta (i) \right) } {(n-u) \wedge (n-k+1)^{3/2} }\\
&\ll \sum_{k=1}^n \left((n-k+1)^{-3/2} + n^{-1} \right) \left(m^2_k \sum_{i =m_k }^{{\ell_k}} \theta (i) + \sum_{u=1}^{m_k} u^2 \theta (u) + m_k^2 k \theta ([k/3])\right)\\ &
\ll 1 + \sum_{u\,\geq\,1} u\left(u \wedge {\sqrt n}\right) \theta (u) \,.
\end{aligned}
\end{multline}
On another hand, for $m_{k,u}$ defined in~\eqref{defmki},
\begin{multline*}
\left| \bbE \big \{ f^{(4)}_{n-k}\left(\tilde S_{k -u -m_{k,u} -1 }\right) Z^{(0)}_{k,j,u,i} \big \} \right|
\ll\\ \left\Vert f^{(4)}_{n-k} \right\Vert_{\infty} \Big \{ \big (\theta (m_k) \wedge \theta (u-j) \wedge \theta (j-i) \wedge \theta (i) \big) + \big (\theta (k-u) \wedge \theta (u-j) \wedge \theta (j-i) \wedge \theta (i) \big) \Big \} \,.
\end{multline*}
Hence, using Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} and the fact that $m_k^2 \leq n-k+1$,
\begin{multline}\label{B3dec6}
\sum_{k=1}^n \sum_{i = 0}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{u=j+1}^{j+m_{k,j}}
\left| \bbE \big \{ f^{(4)}_{n-k}\left(\tilde S_{k-u -m_{k,u} -1 }\right) Z^{(0)}_{k,j,u,i} \big \} \right|
\\
\begin{aligned}
& \ll \sum_{k=1}^n \frac{m^2_k}{ n-k+1 } \left(m_k \theta (m_k) + \sum_{i\,\geq\,m_k} \theta (i) + k \theta ([k/4])\right)\\
& \ll \sum_{k=1}^n \left(\sqrt{k} \theta \left([\sqrt k]\right) + \sum_{i\,\geq\,[\sqrt k]} \theta (i) + k \theta (k) \right) \ll 1 + \sum_{u \geq 1} u \left(u \wedge {\sqrt n}\right) \theta (u) \,.
\end{aligned}
\end{multline}
Next
\begin{multline*}
\bbE \big \{ \left(f^{(4)}_{n-k}\left(\tilde S_{k -u -1 }\right) - f^{(4)}_{n-k}\left(\tilde S_{k-u -m_{k,u} -1 }\right) \right) Z^{(0)}_{k,j,u,i} \big \}
\\
\begin{aligned}
& = \sum_{v=u+1}^{u+m_{k,u}} \bbE \left\{ \left(f^{(4)}_{n-k}\left(\tilde S_{k -v }\right) - f^{(4)}_{n-k}\left(\tilde S_{k-v -1 }\right) \right) Z^{(0)}_{k,j,u,i} \right\} \\
&= \sum_{v=u+1}^{u+m_{k,u}} \int_0^1 \bbE \left\{ f^{(5)}_{n-k}\left(\tilde S_{k -v -1 } + t \tilde X_{k-v }\right) \tilde X_{k -v } Z^{(0)}_{k,j,u,i} \right\} dt \,.
\end{aligned}
\end{multline*}
Therefore, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1},
\begin{multline}\label{B4dec6}
\sum_{k=1}^n \sum_{i = 0}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{u=j+1}^{j+m_{k,j}} \left| \bbE \left\{ \left(f^{(4)}_{n-k}\left(\tilde S_{k-u -1 }\right) - f^{(4)}_{n-k}\left(\tilde S_{k-u -m_{k,u} -1 }\right) \right) Z^{(0)}_{k,j,u,i} \right\} \right|\\
\begin{aligned}
&\ll \sum_{k=1}^n \sum_{i = 0}^{\ell_k} \sum_{j=i+1}^{i+m_{k,i} } \sum_{u=j+1}^{j+m_{k,j}} \sum_{v=u+1}^{u+m_{k,u}} \left\Vert f^{(5)}_{n-k} \right\Vert_{\infty} \big (\theta (v-u) \wedge \theta (u-j) \wedge \theta (j-i) \wedge \theta (i) \big)\\
& \ll \sum_{k=1}^n \frac{1}{(n-k+1)^{3/2}} \sum_{\ell =1}^{m_k} \ell^3 \theta (\ell) + \sum_{k=1}^n \frac{m_k^3}{(n-k+1)^{3/2}} \sum_{i\,\geq\,m_k +1} \theta (i)\\
&\ll \sum_{k=1}^n \frac{1}{k^{3/2}} \sum_{\ell =1}^{[{\sqrt k}]} \ell^3 \theta (\ell) + \sum_{k=1}^n \sum_{i\,\geq\,{[\sqrt k]}} \theta (i)
\ll \sum_{u\,\geq\,1} u \left(u \wedge {\sqrt n}\right) \theta (u) \,.
\end{aligned}
\end{multline}
Taking into account~\eqref{B1dec6}, \eqref{B2dec6}, \eqref{B3dec6ante}, \eqref{B3dec6} and~\eqref{B4dec6}, it follows that
\begin{equation}\label{B5dec6}
\sum_{k=1}^n \sum_{i=0}^{{\ell_k}} \left| \bbE \left({ \Delta}_{k,i,2}^{(1,3,0)}\right) \right| \ll 1 + \sum_{u \,\geq\,1} u \left(u \wedge {\sqrt n}\right) \theta (u) \,.
\end{equation}
With similar (but even simpler) arguments, we infer that the sums over $k$ from $1$ to $n$ of the second and third terms in the right-hand side of~\eqref{linddec6} are also bounded by a constant times $ \{1 + \sum_{u \geq 1} u (u \wedge {\sqrt n}) \theta (u) \} $. More precisely,
\begin{multline}\label{B6dec6}
\sum_{k=1}^n \Big\{ \left| \bbE \left(\Delta_{k,3}^{(1)}\right) \right| + \left| \bbE \left(f_{n-k}^{(3)} \left(\tilde S_{k-1}\right) \left(X_k^3 - \bbE\left(X_0^3\right)\right) \right) \right| \Big\}\\
\ll 1 + \sum_{u\,\geq\,1} u \left(u \wedge {\sqrt n}\right) \theta (u)\,.
\end{multline}
We deal now with the fourth term of the right hand side of~\eqref{linddec6}. With this aim, recalling the definition~\eqref{defmki} of $m_{k,j}$, note that
\[
\left| \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j -m_{k,j} - 1}\right) X_{k-j} \right\} \right| \ll \left\Vert f^{(3)}_{n-k} \right\Vert_{\infty} \theta (m_{k,j}) \ll \left\Vert f^{(3)}_{n-k} \right\Vert_{\infty} \big (\theta (m_{k}) + \theta (k-j) \big) \,.
\]
Hence, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} and recalling the notation $\gamma_i= \bbE(X_{0}X_i)$, we get
\begin{multline}\label{B7dec6}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i  \left| \gamma_i \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j -m_{k,j}- 1}\right) X_{k-j} \right\} \right|\\
\begin{aligned}
& \ll \sum_{k=1}^n (n-k+1)^{-1/2} \sum_{i=1}^{\ell_k} \sum_{j=1}^i \theta (i) (\theta (m_k) +\theta(k-j)) \\
& \ll \sum_{k=1}^n k^{-1/2} \theta \left([\sqrt k]\right) \sum_{i\,\geq\,1}i \theta (i) + \sum_{k=1}^n \theta([k/2]) \sum_{i\,\geq\,1} i \theta (i) \ll 1 \,.
\end{aligned}
\end{multline}
Next, by the Taylor integral formula,
\begin{multline*}
\bbE \left\{ \left(f^{(3)}_{n-k} \left(\tilde S_{k-j -1}\right) - f^{(3)}_{n-k} \left(\tilde S_{k-j -m_{k,j}- 1}\right) \right) X_{k-j} \right\} \\
\begin{aligned}
&= \sum_{u=j+1}^{j+m_{k,j}} \bbE \left\{ \left(f^{(3)}_{n-k} \left(\tilde S_{k-u}\right) - f^{(3)}_{n-k} \left(\tilde S_{k-u- 1}\right) \right) X_{k-j} \right\} \\
&= \sum_{u=j+1}^{j+m_{k,j}} \left(\bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k -u- 1}\right) \tilde X_{k-u} X_{k-j} \right\}\right. \\
&\quad\left. + \int_0^1 (1-t) \bbE \left\{ f^{(5)}_{n-k} \left(\tilde S_{k -u- 1} + t \tilde X_{k-u}\right) \tilde X^2_{k -u} X_{k-j} \right\} dt \right) \,.
\end{aligned}
\end{multline*}
But, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1},
\begin{multline}\label{B8dec6}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{j+m_{k,i}} \left| \gamma_i \bbE \left\{ f^{(5)}_{n-k} \left(\tilde S_{k -u- 1} + t \tilde X_{k -u}\right) \tilde X^2_{k-u} X_{k-j} \right\} \right| \\
\ll \sum_{k=1}^n (n-k+1)^{-3/2} \sum_{u\,\geq\, 1} \theta (u) \sum_{i\,\geq\,1} i \theta (i) \ll 1 \,.
\end{multline}
On another hand, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} again,
\begin{multline*}
\left| \gamma_i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k -u- 1}\right)\left(\tilde X_{k-u} - X_{k-u}\right) X_{k-j} \right\} \right| \\
\begin{aligned}
&= \left| \gamma_i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k -u- 1}\right) \bbE_0(X_{k-u}) X_{k-j} \right \} \right| \\
&\ll (n-k+1)^{-1} (\theta (k-u) \wedge \theta (u-j)) \theta(i)\\
&\ll (n-k+1)^{-1} \theta ([(k-j)/2]) \theta(i) \,.
\end{aligned}
\end{multline*}
Hence,
\begin{multline}\label{B9dec6-cor-ante}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{j+m_{k,j}} \left| \gamma_i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k -u- 1}\right)\left(\tilde X_{k-u} - X_{k-u}\right) X_{k-j} \right\} \right|\\
\ll \sum_{k=1}^n \theta ([k/4]) \sum_{i=1}^n i \theta (i) \ll 1 \,.
\end{multline}
Moreover, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2},
\begin{multline}\label{B9dec6}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{j+m_{k,i}} \left| \gamma_i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k -u- 1}\right) \right\} \bbE \left(X_{k-u} X_{k-j}\right) \right| \\
\ll \sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{j+m_{k,i}}
\frac{ \theta (u-j) \theta (i) } { (n-u) \wedge (n-k+1)^{3/2} }
\ll \sum_{u\,\geq\,1} \theta ([u/2]) \sum_{i\,\geq\,1} i \theta (i) < \infty \,.
\end{multline}
Hence, taking into account~\eqref{B7dec6}, \eqref{B8dec6}, \eqref{B9dec6-cor-ante} and~\eqref{B9dec6}, we derive that
\begin{multline}\label{B9bisdec6}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \left| \gamma_i \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j - 1}\right) X_{k-j} \right\} \right| \\
\ll 1 + \sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{j+m_{k,j}} \left| \gamma_i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-u- 1}\right) (X_{k-u} X_{k-j})^{(0)} \right\} \right| \,.
\end{multline}
Next, recalling the definition~\eqref{defmki} of $m_{k,u}$, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1}, note that
\begin{multline}\label{B10dec6}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{j+m_{k,j}} \left| \gamma_i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k -u- m_{k,u} - 1}\right) (X_{k -u} X_{k-j})^{(0)} \right\} \right|\\
\begin{aligned}
&\ll \sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{j+m_{k,j}} \left\Vert f^{(4)}_{n-k} \right\Vert_{\infty} \left\{ \theta (m_{k,u}) \wedge \theta (u-j) \right\} \theta (i)\\
& \ll \sum_{k=1}^n \frac{m_k \theta (m_k) }{n-k+1} \sum_{i =1}^n i \theta (i) + \sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{k } \left\{ \theta (k-u) \wedge \theta (u-j) \right\} \theta (i)\\
& \ll \left(\sum_{i =1}^n i \theta (i) \right)^2 \ll 1 \,.
\end{aligned}
\end{multline}
On the other hand,
\begin{multline*}
\left| \bbE \left\{ \left(f^{(4)}_{n-k} \left(\tilde S_{k -u- 1}\right)  - f^{(4)}_{n-k} \left(\tilde S_{k-u- m_{k,u} - 1}\right) \right) (X_{k -u} X_{k-j})^{(0)} \right\} \right| \\
\begin{aligned}
& \leq \sum_{v =u+1}^{u+m_{k,u}} \left| \bbE \left\{ \left (f^{(4)}_{n-k} \left(\tilde S_{k-v}\right) - f^{(4)}_{n-k} \left(\tilde S_{k- v - 1}\right) \right) (X_{k -u} X_{k-j})^{(0)} \right\} \right| \\
& \leq \sum_{v =u+1}^{u+m_{k,u}} \int_0^1 \left| \bbE \left\{ f^{(5)}_{n-k} \left(\tilde S_{k- v - 1} + t X_{k-v}\right) \tilde X_{k -v} (X_{k -u} X_{k-j})^{(0)} \right\} \right| dt \,.
\end{aligned}
\end{multline*}
Hence,
\begin{multline}\label{B11dec6}
\left| \gamma_i \bbE \left\{ \left(f^{(4)}_{n-k} \left(\tilde S_{k -u- 1}\right)  - f^{(4)}_{n-k} \left(\tilde S_{k-u- m_{k,u} - 1}\right) \right) (X_{k -u} X_{k-j})^{(0)} \right\} \right|\\
\ll \left\Vert f^{(5)}_{n-k} \right\Vert_{\infty} \sum_{v =u+ 1}^{u+ m_k} \big (\theta (v-u) \wedge \theta (u-j)\big) \theta (i)
\,.
\end{multline}
Taking into account~\eqref{B10dec6} and~\eqref{B11dec6} together with Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1}, it follows that
\begin{multline}\label{B12dec6}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \sum_{u=j+1}^{j+m_{k,j}} \left| \gamma_i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-u- 1}\right) (X_{k -u} X_{k-j})^{(0)} \right \} \right| \\
 \ll 1+ \sum_{k=1}^n (n-k+1)^{-3/2} \sum_{v=1}^{m_k} v \theta (v) \sum_{i =1}^n i \theta (i) \ll 1 \,.
\end{multline}
Starting from~\eqref{B9bisdec6} and taking into account the upper bound~\eqref{B12dec6}, we get that the sum over $k$ from $1$ to $n$ of the fourth term in the right-hand side of~\eqref{linddec6} is uniformly bounded as a function of $n$. More precisely,
\begin{equation}\label{B9bisdec6term5}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \left| \gamma_i \bbE \left\{ f^{(3)}_{n-k} \left(\tilde S_{k-j - 1}\right) X_{k-j} \right\} \right|
\ll 1 \,.
\end{equation}
Similar computations (even simpler since we deal with the fourth derivative rather than the third one) give the following upper bound concerning the quantities involved in the fifth and sixth terms in the right-hand side of~\eqref{linddec6}:
\begin{multline}\label{B9bisdec6term5-new}
\sum_{k=1}^n \sum_{i=1}^{\ell_k} \sum_{j=1}^i \Big (\left| \gamma_i \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right)\left(X^2_{k-j}\right)^{(0)} \right\} \right|\\ + \left | \gamma_i^{(2)} \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) X_{k-j} \right\} \right| \Big)
\ll 1 \,.
\end{multline}
We deal now with the last terms in the decomposition~\eqref{linddec6} and show that
\begin{equation}\label{lastaimdec6}
\sum_{k=1}^n \sum_{i=0}^{\ell_k} \left| \bbE\left(\Delta_{k,i,2}^{(1,4)}\right) \right| \ll 1 \quad\text{and}\quad \sum_{k=1}^n \left| \bbE \left(\Delta_{k,4}^{(1)}\right) \right|
\ll 1 \,,
\end{equation}
where we recall that $\Delta_{k,i,2}^{(1,4)} $ and $\Delta_{k,4}^{(1)}$ have been respectively defined in~\eqref{defdelta24i} and~\eqref{defdeltank4}. With this aim, note first that, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2},
\begin{multline}\label{lastaimdec6P1}
\sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_{k,i}} \left| \bbE \left\{ f^{(4)}_{n-k} \left(\tilde S_{k-j - 1}\right) \bbE \left(X^2_{k-j} (X_{k-i} X_k)^{(0)} \right) \right\} \right|\\
\begin{aligned}
&\ll \sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_{k,i}} \big ((n-k+1)^{-3/2} + (n-j)^{-1} \big) \big (\theta (j-i) \wedge \theta (i) \ \big)\\
&\ll \sum_{k=1}^n \left((n-k+1)^{-3/2} + n^{-1} \right) \left(m_k \sum_{i\,\geq\,m_k } \theta (i) + \sum_{u=1}^{m_k} u \theta (u) \right)
\ll \sum_{u\,\geq\,1} u \theta (u) \,.
\end{aligned}
\end{multline}
Next, let $W_{k,i,j} = (X^2_{k-j} (X_{k-i} X_k)^{(0)} )^{(0)}$. We start by noticing that
\begin{multline*}
\sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_{k,i}} \left| \bbE \left\{ \left(f^{(4)}_{n-k} \left(\tilde S_{k-j -m_{k,j}- 1}\right) W_{k,i,j} \right) \right\} \right|\\
\ll \sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_{k,i}} \left\Vert f^{(4)}_{n-k} \right\Vert_{\infty} \big (\theta (m_{k,j}) \wedge \theta (j-i) \wedge \theta (i) \ \big) \,.
\end{multline*}
But, $ \theta (m_{k,j}) = \theta (m_{k}) \vee \theta (k-j) $. Hence, using Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1},
\begin{multline}\label{lastaimdec6P2}
\sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_{k,i}} \left| \bbE \left\{ \left(f^{(4)}_{n-k}\left(\tilde S_{k-j -m_{k,j}- 1}\right) W_{k,i,j} \right)\right\} \right| \\
\begin{aligned}
& \ll \sum_{k=1}^n (n-k+1)^{-1} \left(m_k^2 \theta (m_k) + m_k \sum_{i\,\geq\,m_k} \theta (i) + m_k \, k \theta([k/3])\right) \\
& \ll \sum_{k=1}^n \theta (m_k) + \sum_{k=1}^n (n-k+1)^{-1/2} \sum_{i\,\geq\,m_k} \theta (i) + \sum_{k=1}^n k \theta(k)
\ll \sum_{u\,\geq\,1} u \theta (u)\,.
\end{aligned}
\end{multline}
Next, by Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1}, we derive
\begin{multline}\label{lastaimdec6P3}
\sum_{k=1}^n  \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_{k,i}} \left| \bbE \left\{ \left(f^{(4)}_{n-k} \left(\tilde S_{k-j -1}\right) - f^{(4)}_{n-k} \left(\tilde S_{k-j -m_{k,j}- 1}\right) \right) W_{k,i,j} \right\} \right|
\\
\begin{aligned}
& \leq
\sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_k} \sum_{u=j+1}^{j+m_{k,j}} \int_0^1 \left| \bbE \left\{ \left(f^{(5)}_{n-k} \left(\tilde S_{k-u-1} + t \tilde X_{k-u}\right) \right) \tilde X_{k -u} W_{k,i,j} \right\} \right| dt \\
& \ll \sum_{k=1}^n \sum_{i=0}^{\ell_k} \sum_{j=i+1}^{ i+m_k} \sum_{u=j+1}^{j+m_{k,j}} \left\Vert f^{(5)}_{n-k} \right\Vert_{\infty} \big (\theta (u-j) \wedge \theta (j-i) \wedge \theta (i) \ \big) \\
& \ll \sum_{k=1}^n (n-k+1)^{-3/2} \left(\sum_{u=1}^{m_k} u^2 \theta (u) + m^2_k \sum_{i\,\geq\,m_k} \theta (i) \right)
\ll \sum_{u\,\geq\,1} u \theta (u) \,.
\end{aligned}
\end{multline}
Putting together~\eqref{lastaimdec6P1}, \eqref{lastaimdec6P2} and~\eqref{lastaimdec6P3}, the first part of~\eqref{lastaimdec6} follows. Similar (but simpler) arguments lead to the second part of~\eqref{lastaimdec6}. Finally, starting from~\eqref{linddec6} and taking into account the upper bounds~\eqref{B5dec6}, \eqref{B6dec6}, \eqref{B9bisdec6term5}, \eqref{B9bisdec6term5-new} and~\eqref{lastaimdec6}, it follows that $ \sum_{k=1}^n | \bbE (\Delta_{n,k})| \ll 1 + \sum_{k\geq 1} k (k \wedge \sqrt{n}) \theta (k)$, which combined with~\eqref{sumdelta} implies~\eqref{aim1thmain} and then proves Theorem~\ref{thW2}$\MK$\eqref{theo2.1.b}.
\end{proof}

\subsection{Proof of Lemma~\ref{lmacrucial}}


Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.1} comes from the smoothing lemma~\cite[Lemma~6.1]{DMR09}. To prove Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2}, we write
\[
\left| \bbE \left(f_{n-k}^{(i) } \left({\tilde S}_{\ell -1}\right)\right) - \bbE \left(f_{n-k}^{(i) } \left({ S}_{\ell -1}\right)\right) \right| \leq \left\Vert f_{n-k}^{(i+1)} \right\Vert_{\infty} \left\Vert \bbE_0 (S_{\ell -1}) \right\Vert_1 \,.
\]
Hence, since $ \Vert \bbE_0 (S_{\ell -1}) \Vert_1 \leq \sum_{k=1}^{\ell} \theta_{X,1,1}(k) \ll 1$, using Item~\eqref{lemm4.4.1}, we derive that for any positive integer $\ell$,
\[
\left| \bbE \left(f_{n-k}^{(i) } \left({\tilde S}_{\ell -1}\right)\right) - \bbE \left(f_{n-k}^{(i)} \left({ S}_{\ell -1}\right)\right) \right| \ll (n-k+1)^{-(i-1)/2} \,.
\]
Next, let $(G_i)_{i\,\geq\,1}$ be a sequence of iid centered Gaussian random variables with variance $\sigma^2$ and independent of $(X_i,B_i,Z_i)_{i\,\geq\,1}$ (recall that the random variables $(B_i)$ and $(Z_i) $ have been defined at the beginning of Section~\ref{subproofTh}). Let $N_{k} = \sum_{i=1}^{k} G_i $. Write that
\[
\bbE \left(f_{n-k}^{(i)} (S_{\ell -1})\right) = \bbE \left(f_{n-k}^{(i) } (S_{\ell -1})\right) - \bbE \left(f_{n-k}^{(i) } (N_{\ell -1})\right) + \bbE \left(f_{n-k}^{(i) } (N_{\ell -1})\right) \,.
\]
Next, let $t_k = \sigma \sqrt{(n-k)/2+1}$ and let $\varphi_{t^2_k}$ be the density of the law $\clN (0,t_k^2)$. Denote also $H_{k,n} = \sum_{i=k+1}^n B_i$ and note that, by definition, $H_{k,n} $ is independent of $S_{\ell -1} $ and of $N_{\ell -1} $. Note that
\[
\bbE \left(f_{n-k}^{(i) } (N_{\ell -1})\right) = \bbE \left(f * \varphi_{t^2_k}^{(i) } (N_{\ell -1} + H_{k,n})\right) = \bbE \left(f * \varphi_{t^2_k + \sigma^2 (\ell-1)}^{(i) } (H_{k,n})\right) \,.
\]
Using Item~\eqref{lemm4.4.1}, it follows that
\[
\left| \bbE \left(f_{n-k}^{(i) } (N_{\ell -1})\right) \right| \ll (n-k+ \ell)^{- (i-2)/2} \,.
\]
On the other hand,
\begin{multline*}
\bbE (f_{n-k}^{(i) } (S_{\ell -1})) - \bbE \left(f_{n-k}^{(i) } (N_{\ell -1})\right)\\
\begin{aligned}
&= \bbE \left(f * \varphi_{t^2_k}^{(i) } (S_{\ell -1} +H_{k,n})\right) - \bbE \left(f * \varphi_{t^2_k}^{(i) } (N_{\ell -1} + H_{k,n})\right) \\
&= \int_\bbR \bbE \big\{ f' (S_{\ell -1} +H_{k,n} -u) - f' (N_{\ell -1} + H_{k,n} -u) \big \} \varphi_{t^2_k}^{(i-1) } (u) du \,.
\end{aligned}
\end{multline*}
Since $f \in \Lambda_2(E)$, $g:=f'$ is in $\Lambda_1(E)$ meaning that $g:\bbR
\times E \rightarrow \bbR$ is measurable with respect to the $\sigma$-fields $\clL(\bbR \times E) $ and $\clB (\bbR)$, $g(\cdot, w) $ is $1$-Lipschitz and $g(0,w)=0$ for any $w \in E$. Therefore, since it is assumed that $\sum_{k\,\geq\,1} k \theta_{X,3,4}(k) < \infty$, one can use~\cite[Theorem~3.1$\MK$(a)]{DR08} (see also~\cite[Theorem~1.1]{Pe05}) which entails that
\[
\sup_{v\,\in\,\bbR} \big | \bbE (f' (S_{\ell -1} + v)) - \bbE(f' (N_{\ell -1} +v)) \big | \ll 1 \,.
\]
Note that~\cite[Theorem~3.1$\MK$(a)]{DR08} is stated for $g$ a Lipschitz function, but following its proof one can show that it holds also if $g$ belongs to $\Lambda_1(E)$. On the other hand, $\varphi_{t^2_k}^{(i-1) } (u) = t_k^{-i} \varphi_{1}^{(i-1) } (u/t_k)$. Therefore
\[
\left| \bbE \left(f_{n-k}^{(i) } (S_{\ell -1})\right) - \bbE \left(f_{n-k}^{(i) } (N_{\ell -1})\right) \right| \ll t_k^{ 1-i} \left\Vert \varphi_{1}^{(i-1) } \right\Vert_1 \ll t_k^{ 1-i} \,.
\]
Putting together all the above upper bounds gives Lemma~\ref{lmacrucial}$\MK$\eqref{lemm4.4.2}.

\appendix
\section{Convergence of quantiles in the CLT} \label{Annex}

%\setcounter{equation}{0}

In this section, we give an inequality involving the difference between the quantile of a normalized random variable and the quantile of a standard normal, and the Wasserstein distance of order $p$ between the corresponding laws. The main result of this section is Proposition~\ref{PropVaR} below which is a key result to prove Corollary~\ref{AppliVaR}.

\begin{prop} \label{PropVaR}
Let $Z$ be a centered real-valued random variable satisfying
$\bbE (Z^2) \leq 2$. Let $F_Z$ denote the distribution function of $Z$ and $\Phi$ denote the distribution function of a standard normal $Y$. For any $p\geq 1$, let
\[
K_p = \int_0^1 \big| F_Z^{-1} (t) - \Phi^{-1} (t) \big|^p dt.
\]
Then, for any $u$ in $(0,1/2]$,
\[
\big| F_Z^{-1} (1-u) - \Phi^{-1} (1-u) \big| \leq \max \left(\left(\frac{(p+1)e K_p }{uQ_{1,Y} (u)} \right)^{1/(p+1)},
\left(\frac{(p+1)e K_p }{u} \right)^{1/p} \right),
\]
where $Q_{1,Y}$ is defined in~\eqref{DefSuperQuantile}.
\end{prop}

\begin{rema}
Note that $Q_{1,Y} (u) \sim_{u \rightarrow 0} \sqrt{2 \ln (1/u)}$.
\end{rema}


\begin{proof}[{Proof of Proposition~\ref{PropVaR}}]
Throughout the proof, $H_Y = 1 - \Phi$ and $Q_Y$ is the inverse function of $H_Y$. With these notations,
\begin{equation}\label{ExpressionQ1Y}
Q_{1,Y} (u) = u^{-1} \int_0^u Q_Y (t) dt = u^{-1} \bbE \bigl(Y \bbI_{Y\,\geq\,Q_Y (u) } \bigr) =
\frac{ \exp \left(- Q_Y^2 (u) /2\right) }{ \sqrt{2\pi} \, u } \,.
\end{equation}
We also set $H_Z = 1-F_Z$ and we denote by $Q_Z$ the generalized inverse function of $H_Z$. From~\eqref{ExpressionQ1Y}, Proposition~\ref{PropVaR} is equivalent to
\begin{equation}\label{ProofVaR1}
| Q_Z (u) - Q_Y (u) | \leq \max \Bigl(\bigl((p+1)e \sqrt{2\pi} \, e^{Q_Y^2 (u) /2} K_p \bigr)^{1/(p+1)},
\bigl((p+1)e K_p /u \bigr)^{1/p} \Bigr)
\end{equation}
for $u\leq 1/2$. We start by proving~\eqref{ProofVaR1} in the case $Q_Z (u) > Q_Y (u)$.

%\smallskip

\begin{proof}[{Proof of~\eqref{ProofVaR1} in the case $Q_Z (u) > Q_Y (u)$}]
Let $\delta = Q_Z (u) - Q_Y (u)$ and let $\eta$ be the unique real in $(0,u)$ such that $Q_Y (u-\eta) = Q_Y (u) + \delta = Q_Z (u)$. From the convexity of $Q_Y$ on $(0,1/2]$,
\begin{equation}\label{ProofVaR2}
Q_Y (u-t\eta) \leq Q_Y (u) + t \delta \ \text{ for any } t\in [0,1].
\end{equation}
Moreover $Q_Z (u - t \eta) \geq Q_Z (u) \geq Q_Y (u) + \delta$ for $t$ in $[0,1]$, whence, using the change of variables $s=u-t\eta$,
\begin{equation}\label{ProofVaR3}
K_p \geq
\int_{u-\eta}^u |Q_Z (s) - Q_Y (s) |^p ds \geq \eta \int_0^1 (\delta -\delta t)^p dt = \eta \delta^p / (p+1).
\end{equation}
In view of the above inequality, we have to bound $\eta$ from below. In order to get a lower bound on $\eta$, we will bound $-Q'_Y$ from above. From the definition of $Q_Y$,
\[
-Q'_Y (s) = -1/H'_Y (Q_Y (s)) = \sqrt{2\pi} \exp \left(Q_Y^2 (s) / 2\right) \leq \sqrt{2\pi} \exp \left((Q_Y (u) + \delta)^2 /2\right)
\]
for any $s$ in $[u-\eta, u]$.


%\smallskip
We now separate two cases. If $\delta \leq \sqrt{2 + Q_Y^2 (u) } - Q_Y (u)$,
\[
- Q'_Y (s) \leq \sqrt{2\pi} \exp \left((Q_Y (u) + \delta)^2 /2\right) \leq \sqrt{2\pi} \exp \left(1 + Q_Y^2 (u) /2\right)
\]
for any $s$ in $[u-\eta, u]$. Then
\begin{equation}\label{Competadelta}
Q_Y (u-\eta) - Q_Y (u) \leq \eta e\sqrt{2\pi} \exp (Q_Y^2 (u) /2).
\end{equation}
In that case, putting the above lower bound on $\eta$ in~\eqref{ProofVaR3}, we obtain that
\begin{equation}\label{ProofVaR4}
\delta^{p+1} \leq (p+1)e \sqrt{2\pi} \, e^{Q_Y^2 (u) /2} K_p.
\end{equation}


If $\delta > \sqrt{2 + Q_Y^2 (u) } - Q_Y (u)$, let $\delta_0 = \sqrt{2 + Q_Y^2 (u) } - Q_Y (u)$ and let $\eta_0$ be the real in $(0,u)$ such that $Q_Y (u-\eta_0) = Q_Y (u) + \delta_0$. Then $\eta \geq \eta_0$ and $(\delta_0, \eta_0)$ still satisfies~\eqref{Competadelta}, from which
\begin{equation}\label{ProofVaR5}
\eta \geq \eta_0 \ \geq\left(e\sqrt{2\pi}\right)^{-1} \Bigl(\sqrt{2 + Q_Y^2 (u) } - Q_Y (u) \Bigr) \exp \left(- Q_Y^2 (u) /2\right).
\end{equation}
Putting this lower bound in~\eqref{ProofVaR3}, we obtain that
\begin{equation}\label{ProofVaR6}
\delta^p \leq (p+1)e \, (K_p/ u) \frac{ \sqrt{2\pi}\exp \left(Q_Y^2 (u) /2\right) u }{ \sqrt{2 + Q_Y^2 (u) } - Q_Y (u) }.
\end{equation}
Now, setting $u= H_Y (x)$,
\[
\sup_{u\,\in\,(0,1/2]} \frac{ \sqrt{2\pi}\exp \left(Q_Y^2 (u) /2\right) u }{ \sqrt{2 + Q_Y^2 (u) } - Q_Y (u) } =
\sup_{x\,\geq\,0} \frac{ \sqrt{2\pi}\exp \left(x^2/2\right) H_Y (x) }{ \sqrt{2 + x^2 } - x } \leq 1
\]
by an inequality on the Mills ratio of Komatu~\cite{Ko55}. The two above inequalities imply that
\begin{equation}\label{ProofVaR7}
\delta^p \leq (p+1)e \, (K_p/ u),
\end{equation}
if $\delta > \sqrt{2 + Q_Y^2 (u)} - Q_Y (u)$. Combining~\eqref{ProofVaR4} and~\eqref{ProofVaR7}, we get~\eqref{ProofVaR1} in the case $Q_Z (u)> Q_Y (u)$. It remains to prove~\eqref{ProofVaR1} in the case $Q_Z (u) < Q_Y (u)$.
\end{proof}

%\smallskip

\begin{proof}[{Proof of~\eqref{ProofVaR1} in the case $Q_Z (u) < Q_Y (u)$}]
Let then $\delta = Q_Y (u) - Q_Z (u)$. From the assumptions $\bbE (Z)=0$, $\bbE (Z^2) \leq 2$ and the Tchebichef--Cantelli inequality, for any $x\leq 0$, $H_Z (x) \geq x^2 / (2+x^2)$. This implies that
\begin{equation}\label{LowerBoundQZ}
Q_Z (u) \geq - \sqrt{2u/(1-u)}\ \text{ for any } u\in (0,1).
\end{equation}
In particular, for $u\leq 1/2$, $Q_Z(u) \geq - \sqrt{2} \geq - \sqrt{2 + Q_Y^2 (u)}$. Let then $\beta$ be the positive real such that $Q_Y (u+ \beta) = Q_Z (u)$. From~\eqref{LowerBoundQZ}, $-Q'_Y (s) \leq \sqrt{2\pi} \exp (1 + Q_Y^2 (u) /2)$ for any $s$ in $[u, u+\beta]$. It follows that
\[
Q_Y (u+s) \geq Q_Y (u) - s \sqrt{2\pi} \exp \left(1 + Q_Y^2 (u) /2\right)
\]
for any $s$ in $[0,\beta]$. For $s=\beta$, the above inequality yields
\[
\beta \geq \left(e\sqrt{2\pi}\right)^{-1} \exp \left(- Q_Y^2 (u)/2\right) \delta =: \eta.
\]
With the above definition of $\eta$, for any $t$ in $[0,1]$,
\[
Q_Y (u + t\eta) \geq Q_Y (u) - t \delta \geq Q_Y (u) - \delta \geq Q_Z (u+t\eta).
\]
Hence
\[
Q_Y (u + t\eta) - Q_Z (u + t\eta) \geq (1-t) \delta
\]
for any $t$ in $[0,1]$. It follows that
\[
K_p \geq \eta \int_0^1 \big| Q_Y (u + t \eta) - Q_Z (u+t\eta) \big|^p dt \geq \eta \int_0^1 (1-t)^p \delta^p dt =\frac{ \eta \delta^p } { p+1 }.
\]
The above inequality together with the definition of $\eta$ then imply~\eqref{ProofVaR4}, which completes the proof of~\eqref{ProofVaR1}.
\end{proof}
\end{proof}

\begin{proof}[{Proof of Corollary~\ref{AppliVaR}}]
Recall that from Theorem~\ref{thW2}$\MK$\eqref{theo2.1.b} (see also Comment~\ref{commentaftermainth}), under the assumptions of Corollary~\ref{AppliVaR}, $W_2 (P_{S_n / \sigma_n}, G_1) =O(n^{-1/2})$. Hence, Item~\eqref{coro2.5.a} comes from an application of Proposition~\ref{PropVaR} by taking into account the fact that, if $Y$ is a standard normal r.v., there exists a positive constant $\eta$ such that
\[
\inf_{u\,\in\,(0,1/2]} \frac{ Q_{1,Y} (u)}{ \sqrt{\ln (1/u)}} \geq \eta \,.
\]
Indeed, $Q_{1,Y} (u) \sim_{u\,\rightarrow\,0} \sqrt{2 \ln (1/u)}$, $Q_{1,Y}$ is continuous and decreasing and $Q_{1,Y} (1/2) = \sqrt{2 / \pi}$.

Item~\eqref{coro2.5.b} follows again from Theorem~\ref{thW2}$\MK$\eqref{theo2.1.b} together with \cite[Inequality (2.7)]{Rio17VaR}.
\end{proof}


\bibliography{dedecker}
\end{document}
