%~Mouliné par MaN_auto v.0.27.3 2023-09-08 11:21:23
\documentclass[AHL,Unicode,longabstracts,published]{cedram}

\usepackage{amssymb}
%\usepackage{amsthm}
\usepackage{tikz}
\usepackage{bbm}
\usepackage{hyperref}




%
%\AtBeginDocument{
%%\newtheorem{myname}[cdrthm]{My beautiful theorem} }



\newcommand{\ind}{\mathbbm{1}}
\newcommand{\var}{\operatorname{Var}}
\newcommand{\cov}{\operatorname{Cov}}

\newcommand{\bfZ}{\mathbf{Z}}
\newcommand{\bbR}{\mathbb{R}}
\newcommand{\bbE}{\mathbb{E}}
\newcommand{\bbP}{\mathbb{P}}
\newcommand{\bbN}{\mathbb{N}}
\newcommand{\clA}{\mathcal{A}}
\newcommand{\clB}{\mathcal{B}}
\newcommand{\clE}{\mathcal{E}}
\newcommand{\clF}{\mathcal{F}}
\newcommand{\area}{\mathrm{area}}
\newcommand{\aff}{\mathrm{aff}}

\makeatletter
\def\editors#1{%
\def\editor@name{#1}
\if@francais
\def\editor@string{Recommand\'e par les \'editeurs \editor@name.}
\else
\def\editor@string{Recommended by Editors \editor@name.}
\fi}   
\makeatother

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\graphicspath{{./figures/}}

\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}

%\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}

\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}}
\newcommand*{\stepenumi}{\renewcommand*{\theenumi}{\textbf{Step~\arabic{enumi}}}}
\let\oldtilde\tilde
\renewcommand*{\tilde}[1]{\mathchoice{\widetilde{#1}}{\widetilde{#1}}{\oldtilde{#1}}{\oldtilde{#1}}}
\let\oldforall\forall
\renewcommand*{\forall}{\mathrel{\oldforall}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\title[Random polygons in a polygon]{Variance expansion and Berry-Esseen bound for the number of vertices of a random polygon in a polygon}
\alttitle{Asymptotique de la variance et bornes de Berry-Esseen pour le nombre de sommets d'un polygone aléatoire dans un polygone}


\subjclass{52A22, 60D05}
\keywords{Berry--Esseen bound, central limit theorem, geometric probability, Poisson point process, random convex chain, random polygon, variance expansion}


\author[\initial{A.} \lastname{Gusakova}]{\firstname{Anna} \lastname{Gusakova}}
\address{Fachbereich Mathematik, \\
Universit\"at M\"unster, Germany}
\email{gusakova@uni-muenster.de}
\thanks{This work has been initiated during the virtual Hausdorff Trimester Program \emph{The Interplay between High Dimensional Geometry and Probability}. \\ CT was supported by the German Research Foundation (DFG) via SPP 2265 \emph{Random Geometric Systems}. AG was supported by the DFG under Germany's Excellence Strategy EXC 2044 -- 390685587, \emph{Mathematics M\"unster: Dynamics - Geometry - Structure}.}

\author[\initial{M.} \lastname{Reitzner}]{\firstname{Matthias} \lastname{Reitzner}}
\address{Institut f\"ur Mathematik,\\
Universit\"at Osnabr\"uck, Germany}
\email{matthias.reitzner@uni-osnabrueck.de}


\author[\initial{C.} \lastname{Th\"ale}]{\firstname{Christoph} \lastname{Th\"ale}}
\address{Fakult\"at f\"ur Mathematik,\\
Ruhr-Universit\"at Bochum, Germany}
\email{christoph.thaele@rub.de}



\begin{abstract}
Fix a container polygon $P$ in the plane and consider the convex hull $P_n$ of $n\geq 3$ independent random points distributed uniformly in $P$. The focus of this paper is the vertex number of the random polygon $P_n$. The precise variance expansion for the vertex number is determined up to the constant-order term, a result which can be considered as a second-order analogue of the classical expansion for the expectation of Rényi and Sulanke (1963). Moreover, a sharp Berry--Esseen bound is derived for the vertex number of the random polygon $P_n$, which is of the same order as one over the square-root of the variance. The latter is optimal and improves the earlier result of Bárány and Reitzner (2006) by removing the factor $(\log\log n)^{60}$ in the planar case. The main idea behind the proof of both results is a decomposition of the boundary of the random polygon $P_n$ into random convex chains and a careful merging of the variance expansions and Berry--Esseen bounds for the vertex numbers of the individual chains. In the course of the proof, we derive similar results for the Poissonized model.
\end{abstract}

\begin{altabstract}
Fixons un polygone conteneur $P$ dans le plan et considérons l'enveloppe convexe $P_n$ de $n\geq 3$ points aléatoires indépendants et uniformément distribués dans $P$. Cet article est consacré à l'étude du nombre de sommets du polygone aléatoire $P_n$. Nous déterminons l'asymptotique précise de sa variance jusqu'au terme constant, un résultat qui peut être considéré comme un analogue au second ordre du développement classique pour l'espérance de Rényi et Sulanke (1963). De plus, nous obtenons une borne de Berry--Esseen précise pour le nombre de sommets du polygone aléatoire $P_n$, du même ordre que l'inverse de la racine carrée de la variance. Cette borne est optimale et améliore le résultat précédent de Bárány et Reitzner (2006) en supprimant le facteur $(\log\log n)^{60}$ dans le cas planaire. L'idée principale dans la preuve des deux résultats est une décomposition du bord du polygone aléatoire $P_n$ en chaînes convexes aléatoires et une fusion soigneuse des asymptotiques de la variance et des bornes de Berry--Esseen pour le nombre de sommets des chaînes individuelles. Au cours de la preuve, nous obtenons des résultats similaires pour le modèle poissonisé.
\end{altabstract}


\datereceived{2022-06-30}
\daterevised{2023-05-12}
\dateaccepted{2023-06-18}

\editors{S. Gou\"ezel and N. Privault}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\dateposted{2023-10-02}
\begin{document}
\maketitle

\section{Introduction and result}

Let $P\subset \bbR^2$ be a polygon in the plane with $\ell\ge 3$ vertices. We refer to $P$ as a container in what follows. Let $X_1,\,\ldots,\,X_n$ be $n\geq 3$ independent random points, distributed uniformly in $P$. We denote by $P_n$ the random convex hull $[X_1,\,\ldots,\,X_n]$ of these points. It is a random polygon in the container polygon $P$, see Figure~\ref{fig:Polygon}. In this article we are interested in the combinatorial structure of $P_n$, more precisely in the variance expansion and the fluctuations of the number $f_0(P_n)$ of vertices of $P_n$. Note that this quantity is the same as the number $f_1(P_n)$ of edges of $P_n$.

\begin{figure}[!h]
\centering
\includegraphics[width=0.5\columnwidth]{polygon2}
\caption{Random polygon in a polygon.}\label{fig:Polygon}
\end{figure}

The random variable $f_0(P_n)$ has been intensively studied in the literature and has attracted a lot of interest in geometric probability as well as convex and integral geometry. For example, as $n\to\infty$, Rényi and Sulanke in their fundamental article~\cite{RS63} have established the asymptotics
\begin{equation}\label{eq:expPolUnif}
\bbE f_0(P_n)= \frac{2 \ell}{3}\log n + \frac 23 \sum_{i=1}^\ell \log \left(\frac{F_i}{{\area}(P)}\right) + \frac{2 \gamma \ell }3 + o(1),
\end{equation}
for the expected number of vertices of $P_n$, where $\gamma\approx 0.57721\ldots$ is the Euler--Mascheroni constant, $F_i$, $i=1, \dots, \ell$, are the areas of the triangles formed by three consecutive vertices of $P$ (that is, $F_i={\area}([v_{i-1},v_i,v_{i+1}])$ if $v_1,\,\ldots,\,v_\ell$ are the vertices of $P$ with the convention that $v_0=v_\ell$ and $v_{\ell+1}=v_1$). The corresponding variance asymptotics
\begin{equation}\label{eq:varPolUnif}
\var f_0(P_n)=\frac{10\ell}{27}\log n (1+o(1)),
\end{equation}
is due to Groeneboom~\cite{Gro88}. In the same paper, Groeneboom also proved a central limit theorem for $f_0(P_n)$, as $n\to\infty$. Denoting by $\Phi(\,\cdot\,)$ the distribution function of a standard Gaussian random variable, Bárány and Reitzner~\cite{BR06} have obtained the following quantitative version of the central limit theorem:
\begin{equation}\label{eq:BaraReitz}
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{f_0(P_n)-\bbE f_0(P_n)}{\sqrt{\var f_0(P_n)}}\leq x\right)-\Phi(x)\right|\leq c\,\frac{(\log\log n)^{60}}{\sqrt{\log n}},
\end{equation}
where $c>0$ is some constant not depending on $n$. Note that in the published version~\cite{BR10} of~\cite{BR06} the Poissonized model for $P_n$ has been treated, which involves the additional randomization that the number of generating random points is Poisson distributed with mean $n$. The quantitative central limit theorem in~\cite{BR10} for this Poisson random polytope $P_\eta$ comes with an error term $c\, (\log\log n)^{60}/ \sqrt{\log n} $ and thus contains an additional double logarithmic factor like~\eqref{eq:BaraReitz}. The Poisson random polytope $P_\eta$ will also play a prominent role in our analysis. In Theorem~\ref{thm:BerryEsseenPoisson} we will remove the double logarithmic factor in the error term of the Poissonized version, thus improving for dimension two the speed of convergence in the main result of~\cite{BR10}. In the unpublished preprint~\cite{BR06}, a de-Poissonization argument leads to the central limit theorem~\eqref{eq:BaraReitz}. A similar de-Poissonization argument will also be used in Section~\ref{sec:dePoisson} to obtain Theorem~\ref{thm:main}.

The double logarithmic factor also appears in a central limit theorem of Pardon~\cite{Pardon1, Pardon2}, which holds for general planar convex containers. This is one of the few results in the literature, in which the considered class of container sets is not restricted to polygons or convex sets bounded by a sufficiently smooth closed curve. The purpose of the present article is to demonstrate how the double-logarithmic factor in~\eqref{eq:BaraReitz} can be removed for polygons. Since the resulting rate of convergence is then of the same order as $(\var f_0(P_n))^{-1/2}$ and since $f_0(P_n)$ is an integer-valued random variable, an argument of Englund~\cite[Section~6]{Englund} shows that, up to numerical constants, our result is in fact optimal. Moreover, our technique allows us at the same time to determine the precise expansion of~\eqref{eq:varPolUnif} up to the constant-order term, leading to a second-order analogue of the classical result~\eqref{eq:expPolUnif} for the expectation of Rényi and Sulanke. Even further, our technique allows us to make more precise the $o(1)$-term in~\eqref{eq:expPolUnif}.


\begin{theo}\label{thm:main}
For every planar polygon $P\subset\bbR^2$ and $n\geq 2$ we have
\[
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{f_0(P_n)-\bbE f_0(P_n)}{\sqrt{\var f_0(P_n)}}\leq x\right)-\Phi(x)\right|\leq \frac{c}{\sqrt{\log n}},
\]
where $c>0$ is some constant independent of $n$, with
\begin{align*}
\bbE f_0(P_{n}) &= \frac{2 \ell}{3}\log n + \frac 23 \sum_{i=1}^\ell \log \left(\frac{F_i}{{\area}(P)}\right) + \frac{2 \gamma \ell }3 + O\left(n^{-\frac 14}(\log n)^2\right)
\intertext{and}
\var f_0(P_n) &= \frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log \left(\frac{F_i}{{\area}(P)}\right) +
\frac{\left(10 \gamma -2 \pi^2\right)\ell }{27} + O\left(n^{-\frac 14} (\log n)^4\right),
\end{align*}
as $n\to\infty$.
\end{theo}

\begin{rema}
In the course of the proof of Theorem~\ref{thm:main} we obtain in Theorem~\ref{thm:BerryEsseenPoisson} the same statements also for the Poisson random polytope model previously considered in~\cite{BR10}, that is, under the additional randomization that the number of generating points follows a Poisson distribution with mean $n$.
\end{rema}

\begin{rema}
Although this paper focusses on the vertex number of the random polygon $P_n$, we briefly discuss a direct consequence of Theorem~\ref{thm:main} on the area of $P_n$. First, the classical Efron identity~\cite{Ef} connects the expected number of vertices of $P_n$ with its area:
\[
\frac{\bbE{\area}(P_n)}{{\area}(P)} = 1- \frac{\bbE f_0(P_{n+1})}{n+1}.
\]
Applying the expansion for $\bbE f_0(P_{n+1})$ in Theorem~\ref{thm:main} we conclude that, as $n\to\infty$,
\begin{multline*}
\frac{\bbE {\area}(P \setminus P_n)}{{\area}(P)} \\
= \frac{2 \ell}{3} (\log n)n^{-1}+ \left[\frac {2}3 \sum_{i=1}^\ell \log \left(\frac{F_i}{{\area}(P)}\right) + \frac{2 \gamma \ell }3 \right] n^{-1} + O\left(n^{-\frac 54} (\log n)^2\right).
\end{multline*}
Similarly, one can apply Buchta's identity~\cite[Corollary~1]{Bu11}, which connects the variance of the area of $P_n$ with the first two moments of its vertex number:
\[
\frac{\var{\area}(P_n)}{{\area}(P)^2} = \frac{\var f_0(P_{n+2})+A_n-B_n}{(n+1)(n+2)}
\]
with $A_n:=(\bbE f_0(P_{n+2}))^2-\frac{n+2}{n+1}(\bbE f_0(P_{n+1}))^2$ and $B_n:=(2n+3)\bbE f_0(P_{n+2})-2(n+2)\linebreak \bbE f_0(P_{n+1})$. In connection with the variance expansion in Theorem~\ref{thm:main} this leads to
\begin{multline*}
\frac {\var {\area}(P_n)}{{\area}(P)^2} \\ 
= \frac{28 \ell}{27} (\log n)n^{-2} +\left[\frac{28}{27}\sum_{i=1}^\ell \log \left(\frac{F_i}{{\area}(P)}\right) +
\frac{(28 \gamma -2 \pi^2)\ell }{27}\right]n^{-2}
 + O\left(n^{-\frac 94}(\log n)^4\right),
\end{multline*}
as $n\to\infty$.
\end{rema}



We would like to point out that if the container set $P$ of the random polygon $P_n$ has a $C^2$-smooth boundary with everywhere positive curvature, the first Berry--Esseen bound for the vertex number in~\cite{Reitz05} also contained an additional logarithmic factor, and the presumably optimal Berry--Esseen bound has been found in~\cite[Theorem~3.5]{LachSchulteYukich}. Moreover, the approach in~\cite{LachSchulteYukich} even allows one to deal with higher dimensional random polytopes and with other geometric and combinatorial parameters. However, we would like to stress at this point that the transition from smooth container sets to polygons appears to be highly non-trivial. One reason for this fact is the observation that, in contrast to smooth containers, the geometry of a random polygon in a polygon is not locally determined in a sufficiently strong sense. For example, for an arbitrary boundary point $x$ of the container set one can ask for the expected number of vertices of the random polygon $P_n$ one can ``see'' from $x$. While in the smooth case this number stays bounded for large $n$, in case of a polygon it grows to infinity at a double logarithmic speed. These and similar geometric facts are the reason why already the proof of a sub-optimal Berry--Esseen bound like~\eqref{eq:BaraReitz} is considerably more involved compared to its counterpart for smooth container sets. Another aspect to be mentioned in this context is the strong concentration of the number of vertices of a random polygon in a small neighbourhood around the corners of the container polygon. This concentration phenomenon, which apparently does not take place in smoothly bounded container sets, makes it much harder to approximate the random polygon by the so-called floating body associated with the container. More precisely, even the careful refinement from~\cite{BR10} of this approach automatically leads to the double-logarithmic factor in~\eqref{eq:BaraReitz}.

We shall now briefly explain how we overcome these difficulties. In particular, this description makes it evident that our approach cannot be extended to deal with the vertex number, or more generally the number of faces of arbitrary dimension, of convex hulls of random points in polytopes of dimension more than two. The main ingredient we use in the proof of Theorem~\ref{thm:main} is a decomposition of the boundary of the random polygon $P_n$ into so-called random convex chains, where each chain corresponds to one of the corners of the container polygon, see Figure~\ref{fig:Step2}. In fact, the vertex number of $P_n$ is the same as the sum of the number of vertices of these chains, which after Poissonization of the construction become independent random variables. By a suitable affine transformation, each chain can be transformed into the following standard form without changing its combinatorial structure (of course, the number $m$ below depends in a suitable way on the size of the corresponding chain in $P_n$). Consider the triangle $T$ with vertices $(0,0)$, $(0,1)$ and $(1,0)$ and let $T_m$ be the convex hull of $(0,1)$ and $(1,0)$ together with $m\geq 1$ independent and uniformly distributed random points in $T$. The following Berry--Esseen bound for the vertex number $f_0(T_m)$ of the random convex chain $T_m$ has been obtained in~\cite[Corollary~9]{GT21}:
\begin{equation}\label{eq:CLTChain}
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{f_0(T_m)-\bbE f_0(T_m)}{\sqrt{\var f_0(T_m)}}\leq x\right)-\Phi(x)\right|\leq \frac{c}{\sqrt{\log m}},
\end{equation}
for some absolute constant $c>0$ and $m\geq 1$. We would like to mention on this occasion that this result is a consequence of an unexpected connection (which does not persist for polygons different from a triangle) between the random variables $f_0(T_m)$, the location of zeros of certain orthogonal polynomials related to the probability generating function of $f_0(T_m)$ and a Berry--Esseen bound for sums of independent Bernoulli random variables. We remark that~\eqref{eq:CLTChain} is the main motivation behind Theorem~\ref{thm:main} and the highly non-trivial transition from~\eqref{eq:CLTChain} to the Berry--Esseen bound for $f_0(P_n)$ in Theorem~\ref{thm:main} is our main contribution. In particular, it involves a careful merge of the Berry--Esseen bounds of the individual convex chains based on Poissonization and conditioning arguments. A similar strategy will be applied in order to determine the asymptotic behaviour of the expectation and variance of $f_0(P_n)$ in Theorem~\ref{thm:main}.



\section{Preliminaries}\label{sec:Prelim}

In this paper, $[A]$ stands for the convex hull of a set $A\subseteq\bbR^2$, $\# A$ for its cardinality and $\aff(A)$ for the affine hull of $A$. In addition, if $A$ is a line segment, we denote its length by $|A|$. We write $B(x,r)$ for the closed disc centred at $x\in\bbR^2$ with radius $r>0$. Given a set $A\subset \bbR^2$ and a point $x\in\bbR^2$ denote by $d(A,x):=\inf_{y\,\in\,A}\|y-x\|$ the distance between $A$ and $x$. For two functions $f$ and $g$ we write $f=O(g)$ if $\limsup_{x\,\to\,\infty}|f(x)/g(x)|<\infty$ and $f=o(g)$ if $\lim_{x\,\to\,\infty}|f(x)/g(x)|=0$. Given a line $L:=\{(x,y)\in\bbR^2\colon ux+vy=t\}$ we denote by
\[
L^+:= \left\{(x,y)\in\bbR^2\colon ux+vy\ge t\right\}\qquad \text{and}\qquad L^-:= \left\{(x,y)\in\bbR^2\colon ux+vy\leq t\right\}
\]
the positive and negative half-planes into which $L$ divides $\bbR^2$, respectively. Given a convex body $K\subset\bbR^2$ we consider a function $v\colon K\to\bbR$, defined as
\[
v(z):=\min\{{\area}(H\cap K)\colon H\text{ is a half-plane with }z\in H\}.
\]
Then the floating body $K(v\ge \delta)$ with parameter $\delta>0$ is a level set of the function $v$, namely $K(v\ge \delta):=\{z\in K\colon v(z)\ge \delta\}$. The wet part is the set $K(v<\delta):=K\setminus K(v\ge \delta)$. In case of the polygon $P$ with ${\area}(P)=1$ the following formula for the area of the wet part (and an analogous formula for volume in arbitrary dimensions) was independently obtained in~\cite{Schu91} and~\cite{BB93}:
\begin{equation}\label{eq:wetpart}
{\area}(P(v<\delta))=\frac{\ell}{4}\delta \log \frac 1{\delta}\, (1+o(1)),
\end{equation}
where $o(1)$ is meant for $\delta\to 0$.

%%It was first observed by Bárány and Larman~\cite{BarLar} that the random polygon $P_n$ is close to the floating body $P(v \geq n^{-1})$, with similar result for the Poissonized random polytope. This connection was made precise in several aspects. Bárány and Dalla~\cite[Theorem~1]{BD} proved the following fact, see also Bárány~\cite[Theorem~7.4]{Barsurvey}. Note that it is possible to choose the same constant $b_0>0$ in the following two results in which we denote by $\overline{\clA}$ the complement of an event $\clA$.
%%
\begin{lemm}\label{le:floating-Pn}
Choose $n$ independent uniform random points $X_1,\,\dots,\,X_n$ in a container polygon $P$. Then there is a constant $b_0>0$ such that the event $\clA_n := \{ P(v\ge b_0 n^{-1} \log n)\subset P_{n}\}$ satisfies
\begin{equation}
\bbP(\overline{\clA_n}) =O(n^{-6}).
\end{equation}
\end{lemm}

We also have to deal with the Poissonized random polytope in the following. To define this model properly, let $N$ be a Poisson random variable with mean $n\in \bbR_+$, and choose $N$ independent uniform random points $X_1,\,\dots,\,X_N$ in a polygon $P$, which are also independent of $N$. Then $X_1,\,\dots,\,X_N$ is a homogeneous Poisson point process $\eta$ in the polygon $P$ with $\bbE(\#\eta)=n$. We denote its convex hull by $P_\eta=[\eta]$. The next lemma is a combination of~\cite[Lemma~5.3]{BR10} and the Poissonized version of~\cite[Theorem~1]{BD}.

\begin{lemm}\label{le:floating-Peta}
There are constants $b_0, c_0>0$ independent of $n>1$, such that the events
\begin{gather*}
\clA^\pi_n := \left\{P(v\ge b_0 n^{-1} \log n)\subset P_\eta\right\}
\\
\intertext{and}
\clB^\pi_n := \left\{\#\left(\eta \cap P(v< b_0 n^{-1} \log n)\right)\leq c_0 (\log n)^2\right\}
\\
\intertext{satisfy}
\bbP(\overline{\clA^\pi_n}) \leq \bbP(\overline{\clA^\pi_n \cap \clB^\pi_n}) = O\left(n^{-6}\right).
\end{gather*}
\end{lemm}

During the proofs of this paper we switch several times between the Poisson model $P_\eta$ and the binomial model $P_n$ of the random polygons we consider. For this the following two estimates will turn out to be helpful. The first is a slight extension of a result of Matsunawa~\cite{Matsunawa}, the second one estimates the total variation distance between two Poisson random variables.

\begin{lemm}\label{le:diff-Poisson-binom}
Let $n \in \bbN$ and $p>0$. Put $\binom{n}{m}=0$ for $n<m$. Then
\begin{equation*}
\sum_{m=0}^\infty m^k \left|\frac{(np)^m}{m!} e^{-np} - \binom{n}{m} p^m (1-p)^{n-m} \right| \leq
\begin{cases}
2p & k=0, \\
4np^2 & k=1, \\
6np^2 (1+ n p) & k=2.
\end{cases}
\end{equation*}
\end{lemm}


\begin{proof}
The inequality for $k=0$ is due to Matsunawa~\cite{Matsunawa} verifying a conjecture of Vervaat~\cite{Vervaat}. The case $k=1$ follows from
\begin{multline*}
\sum_{m=0}^\infty m \left|\frac{(np)^m}{m!} e^{-np}- \binom{n}{m} p^m (1-p)^{n-m} \right|
\\
\begin{aligned}
&= np\sum_{m=1}^\infty \left|\frac{(np)^{m-1}}{(m-1)!} e^{-np} - \binom{n-1}{m-1} p^{m-1} (1-p)^{n-m} \right|
\\ & = np \sum_{m=0}^\infty \left|\frac{(np)^{m}}{m!} e^{-np} - \binom{n-1}{m} p^{m} (1-p)^{n-m-1} \right|
\\ &\leq 4np^2.
\end{aligned}
\end{multline*}
The case $k=2$ is a combination of case $k=1$ with
\begin{multline*}
\sum_{m=0}^\infty m(m-1) 
\left|\frac{(np)^m}{m!} e^{-np} - \binom{n}{m} p^m (1-p)^{n-m} \right|\\
\begin{aligned}
&\leq  (np)^2 \sum_{m=0}^\infty \left|\frac{(np)^{m}}{m!} e^{-np} - \frac{n-1}{n} \binom{n-2}{m} p^{m} (1-p)^{n-m-2} \right|
\\ &\leq  (np)^2 \sum_{m=0}^\infty \left|\frac{(np)^{m}}{m!} e^{-np} - \binom{n-2}{m} p^{m} (1-p)^{n-m-2} \right|
\\ & \quad+ np^2 \sum_{m=0}^\infty \left| \binom{n-2}{m} p^{m} (1-p)^{n-m-2} \right|
\\ &\leq  6 p (np)^2 + np^2.
\end{aligned}
\end{multline*}
This completes the argument.
\end{proof}


\begin{lemm}\label{le:diff-Poisson-Poisson}
Let $ t > 0 $. Then for $\varepsilon \geq 0$ we have
\begin{equation*}
\sum_{m=0}^\infty \left|\frac{(t+ \varepsilon) ^m}{m!} e^{-(t+ \varepsilon)} - \frac{t^m}{m!} e^{-t} \right| \leq 2 \varepsilon.
\end{equation*}
\end{lemm}


\begin{proof}
The inequality $|ab-cd| \leq |a-c|b + c |b -d| $, valid for $a,b,c,d\geq 0$, shows that
\begin{align*}
\sum_{m=0}^\infty \left|\frac{(t+ \varepsilon)^m}{m!} e^{-(t+ \varepsilon)} - \frac{t^m}{m!} e^{-t} \right| &\leq
\sum_{m=0}^\infty \frac{(t+ \varepsilon)^m-t^m}{m!} e^{- (t+ \varepsilon)} +
\sum_{m=0}^\infty \frac{t^m}{m!} \left(e^{- t} -e^{- (t+ \varepsilon)} \right)
\\ & =
\left(e^{t+ \varepsilon} - e^t \right) e^{- (t+ \varepsilon)} + e^t \left(e^{- t} -e^{- (t+ \varepsilon)} \right)
\\ & = 2(1 - e^{-\varepsilon}) \leq 2 \varepsilon,
\end{align*}
and the proof is complete.
\end{proof}



\section{Proof of Theorem~\ref{thm:main}}\label{sec:ProofSquare}

The strategy of the proof of Theorem~\ref{thm:main} consists of the following main steps:
\begin{description}
\item[{\hyperref[subsec1]{Step 1}}] As a first step we randomize the model further by taking instead of a fixed number $n$ of points, distributed uniformly in $P$, a random number $N$ of points. More precisely, as the number of points we use a Poisson random variable with mean $n$ so that the collection of random points becomes a homogeneous Poisson point process. This construction is known as Poissonization and it appears to be very helpful for our purposes. In fact, after Poissonization the numbers of points in disjoint regions become independent random variables.

\item[{\hyperref[step2]{Step 2}}] In order to prove the variance asymptotic and the Berry--Esseen bound for the Poisson model described in \hyperref[subsec1]{Step 1} we need to introduce an additional construction, which allows us to exclude some ``bad'' and ``unlikely'' events on which we do not have sufficient control on the geometric configuration.

\item[{\hyperref[step3]{Step 3}}] The third step is devoted to the proof of the variance asymptotic and the Berry--Esseen bound for the Poisson random convex chain. The corresponding results for the classical convex chain, proven in~\cite{GT21}, are transformed using the so-called ``transfer lemma'', which has been used in similar situations before in the literature, see~\cite{BR06,BR10, BV06,Reitz05,Vu06}.

\item[{\hyperref[step4]{Step 4}}] The fourth step is concerned with the proof of the variance asymptotic and the Berry--Esseen bound for the Poisson random polygon in $P$ under the conditions introduced in \hyperref[step2]{Step 2}. As already explained in the introduction, the idea of the proof is to decompose the boundary of the random polygon into independent random convex chains, each of which corresponds to one corner of the container polygon. The result can now be obtained from the corresponding result for the Poissonized convex chain from \hyperref[step3]{Step 3}.

\item[{\hyperref[step5]{Step 5}}] In this step we remove the constraints introduced in \hyperref[step2]{Step 2} and show that this does not change the quality of the variance asymptotic and the Berry--Esseen bound for the Poisson model.

\item[{\hyperref[sec:dePoisson]{Step 6}}] In the last step we deduce the variance asymptotic and the Berry--Esseen bound for the original model from the one for the Poisson model via de-Poissonization. The transition from the Poisson model to the binomial one is made using again the ``transfer lemma''.
\end{description}

In order to simplify some of our arguments we can and will from now on assume that the container polygon $P$ has area one. This is indeed possible as the vertex number of $P_n$ does not change under rescaling of the container polygon $P$.


\subsection{Step 1: Introducing the Poisson model} \label{subsec1}

Let $\eta$ be a homogeneous Poisson point process in the container polygon $P$ with $\bbE(\#\eta) = n$. Formally, $\eta$ can be constructed as follows. Let $N$ be a Poisson random variable with mean $n$ and, independently of $N$, let $X_1,X_2,\,\ldots$ be a sequence of independent and uniformly distributed random points in $P$. Then $\eta$ can be defined as the random set of points $\{X_1,\,\ldots,\,X_N\}$, which is interpreted as the empty set if $N=0$, an event having probability $e^{-n}$. We will refer to $\eta$ as a homogeneous Poisson point process with intensity $n$ on $P$. The well-known multivariate Mecke formula is a useful tool to compute expectations of Poisson functionals, see~\cite[Theorem~4.1]{LP}. It says that
\begin{multline}\label{eq:Mecke}
\bbE \sum_{(X_1,\,\ldots,\,X_k)\,\in\,\eta^k_{\neq}} f(X_1,\,\ldots,\,X_k,\eta) \\
= n^k \, \bbE \int\limits_{P^k} f\big(x_1,\,\ldots,\,x_k,\eta\cup\{x_1,\,\ldots,\,x_k\}\big)\,\mathrm{d} x_1\ldots \mathrm{d} x_k,
\end{multline}
where $\eta^k_{\neq}$ denotes the set of all $k$-tuples of distinct points of $\eta$, and $f$ is a non-negative measurable function acting on a $k$-tuple of points and a locally-finite point configuration in $\bbR^2$ to which these points belong.


We consider now the random polygon $P_{\eta}$, which (we recall) was defined as the convex hull of the Poisson point process $\eta$, that is, $P_{\eta}=[\eta]$. There exists a clear connection between the random polygons $P_n$ and $P_{\eta}$, namely, $P_n$ has the same distribution as $P_\eta$, given that the number of points $N=\#\eta$ of $\eta$ is equal to $n$:
\[
P_n\stackrel{d}{=}(P_{\eta}\,|\,\#\eta = n).
\]
The Poisson random polygons $P_{\eta}$ have been intensively studied. Bárány and Reitzner~\cite{BR10} and Yukich and Calka~\cite{CY17} showed that the number $f_0(P_\eta)$ of vertices of $P_\eta$ satisfies
\begin{align*}
\bbE f_0(P_{\eta})&=\frac{2 \ell}{3} \log n (1+o(1)),
\\
\var f_0(P_{\eta})&= c_2 \ell \log n (1+o(1))
\end{align*}
for some constant $c_2>0$. In fact, more general results have been proven for arbitrary dimensions. For the first formula see~\cite[Theorem~1.2]{BR10}, where also a lower bound for the variance was obtained, and an upper bound for the variance appears in~\cite[Theorem~1.1]{BR10a}. For the precise asymptotics for the variance see~\cite[Theorem~1.3]{CY17}. In addition, in~\cite{BR10a} a Berry--Esseen bound for $f_0(P_\eta)$ is shown, which involves a double logarithmic factor as in~\eqref{eq:BaraReitz}. As explained above, we will give more precise estimates for the moments in Theorem~\ref{thm:BerryEsseenPoisson} and remove the double-logarithmic factor in the central limit theorem for this Poisson random polygon model $P_\eta$, and eventually carry these results to $P_n$.


\subsection{Step 2: Fixing the construction}\label{step2}

\begin{figure}[!hb]
\centering
\begin{tikzpicture}
\node at (0,0) {\includegraphics[width=0.8\textwidth]{polygon-triangle}};
\node at (-2.8,-0.9) {\small $Z_1$};
\node at (2.8,-1.5) {\small $Z_2$};
\node at (5,2.7) {\small $Z_3$};
\node at (-5.3,2.2) {\small $Z_\ell$};

\node at (0.4,-2.8) {\small $V_1$};
\node at (4.2,-0.7) {\small $V_2$};
\node at (-4.9,0.5) {\small $V_\ell$};

\node at (-5.8,-0.7) {\small $v_0=v_{\ell}$};
\node at (-2.7,-2.5) {\small $e_1$};
\node at (-0.2,-3.9) {\small $v_1$};
\node at (2.5,-2.5) {\small $e_2$};
\node at (5.1,-0.8) {\small $v_2$};

\node at (-1.2,-1.8) {\footnotesize $\delta_{1,1}$};
\node at (1.6,-2.1) {\footnotesize $\delta_{1,2}$};

\node at (-0.5,0) {\footnotesize $\Delta_1$};
\draw (-0.2,-1.4) -- (-0.6,-0.2);

\node at (3,1.1) {\small $\Delta_2$};
\draw (3,0.9) -- (3.6,0);

\node at (-3.5,1.5) {\small $\Delta_\ell$};
\draw (-3.7,1.3) -- (-4.4,1);
\end{tikzpicture}
\caption{Illustration of the construction in \hyperref[step2]{Step 2}.}\label{fig:Step2}
\end{figure}

Let $v_1,\,\ldots,\,v_\ell$ be the $\ell\geq 3$ consecutive vertices of the polygon $P$ and $e_1,\,\ldots,\,e_\ell$ be the $\ell$ consecutive edges of $P$, where $e_i =[v_{i-1}, v_i]$ with the convention that here and in what follows the index is taken modulo $\ell$, e.g.\ $v_0=v_\ell$. Further, denote by $\ell_i$ the length of the edge $e_i$, by $\alpha_i$ the angle of $P$ at the vertex $v_i$ and by $F_i$ the area of the triangle $[v_{i-1}, v_i, v_{i+1}]=[e_i, e_{i+1}]$, $1\leq i\leq \ell$. For every edge $e_i$, $1\leq i\leq \ell$, if $\eta\neq \varnothing$ there is one point $Z_i=Z_i(\eta):=(x_i,y_i)$ from the Poisson point process $\eta$, such that either $L^+(Z_i)\cap \eta=\{Z_i\}$ or $L^-(Z_i)\cap \eta=\{Z_i\}$, where $L(Z_i)$ denotes the line parallel to $e_i$ passing through $Z_i$, see Figure~\ref{fig:Step2}. If $\eta=\varnothing$ we set $Z_i$, $1\leq i\leq \ell$ to be the vertex of $P$ maximizing $d(y,\aff(e_i))$, $y\in P$. If there are several such vertices, we take the lexicographically smallest one. Without loss of generality we assume that $L(Z_i)$ is parametrized in a way such that $L^-(Z_i)\cap \eta=\{Z_i\}$ and thus $\eta \subset L^+(Z_i)$. It might happen that for some $1\leq i\leq \ell$ we have $Z_i=Z_{i+1}$ with $Z_{\ell +1}=Z_1$ or even $\eta=\varnothing$, but we will treat these situations separately and then, by a conditioning argument, exclude them from the forthcoming discussion.

Denote the point of intersection of $L(Z_i)$ and $L(Z_{i+1})$ by $V_{i}:=L(Z_i)\cap L(Z_{i+1})$, see Figure~\ref{fig:Step2}, and put $\Delta_i:=[Z_i,V_{i},Z_{i+1}]$, $1\leq i\leq \ell$, and $\Delta_i := Z_i$ if $Z_i=Z_{i+1}$. The length of the edges of the triangle $\Delta_i$ with vertex $V_i$ are denoted by $\delta_{i,i}=\|Z_i-V_i\| $ and $\delta_{i,i+1}= \| V_i - Z_{i+1} \| $.

In the next sections we will need the first moments and covariances of the logarithmic area of $\Delta_i$, and the probability that the triangle is not too small. We start with the following lemma.

\begin{lemm}\label{lm:smalldistanceedge}
There is a constant $c_P>0$ depending only on $P$, such that for $1\leq i\leq \ell$ we have
\[
\bbP\big(d(Z_{i}, \aff(e_i)) \geq x\big) \leq e^{- c_P n x}.
\]
\end{lemm}

\begin{proof}
If $\eta=\varnothing$ then $Z_i$ is the vertex of $P$ maximizing the distance, and the inequality is trivial. Recall that $\ell_i$ denotes the length of $e_i$. If $d(Z_{i}, \aff(e_i)) \geq x $ and $m$ is some point in $P$ with $d(m, \aff(e_i)) = x $, then $L^-(Z_i) \cap P$ contains the triangle $[e_i, m]$ with base length $\ell_i$ and height $x$,
\[
{\area}([e_i, m]) = \frac 12 \ell_i x.
\]
On the other hand, according to the definition of $Z_i$ we have $\eta \subset L^+(Z_i)$ and, thus, the above triangle with base $\ell_i$ and height $x$ does not contain points of $\eta$. Thus,
\[
\bbP\big(d(Z_{i}, \aff(e_i)) \geq x\big) \leq
\bbP\big(\sharp (\eta \cap [e_i, m]) =0\big) = e^{- n \, \frac 12 \ell_i x }.
\]
The result follows with $c_P:= \min_i \frac{\ell_i}2 $.
\end{proof}

From now on we assume that $d(Z_i, \aff(e_i)) \leq n^{- \frac 34}$, $Z_i \neq Z_{i+1}$, and $\delta_{i,j} \geq \tilde c_P n^{- \frac 14}$ with $\tilde c_P:=\sqrt{2}(\min_{1\,\leq\,j\,\leq\,\ell} \sin \alpha_j)^{-\frac12}$ for $1\leq i\leq \ell$ and $j=i, i+1$ (note that the latter condition already implies that $Z_i\neq Z_{i+1}$, which we have included only for convenience). We denote this event by $\clE$. Observe that given $\clE$ we have
\begin{equation}\label{eq:Delta>n-12}
{\area}(\Delta_i) \geq n^{- \frac 12}.
\end{equation}
In a first step we show that $\clE$ happens with high probability.

\begin{coro}\label{cor:complcE}
There are constants $\underline c_P, \overline c_P>0$ only depending on $P$, such that
\[
\underline c_Pn^{- 1}\leq \bbP(\overline{\clE})\leq \overline c_P n^{-\frac 14}.
\]
\end{coro}

\begin{proof}
First of all note that
\begin{multline}\label{eq:19.04.23_1}
\bbP(\overline{\clE})\leq \sum_{i=1}^\ell \bbP\left(d(Z_i, \aff(e_i)) > n^{- \frac 34}\right)+ \sum_{i=1}^\ell \bbP(Z_i= Z_{i+1})\\
+ \sum_{i=1}^\ell \sum_{j=i}^{i+1} \bbP\left(0<\delta_{i,j}<\tilde c_P n^{-1/4}, \ d(Z_k, \aff(e_k)) \leq n^{- \frac 34},\ \forall k=1,\ldots,\ell\right).
\end{multline}
The event that $d(Z_i, \aff(e_i)) > n^{- \frac 34}$ occurs according to Lemma~\ref{lm:smalldistanceedge} with probability $\leq e^{- c_P n^{ \frac 14} }$. The probability of the event that $V_i=Z_i=Z_{i+1}$ for some $i=1,\,\ldots,\,\ell$, again according to Lemma~\ref{lm:smalldistanceedge}, can be estimated by
\begin{multline*}
\sum_{i=1}^\ell\bbP(Z_i=Z_{i+1})
\\
\begin{aligned}
& \leq
\sum_{i=1}^\ell \bbP\left(Z_i=Z_{i+1}, d(Z_i, \aff(e_i)) \leq n^{- \frac 34}, d(Z_{i+1}, \aff(e_{i+1})) \leq n^{- \frac 34}\right) + 2\ell e^{- c_P n^{\frac 14}}
\\ & \leq
\sum_{i=1}^\ell \bbP\left(\#\left(\eta \cap B\left(v_i,(\sin(\alpha_i/2))^{-1}n^{- \frac 34}\right)\right) \geq 1\right) + 2\ell e^{- c_P n^{\frac 14}}
\\ & \leq
\ell \left[1-\exp\left(-2 \pi\max_{1\,\leq\,i\,\leq\,\ell}(\sin(\alpha_i/2))^{-2} n^{- \frac 12} \right)\right] + 2\ell e^{- c_P n^{\frac 14}} = O\left(n^{- \frac 12}\right).
\end{aligned}
\end{multline*}
Finally, we estimate the probabilities that $0<\delta_{i,j} < \tilde c_P n^{- \frac 14}$ for $i=1,\,\ldots,\,\ell$ and $j=i,i+1$ using the multivariate Mecke formula~\eqref{eq:Mecke}. Denote by $L_i(x)$ the line through $x$ parallel to $e_i$ and by $L_i^+(x)$ the corresponding halfplane not containing the edge $e_i$. Taking for simplicity $i=1, j=2$ we have for $\delta_{1,2}=\delta_{1,2}(\eta)$ that
\begin{align*}
&\bbP \left(0<\delta_{1,2} < \tilde c_P n^{- \frac 14}, \ d(Z_k, \aff(e_k)) \leq n^{- \frac 34},\ \forall k=1,\ldots,\ell \right)
\\
& =
\bbE \sum_{(X_1, X_2,X_3)\,\in\,\eta_{\neq}^3} \ind \left(0<\delta_{1,2}(\eta)< \tilde c_P n^{- \frac 14}\right) \ind\left(\eta \subset L_1^+(X_1) \cap L_2^+ (X_2)\cap L_3^+ (X_3)\right)\\
&\hspace{2.3cm}\times
\ind\big(d(Z_k, \aff(e_k)) \leq n^{- \frac 34}\ \forall k=1,\ldots,\ell \big)
\\[1ex] &= n^3 \bbE \int\limits_{P}\int\limits_{P}\int\limits_{P} \ind\left(0<\delta_{1,2}(\eta\cup\{x_1,x_2,x_3\}) < \tilde c_P n^{- \frac 14}\right)
\\ &\hspace{2.3cm}\times
\ind \big(\eta\cup\{x_1,x_2,x_3\} \subset L_1^+(x_1) \cap L_2^+ (x_2)\cap L_3^+ (x_3)\big)
\\ &\hspace{2.3cm}\times
\ind\big(d(Z_k(\eta\cup\{x_1,x_2,x_3\}), \aff(e_k)) \leq n^{- \frac 34}\ \forall k=1,\ldots,\ell \big) \,\mathrm{d} x_1\mathrm{d} x_2\mathrm{d} x_3.
\end{align*}
Let us remark that $(X_1, X_2,X_3) \in \eta_{\neq}^3$ in the second line automatically implies that $Z_1=X_1$, $Z_2=X_2$ and $Z_3=X_3$ are almost surely distinct points. Also, the random variable $\delta_{1,2}(\eta)$ is determined by the random points $X_1$ and $X_2$ in the second and $\delta_{1,2}(\eta\cup\{x_1,x_2,x_3\})$ by the points $x_1$ and $x_2$ in the third line. Recall that $V_1$ and $V_2$ are the points on $L_2(x_2)$ whose positions are determined by $x_1,x_2$ and $x_3$, see Figure~\ref{fig:Step2} (in fact, the dependence of $V_2$ on the position of $x_3$ is the reason why we consider the three points $x_1,x_2,x_3$). Now, we fix $V_1, V_2$ and first integrate $x_2$ along the segment $[V_1, V_2] \subset L_2(x_2) \cap P$. Without loss of generality assume that the edge $e_2$ is parallel to the one of the axes in $\bbR^2$. In this case we have $x_2=(x_{2,1},x_{2,2})$ with (say) $x_{2,2}=d(Z_2,\aff(e_2))$ and, thus, $V_1,V_2$ depend on $x_1,x_3$ and $x_{2,2}$ only. In these new coordinates we perform the integration with respect to $x_{2,1}\in [V_1, V_2]$. Because~of
\[
\int\limits_{V_1}^{V_2} \ind\left(\delta_{1,2} <\tilde c_P n^{- \frac 14}\right) \mathrm{d} x_{2,1}
\leq
\tilde c_P n^{- \frac 14} =
\tilde c_P n^{- \frac 14} \| V_1-V_2 \|^{-1} \int\limits_{V_1}^{V_2} \mathrm{d} x_{2,1}
\]
and $\| V_1-V_2 \| = \ell_2(1+O(n^{- \frac 34}))$, which holds under the assumption that $d(Z_k, \aff(e_k))\linebreak \leq n^{- \frac 34}$ for all $k=1,\,\ldots,\,\ell$, we have
\begin{align*}
&\bbP \left(0<\delta_{1,2} < \tilde c_P n^{- \frac 14}, \ d(Z_k, \aff(e_k)) \leq n^{- \frac 34} \ \forall k=1,\,\ldots,\,\ell \right)
\\ & \leq
\tilde c_P n^{- \frac 14} \ell_2^{-1} \left(1+O(n^{- \frac 34})\right) n^3 \bbE \int\limits_{P}\int\limits_{P}\int\limits_{P}
\ind \big(\eta \subset L_1^+(x_1) \cap L_2^+ (x_2)\cap L_3^+ (x_3)\big)\\
&\qquad \times
\ind\big(d(Z_k(\eta\cup\{x_1,x_2,x_3\}), \aff(e_k)) \leq n^{- \frac 34} \ \forall k=1,\ldots,\ell \big)\mathrm{d} x_1\mathrm{d} x_2\mathrm{d} x_3
\\ & =
\tilde c_P n^{- \frac 14} \ell_2^{-1} \left(1+O(n^{- \frac 34})\right) \bbP\Big(0<\delta_{1,2},\, d(Z_k, \aff(e_k)) \leq n^{- \frac 34} \ \forall k=1,\ldots,\ell \Big)
\\ & \leq
\tilde c_P n^{- \frac 14} \ell_2^{-1} \left(1+O(n^{- \frac 34})\right) \left(1- \ell e^{-c_P n^{\frac 14}}\right) = O\left(n^{- \frac 14}\right),
\end{align*}
where in the third line we have applied backwards the same argument, based on the multivariate Mecke formula, as above. Combining the three estimates with~\eqref{eq:19.04.23_1} yields the right hand side of the inequality.

On the other hand, there is some small $c_1>0$ depending on $P$ such that the probability that $B(v_1, n^{-1}) \cap P$ contains precisely one point of $\eta$ is
\begin{align*}
\bbP\left(\#(\eta \cap B(v_1, n^{- 1})) = 1\right) &= n\, {\area}\left(B(v_1, n^{- 1}) \cap P\right) e^{- n\, {\area}\left(B\left(v_1, n^{- 1}\right)\,\cap\,P\right) } \\
&= c_1 n^{- 1} e^{-c_1 n^{- 1}}
\end{align*}
for $n$ sufficiently large. The area of the parallel strips along the edges $e_1$ and $e_{2}$ of width $n^{- 1}$ without the disk $B(v_1, n^{- 1})$ is upper bounded by $c_2 n^{-1}$ with some $c_2>0 $ depending on $P$. Hence, the probability that this region contains no points of $\eta$ is lower bounded by $ e^{- c_3} $ with $c_3>0$ depending on $P$. If these independent events occur then the single point in $B(v_1, n^{- 1})$ is just $Z_1=Z_2$, which proves the left hand side inequality, that is
\begin{align*}
\bbP(\overline{\clE})
\geq
\bbP(Z_1=Z_{2}) & \geq c_1 n^{- 1} e^{-c_1 n^{- 1} -c_3}
\geq \underline{c}_P n^{- 1}.
\end{align*}
This completes the argument.
\end{proof}


In the second step we investigate the moments and mixed moments of $\log \delta_{i,i}$ and $\log \delta_{i,i+1}$ under the condition $\clE$.

\begin{lemm}\label{lm:distancesmall}
Let $i,j,k,l\in\{1,\,\ldots,\,\ell\}$ with $j=i,i+1$ and $l=k,k+1$. The first two logarithmic moments of $\delta_{i,j}$ satisfy
\begin{align}
\bbE (\log \delta_{i,j} |\clE) &=\log \ell_j -1 + O\left(n^{- \frac 14} \log n\right)\label{eq:E-log-delta}\\
\intertext{and}
\bbE \left((\log \delta_{i,j})^2 \,\middle|\,\clE\right) &=(\log \ell_j-1)^2 + 1 +O\left(n^{- \frac 14}(\log n)^2\right).\label{eq:E-log-delta-sqr}\\
\intertext{For the mixed logarithmic moments we obtain}
\bbE\big[(\log \delta_{i,i+1}) (\log \delta_{i+1,i+1})\,\big|\,\clE\big] &= (\log \ell_{i+1} -1)^2 + 1- \frac {\pi^2}6 + O\left(n^{- \frac 14} \log n\right)
\label{eq:E-log-delta-neighbors}\\
\intertext{and for $j \neq l$,}
\bbE[(\log \delta_{i,j}) (\log \delta_{k,l})|\clE]& = (\log \ell_{j} -1)(\log \ell_{l} -1) + O\left(n^{- \frac 14} \log n\right).
\label{eq:E-log-delta-nonneighbors}
\end{align}
\end{lemm}

\begin{proof}
We assume the event $\clE$ and prove~\eqref{eq:E-log-delta} e.g.\ for $\delta_{i,i+1}$ and $i=1$. Our argument will be similar to that in the proof of Corollary~\ref{cor:complcE} and we will also use the same notation introduced as there. In particular, recall that $L_i(x)$ is the line through $x$ parallel to $e_i$ and $L_i^+(x)$ the corresponding halfplane not containing the edge $e_i$. The multivariate Mecke formula~\eqref{eq:Mecke} yields
\begin{align*}
\bbE \left(\log \delta_{1,2} \ind(\clE)\right) & =
\bbE \sum_{(X_1, X_2, X_3)\,\in\,\eta_{\neq}^3} \log \delta_{1,2}(\eta)\, \ind\big(\eta \subset L_1^+(X_1) \cap L_2^+ (X_2) \cap L_3^+ (X_3), \clE\big)
\\ &= n^3 \bbE \int\limits_{P}\int\limits_{P}\int\limits_{P} \log \delta_{1,2}(\eta\cup\{x_1,x_2,x_3\})
\\ &\qquad \times
\ind\left(\eta\cup\{x_1,x_2,x_3\}\subset L_1^+(x_1) \cap L_2^+ (x_2) \cap L_3^+ (x_3)\right)
\\ &\qquad \times
\ind(\clE(\eta\cup\{x_1,x_2,x_3\}))\, \mathrm{d} x_2 \mathrm{d} x_3 \mathrm{d} x_1.
\end{align*}
As before, we assume without loss of generality that $e_2$ is parallel to one of the axes of $\bbR^2$ and that $x_2=(x_{2,1},x_{2,2})$ with $x_{2,2}=d(Z_2,\aff(e_2))$. Thus, $L_2 (x_2), V_1,V_2$ depend on $x_1,x_3$ and $x_{2,2}$ only. We integrate $x_{2,1}$ on the line segment $[V_1,V_2]$ as in Figure~\ref{fig:Step2}. Moreover, note that given $\clE$, we have $\| V_1-V_2 \| = \ell_2(1+O(n^{- \frac 34}))$. Because $ \int \log x\, \mathrm{d} x = (\log x - 1) x $ we get
\begin{multline*}
\int\limits_{V_1}^{V_2} \log \delta_{1,2}(\eta\cup\{x_1,x_2,x_3\}) \ind(\clE(\eta\cup\{x_1,x_2,x_3\})) \, \mathrm{d} x_{2,1}\\
\begin{aligned}
&=\int_{\tilde c_P n^{- \frac 14}}^{\|V_1-V_2\|- \tilde c_P n^{- \frac 14}} \log x \, \mathrm{d} x \\
&= \left(\log \ell_2 - 1 +O\left(n^{- \frac 14}\right)\right) \left(\ell_2 + O\left(n^{- \frac 14}\right)\right) - O\left(n^{- \frac 14} \log n\right)
\\ &=
\left(\log \ell_2 - 1 +O\left(n^{- \frac 14} \log n\right)\right) \int_{\tilde c_P n^{- \frac 14}}^{\|V_1-V_2\|- \tilde c_P n^{- \frac 14}} \, \mathrm{d} x
\\ &=
\left(\log \ell_2 - 1 +O\left(n^{- \frac 14} \log n\right)\right) \int\limits_{V_1}^{V_2} \ind\big(\clE(\eta\cup\{x_1,x_2,x_3\})\big) \, \mathrm{d} x_{2,1}.
\end{aligned}
\end{multline*}
Thus, using again the multivariate Mecke formula~\eqref{eq:Mecke},
\begin{align*}
\bbE (\log \delta_{1,2} \ind(\clE)) &= \left(\log \ell_2 - 1 +O\left(n^{- \frac 14} \log n\right)\right) \\
&\quad\times n^3 \bbE \int\limits_{P}\int\limits_{P}\int\limits_{P}
\ind\big(\eta\cup\{x_1,x_2,x_3\}\subset L_1^+(x_1) \cap L_2^+ (x_2) \cap L_3^+ (x_3)\big)
\\ &\quad\times
\ind\left(\clE\left(\eta\cup\{x_1,x_2,x_3\}\right)\right)\, \mathrm{d} x_2 \mathrm{d} x_3 \mathrm{d} x_1
\\&= \left(\log \ell_2 - 1 +O\left(n^{- \frac 14} \log n\right)\right) \bbE \ind (\clE),
\end{align*}
and
\[
\bbE (\log \delta_{1,2} |\clE) =
\log \ell_2 - 1 +O(n^{- \frac 14} \log n).
\]
In this way we obtain Equation~\eqref{eq:E-log-delta}. In the same way, this time using $ \int (\log x)^2 \mathrm{d} x = ((\log x)^2 - 2 \log x +2) x $, one proves~\eqref{eq:E-log-delta-sqr}.

Analogously, using the identity $\int_0^{a} \log x \log(a-x) \mathrm{d} x = a [(\log a -1)^2 + 1 - \frac {\pi^2}6]$ and thus
\begin{align*}
\int\limits_\epsilon^{a-\epsilon} \log x \log(a-x) \mathrm{d} x &=
\int\limits_0^{a} \log x \log(a-x) \mathrm{d} x - 2 \int\limits_0^{\epsilon} (\log a+ O(\epsilon)) \log x \mathrm{d} x
\\ &= a \left[(\log a -1)^2 + 1 - \frac {\pi^2}6 + O(\epsilon \log \epsilon)\right],
\end{align*}
we obtain for the expectation of the product of the two neighbouring log-distances
\[
\bbE ((\log \delta_{1,2})(\log \delta_{2,2})| \clE) = (\log \ell_2 -1)^2 + 1- \frac {\pi^2}6 + O\left(n^{- \frac 14} \log n\right).
\]
Considering the product of two distances not on the same line $L_i(\,\cdot\,)$, e.g.
\[
\bbE \left(\log \delta_{1,1} \log \delta_{1,2} | \clE\right)
\quad\text{ or }\quad
\bbE (\log \delta_{1,2} \log \delta_{2,3} | \clE)
\]
we rewrite this as a multiple integral using the multivariate Mecke formula once again, and integrate first with respect to $x_{2,1}$ to obtain
\begin{align*}
\bbE \big((\log \delta_{1,1})(\log \delta_{1,2})\,\big|\, \clE\big) &= \left(\log \ell_2 - 1 +O\left(n^{- \frac 14} \log n\right)\right) \bbE (\log \delta_{1,1} | \clE)
\\ &= (\log \ell_2 - 1) (\log \ell_1 - 1) +O\left(n^{- \frac 14} \log n\right),
\end{align*}
and similarly for all other cases. The proof is thus completed.
\end{proof}

The conditional second moments can be written in a more concise way as the conditional variance and covariance of the involved quantities.
\begin{coro}
For $i,j,k,l\in\{1,\,\ldots,\,\ell\}$ with $j=i,i+1$ it holds that
\begin{equation}\label{eq:var-delta_i}
\var \left(\log \delta_{i,j}\,\middle|\,\clE\right) = 1 + O\left(n^{- \frac 14} (\log n)^2\right)
\end{equation}
and with $j=i,i+1$, $l=k,k+1$, and $(i,j) \neq (k,l)$,
\begin{equation}\label{eq:cov-delta_i}
\cov \left(\log \delta_{i,j}, \log \delta_{k,l}\,\middle|\,\clE\right) =
\ind(l=j)\left(1-\frac{\pi^2}6\right) + O\left(n^{- \frac 14} \log n\right).
\end{equation}
\end{coro}

For $i\in\{1,\,\ldots,\,\ell\}$, the logarithmic area of the triangle $\Delta_i$ equals
\[
\log {\area}(\Delta_i) =
\log \frac{\sin \alpha_i}2 + \log \delta_{i,i} + \log \delta_{i,i+1}.
\]
Because the edges of $\Delta_i$ of length $\delta_{i,i}$ and $\delta_{i,i+1}$ are parallel to $e_i$ and $e_{i+1}$, we see that
\[
F_i= \frac{\sin \alpha_i}2 \ell_i \ell_{i+1},\qquad 1\leq i\leq \ell,
\]
is the area of the triangle $[v_{i-1}, v_i, v_{i+1}]=[e_i, e_{i+1}]$. Lemma~\ref{lm:distancesmall} yields immediately the conditional expectation, variance and covariances of $\log{\area}(\Delta_i)$. For example,
\[
\cov \big(\log {\area}(\Delta_i), \log {\area} (\Delta_k)\,\big|\,\clE\big) =
\sum_{j=i, i+1,\ l=k, k+1} \cov \left(\log \delta_{i,j}, \log \delta_{k,l}\, \middle|\,\clE\right).
\]
Combined with the formula for the conditional variance~\eqref{eq:var-delta_i} and covariance~\eqref{eq:cov-delta_i} this yields the following result.

\begin{coro}\label{cor:moments-logarea}
For $i,k\in\{1,\,\ldots,\ell\}$ one has that
\begin{align}
\bbE (\log {\area} (\Delta_i)|\clE) &= \log F_i -2 + O\left(n^{- \frac 14} \log n\right),\label{eq:E-log-area} \\
\var (\log {\area} (\Delta_i) |\clE) &= 2+ O\left(n^{- \frac 14} (\log n)^2\right),\label{eq:Var-log-area}\\
\intertext{and}
\cov (\log {\area}(\Delta_i), \log {\area} (\Delta_k) |\clE) &\label{eq:Covar-log-area}\\
=\ind(|i-k|=1) &\left(1 - \frac{\pi^2}6 \right) + O\left(n^{- \frac 14} \log n\right),\nonumber
\end{align}
for $k\neq i$.
\end{coro}



\subsection{Step 3: Berry--Esseen bound for the Poisson random convex chain}\label{step3}

Let $T$ be the canonical triangle with vertices $(0,1)$, $(0,0)$ and $(1,0)$, and $\chi$ be a homogeneous Poisson point process with $\bbE(\#\chi)=M>1$ in $T$. The convex hull of the two vertices $(0,1), (1,0)$ and the points of $\chi$ is denoted by
\[
T_{\chi}:= [\chi, (0,1),(1,0)].
\]
Denote by $f_0(T_\chi)$ the number of vertices of $T_\chi$. In order to obtain the Berry--Esseen bound for the Poissonized version of the random convex chain as in~\eqref{eq:CLTChain} we will use the following transfer lemma taken from~\cite[Lemma~3.2]{BR10}. The proof of this lemma can be found, for example, in~\cite{BV06} (see also the remark after~\cite[Lemma~3.2]{BR10}).


\begin{lemm}\label{lm:transference}
Given two sequences of random variables $\xi_n$ and $\xi'_n$ with means $\mu_n\in\bbR$ and $\mu_n'\in\bbR$, and variances $0<\sigma_n^2<\infty$ and $0<\sigma_n^{'2}<\infty$, respectively. Assume that there are sequences $\varepsilon_1(n)$, $\varepsilon_2(n)$, $\varepsilon_3(n)$ and $\varepsilon_4(n)$, all tending to zero as $n\to \infty$, such that
\begin{enumerate}\romanenumi
\item\label{lemm3.6.1} $|\mu_n'-\mu_n|\leq \varepsilon_1(n)\sigma_n$,
\item\label{lemm3.6.2} $|\sigma_n^{'2}-\sigma_n^2|\leq \varepsilon_2(n)\sigma_n^{2}$,
\item\label{lemm3.6.3} for every $x\in\bbR$, $|\bbP(\xi_n'\leq x)-\bbP(\xi_n\leq x)|\leq \varepsilon_3(n)$,
\item\label{lemm3.6.4} for any $x\in\bbR$, $
|\bbP(\frac{\xi_n'-\mu_n'}{\sigma_n'}\leq x)-\Phi(x)|\leq \varepsilon_4(n). $
\end{enumerate}
Then there is a positive constant $C>0$ such that
\[
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{\xi_n-\mu_n}{\sigma_n}\leq x\right)-\Phi(x)\right|\leq C\sum_{i=1}^4\varepsilon_i(n).
\]
\end{lemm}

In order to verify conditions~\eqref{lemm3.6.1} and~\eqref{lemm3.6.2}  of the previous lemma for our model we derive the following asymptotic formulas for the expectation and the variance of $f_0(T_{\chi})$. For later purposes we also formulate our result for general homogeneous Poisson point processes.

\begin{lemm}\label{lm:estimatesPoissonChain}
Consider a homogeneous Poisson point process $\chi$ in the canonical triangle $T$ with $\bbE(\# \chi)=M>1$. Then
\begin{align}
\bbE f_0(T_{\chi})&=\frac{2}{3}\log M + \frac {2\gamma +7}3 +O\left(M^{-1/2}\right),\label{eq:poissonExp}
\\
\var f_0(T_{\chi})&= \frac {10}{27} \log M + \frac{10 \gamma + 2\pi^2 - 28 }{27} + O\left(M^{-1/2}\right),\label{eq:poissonVar}
\end{align}
as $M\to\infty$.
\end{lemm}


\begin{proof}
Denote by $H_n = \sum_{i=1}^{n} \frac 1i$ the $n^{\rm th}$ harmonic sum and by $H^{(2)}_n = \sum_{i=1}^{n} \frac 1{i^2}$ the $n^{\rm th}$ harmonic sum of second order. Set $H_0= H^{(2)}_0:=0$ for convenience. It is well known that
\begin{align}
H_n &= \log n +\gamma + O(n^{-1}) \label{eq:HarmNum1}
,\\
H^{(2)}_n &= \frac{\pi^2}6+O(n^{-1}) \label{eq:HarmNum2}
,
\end{align}
as $n \to \infty$. Recall that $\gamma$ is the Euler--Mascheroni constant. Let $T_k$, $k\ge 1$ be the random convex chain, which is built from the sample of $k$ independent random points $Y_1,\ldots, Y_k$, uniformly distributed inside the canonical triangle $T$. That is, $T_k:=[Y_1,\,\ldots,\,Y_k,(0,1),(1,0)]$. It is known from~\cite[Corollary~1 and Corollary~2]{Buch12} that
\begin{align}\label{eq:expUnif}
\bbE f_0(T_k)&= \frac{2}{3} H_k + \frac 73,
\\\label{eq:varUnif}
\var f_0(T_k)&=\frac{10}{27} H_k + \frac{4}{9} H^{(2)}_k - \frac{28}{27 } + \frac{4}{9(k+1)} ;
\end{align}
note that the result in~\cite{Buch12} is stated for the quantity $N_k= f_0(T_k)-2$. Let $Y$ be a Poisson random variable with mean $M \in \bbR,\ M >0$. Then
\[
\bbE H_Y =
\sum_{k=0}^\infty \left(H_k-H_{\lceil M \rceil}\right) \bbP(Y=k) + H_{\lceil M \rceil}.
\]
Because of the trivial estimate
\[
\left|H_Y-H_{\lceil M \rceil}\right| \leq \max \left\{ \frac {Y-\lceil M \rceil}{\lceil M \rceil}, \frac {\lceil M \rceil-Y}{Y+1} \right\}
\leq
\left|\frac {Y-M}M \right| + \left|\frac {M-Y+1}{Y+1} \right|
\]
we have
\begin{align*}
\left|\bbE H_Y - H_{\lceil M \rceil} \right| & \leq
\bbE \, \frac {|Y-M|}M + \, \bbE \frac {|M-Y+1|}{Y+1}
\\ & \leq
\sqrt{\bbE \frac {(Y-M)^2}{M^2}} + \sqrt{\bbE \frac {(M-Y+1)^2}{(Y+1)^2}}.
\end{align*}
Since $\bbE Y=M$ and $\bbE Y^2 = M^2 + M $, we see that
\[
\bbE \, \frac{(Y-M)^2}{M^2} = \frac{\var(Y)}{M^2}= M^{-1}.
\]
Analogously, since $
\bbE (Y+1)^{-1} = M^{-1} (1 - e^{-M})
\geq M^{-1} (1- M^{-1}) = (M-1) M^{-2} $, and
\begin{multline*}
\bbE (Y+1)^{-2} \\
= \sum_{k=2}^\infty \frac {M^{k-2}} {k!} \frac {k} {k-1} e^{-M}
\leq
\sum_{k=2}^\infty \frac {M^{k-2}} {k!} \left(1+\frac {3} {k+1} \right) e^{-M}
\leq M^{-2} \left(1+ 3 M^{-1}\right),
\end{multline*}
we see that
\begin{align*}
\bbE \frac {(M-Y+1)^2}{(Y+1)^2} &=
\bbE \frac {(M+2)^2}{(Y+1)^2} -2 \bbE \frac {(M+2)}{(Y+1)} + 1
\leq 37 M^{-1}.
\end{align*}
Because of $\log \lceil M\rceil= \log M + O(M^{-1})$, equation~\eqref{eq:HarmNum1} yields
\begin{equation}\label{eq:EHY}
\bbE H_Y = H_{\lceil M\rceil} + O\left(M^{-\frac 12}\right) = \log M + \gamma + O\left(M^{-\frac 12}\right).
\end{equation}
Together with~\eqref{eq:expUnif} this proves~\eqref{eq:poissonExp}. Similarly, by the law of total variance we have
\begin{align*}
\var f_0(T_{\chi}) &=
\bbE_Y \var \left(f_0(T_{k})\,\middle|\,Y=k\right) + \var_Y \bbE \left(f_0(T_k)\,\middle|\,Y=k\right)
\\ &=
\bbE \left(\frac {10}{27} H_Y + \frac{4}{9} H^{(2)}_Y - \frac{28}{27 } + \frac{4}{9(Y+1)}\right) + \var \left(\frac 23 H_Y \right).
\end{align*}
By~\eqref{eq:EHY}, and because $ \bbE H^{(2)}_Y = \frac{\pi^2}6+O(\bbE (Y+1)^{-1})$ by~\eqref{eq:HarmNum2} with $\bbE (Y+1)^{-1} = O(M^{-1}) $, we obtain
\begin{align*}
\var f_0(T_{\chi}) &=
\left(\frac {10}{27} \log M + \frac{10 \gamma + 2\pi^2 - 28 }{27} + O\left(M^{-\frac 12}\right) \right) + \frac 49 \var H_Y.
\end{align*}
To prove~\eqref{eq:poissonVar} it remains to show that the variance of $H_Y$ is bounded by a constant times $M^{-\frac 12}$. For this we use the Poincaré inequality for Poisson random variables, which says that
\[
\var f(Y) \leq M \bbE (f(Y+1)-f(Y))^2
\]
for functions $f:\{0,1,2,\,\ldots\}\to\bbR$ for which $\var f(Y)<\infty$. This inequality can be considered as a special case of the general Poincaré inequality~\cite[Theorem~18.7]{LP} for functionals of Poisson random measures. Applying this to $f(Y)=H_Y$ we conclude that
\[
\var H_Y
\leq M \bbE \frac 1{(Y+1)^2}
\leq
\sum\limits_{k=0}^{\infty} 2 \frac{M^{k+1}}{(k+2)!} e^{-M} \leq 2 M^{-1},
\]
which completes the argument.
\end{proof}

Now we are prepared to prove a Berry--Esseen bound for the number of vertices of the Poisson random chain $T_\chi$ in the canonical triangle $T$.

\begin{lemm}\label{lm:BerryEssenPoissonChain}
Consider a homogeneous Poisson point process $\chi$ in the canonical triangle $T$ with $\bbE(\# \chi)=M \geq 2$. Then
\[
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{f_0(T_\chi)-\bbE f_0(T_\chi)}{\sqrt{\var f_0(T_\chi)}}\leq x\right)-\Phi(x)\right|\leq \frac{c}{\sqrt{\log M}},
\]
for some absolute constant $c>0$.
\end{lemm}

\begin{proof}
Let $T_k$ denote the convex chain built on random points $X_1,\,\ldots,\,X_k$ independently and uniformly distributed in $T$. We apply Lemma~\ref{lm:transference} with $\xi_M':=f_0(T_{\lceil M\rceil})$ and $\xi_M:=f_0(T_{\chi})$. The condition~\eqref{lemm3.6.1} with $\varepsilon_1(M) \leq c_1 M^{- \frac 12}/\log M$ for $c_1>0$ independent of $M$ can be verified by formulas~\eqref{eq:poissonExp}, \eqref{eq:expUnif} and~\eqref{eq:poissonVar}. Analogously, condition~\eqref{lemm3.6.2} with $\varepsilon_2(M)=c_2M^{- \frac 12}/\log M$ for $c_2>0$ independent of $M$ follows from~\eqref{eq:poissonVar} and~\eqref{eq:varUnif}. The condition~\eqref{lemm3.6.4} with $\varepsilon_4(M)=c/\sqrt{\log \lceil M\rceil}=c_4/\sqrt{\log M}$ for some constant $c_4>0$ independent of $M$ follows immediately from~\eqref{eq:CLTChain}.

For the verification of condition~\eqref{lemm3.6.3} we use the convex floating body introduced in Section~\ref{sec:Prelim}, and follow an approach already used somewhat implicitly in~\cite{Gro88} as well as~\cite{Reitz05}. Recall that $\clA_{\lceil M \rceil}$ is the event that the vertices of the convex hull of ${\lceil M \rceil}$ random points are contained in the wet part $T(v< b_0 {\lceil M \rceil}^{-1}\log {\lceil M \rceil})$ of the triangle $T$, which clearly implies that all vertices of the convex chain $T_{\lceil M \rceil}$ are contained in this wet part. Slightly more generally, for $M>1$ define $\clA_{M }$ to be the event that the vertices of the convex hull of the ${\lceil M \rceil}$ random points are contained in the wet part $T(v< b_0 {M}^{-1}\log {M})$ of the triangle $T$. Analogously, $\clA^\pi_M$ is the event that all vertices of the Poisson convex hull, and thus all vertices of the Poisson convex chain $T_{\chi}$ belong to $T(v< b_0 M^{-1}\log M)$. Because
\[
T\left(v < b_0 \lceil M \rceil^{-1} \log \lceil M \rceil\right) \subset T\left(v< b_0 M^{-1} \log M\right),
\]
by Lemma~\ref{le:floating-Pn} and Lemma~\ref{le:floating-Peta} we have
\[
\bbP(\overline{\clA_{M}}) \leq \bbP(\overline{\clA_{\lceil M \rceil}}) = O\left(M^{-6}\right)\qquad\text{and}\qquad
\bbP(\overline{\clA^\pi_M}) = O\left(M^{-6}\right).
\]

\begin{figure}[t]
\centering
\begin{tikzpicture}
\clip (-4.5,-4.5) rectangle (4.5,4.5);
\node at (0,0) {\includegraphics[width=0.44\textwidth]{floating}};
\node at (-1.3,-1) {\tiny $T(v\ge b_0 M^{-1}\log M)$};
\node at (1.2,0) {\small $P_M$};
\node at (-4.2,-2.2) {\small $T_M$};

\draw (0.75,0) -- (0.18,-0.43);
\draw (-4,-2) -- (-3.57,-0.7);
\end{tikzpicture}
\vspace*{-20pt}
\caption{Illustration of the construction used in the proof of Lemma~\ref{lm:BerryEssenPoissonChain}. The convex hull $P_M$ (or $P_\chi$) is indicated by the dashed segments, while the convex chain $T_M$ (or $T_\chi$) by a solid line. The floating body of $T$ is drawn in grey.}\label{fig:Step3}
\end{figure}

Combining these estimates yields, for any $x\in\bbR$,
\begin{multline}\label{eq:24.01.22}
\big|\bbP\left(f_0\left(T_{\lceil M \rceil}\right)\leq x\right)-  \bbP\left(f_0\left(T_{\chi}\right)\leq x\right)\big|
\\ \leq 
\big|\bbP\left(f_0\left(T_{\lceil M \rceil}\right)\leq x, \clA_M\right)-\bbP\big(f_0\left(T_{\chi}\right)\leq x, \clA^\pi_{M}\big)\big| + O\left(M^{-6}\right).
\end{multline}
For $T_\chi$ we have that $\#(\chi \cap T(v< b_0 M^{-1} \log M))$ is Poisson distributed with mean $Mp$ with
\[
p := \, \frac{{\area}\left(T(v< b_0 M^{-1} \log M)\right)}{{\area}(T)} = O\left(M^{-1} (\log M)^2\right)
\]
by~\eqref{eq:wetpart}, and for $T_{\lceil M \rceil}$ the number of points in $T(v< b_0 M^{-1} \log M)$ is binomial distributed with mean $\lceil M \rceil p$. Denote by $E_m$ the event that precisely $m$ points of the Poisson or binomial process are in $T(v\ge b_0 M^{-1} \log M)$. Coupling both processes in the canonical way and using Lemma~\ref{le:diff-Poisson-Poisson}, Lemma~\ref{le:diff-Poisson-binom} with $k=0$, together with~\eqref{eq:24.01.22} yields
\begin{align*}
& \big| \bbP\left(f_0\left(T_{\lceil M \rceil}\right) \leq x\right)- \bbP\left(f_0(T_{\chi})\leq x\right)\big|
\\ & \leq
\sum_{m=0}^\infty \bbP\left(f_0\left(T_{\lceil M \rceil}\right)\leq x, \clA_M \,\middle|\, E_m\right)\left| \frac{(Mp)^m}{m!} e^{- Mp} - \binom{\lceil M \rceil}{m} p^m (1-p)^{\lceil M \rceil-m} \right| + O\left(M^{-6}\right)
\\
& \leq
\sum_{m=0}^\infty \left| \frac{(\lceil M \rceil p)^m}{m!} e^{- \lceil M \rceil p} - \frac{(M p)^m}{m!} e^{- M p} \right|
\\ & \quad + \sum_{m=0}^\infty \left| \frac{(\lceil M \rceil p)^m}{m!} e^{- \lceil M \rceil p} - \binom{\lceil M \rceil}{m} p^m (1-p)^{\lceil M \rceil-m} \right| + O\left(M^{-6}\right)
\\ & \leq 2p + 2p + O\left(M^{-6}\right)\\
& = O\left(M^{-1} (\log M)^2\right).
\end{align*}
Thus condition~\eqref{lemm3.6.3} in Lemma~\ref{lm:transference} holds with $\varepsilon_3(M)=c_3 M^{-1} (\log M)^2$ for some $c_3>0$ independent of $M$. An application of Lemma~\ref{lm:transference} finishes the proof of Lemma~\ref{lm:BerryEssenPoissonChain}.
\end{proof}



\subsection{Step 4: Berry--Esseen bound for the Poisson model under condition~\texorpdfstring{$\clE$}{E}}\label{step4}

In the next step we consider the random variable $f_0(P_{\eta})$, conditioned on the event~$\clE$ we introduced in \hyperref[step2]{Step 2}:
\[
\xi:=(f_0(P_{\eta})|\clE).
\]
Let us also condition on the positions of the points $Z_1,\dots,Z_\ell \in \eta$ and introduce the random variable
\[
\xi':= \left(f_0(P_{\eta})\,\middle|\,\clE, Z_1,\,\ldots,\,Z_\ell\right).
\]

It should be mentioned that under $\clE$ all $\ell$ points $Z_1,\,\ldots,\,Z_\ell$ are well defined and in fact distinct. Note that, conditionally on the positions of $Z_1,\,\ldots,\,Z_\ell$, the restriction of $\eta$ to the interior of the polygon $L^+(Z_1) \cap \,\dots \cap L^+(Z_\ell)$ is again a Poisson point process with the same intensity. Indeed, this follows on the one hand from the fact that the $\ell$-fold reduced Palm distribution of a stationary Poisson point process coincides with its distribution, see~\cite[Corollary~1]{Hanisch82}. Since after fixing $Z_1,\,\ldots,\,Z_\ell$, the triangles $\Delta_i$, $1\leq i\leq \ell$ are fixed and they have disjoint interiors, the restriction of $\eta$ to the interior of $\Delta_i$ is again a Poisson point process whose intensity measure is $n$ times the Lebesgue measure restricted to $\Delta_i$, see~\cite[Theorem~5.2]{LP}. Hence, the random variable $\xi'$ can be decomposed into the sum of $\ell$ independent random variables $\xi'_i$, $1\leq i\leq \ell$, where each $\xi_i'$ is defined as a number of vertices of the random convex chain, formed by the Poisson point process $\eta$ restricted to the triangle $\Delta_i$. More precisely, given an arbitrary triangle $\Delta\subset P$ with vertices $v_1,v_2,v_3$ and a Poisson point process $\eta$ we define
\[
T_{\eta}(\Delta,v_1,v_2):=[(\eta\cap\Delta), v_1,v_2].
\]
Then we take
\[
\xi'_i:= f_0(T_{\eta}(\Delta_i,Z_i,Z_{i+1}))-2,
\]
where the $-2$ is coming from the fact that we exclude the two endpoints $Z_i$ and $Z_{i+1}$ of the convex chain. Now, consider for each $1\leq i\leq \ell$ the affine transformation $A_i:\bbR^2\to\bbR^2$ which maps the triangle $\Delta_i$ with vertices $Z_i$, $V_i$ and $Z_{i+1}$ to the canonical triangle $T$ with vertices $(0,1)$, $(0,0)$ and $(1,0)$. Using the mapping property~\cite[Theorem~5.1]{LP} and restriction property~\cite[Theorem~5.2]{LP} of Poisson point processes we conclude that $\eta_i:=A_i(\eta\cap\Delta_i)$ is a homogeneous Poisson point process on $T$ with intensity $2n\,{\area}(\Delta_i)$.

Since the number of vertices is invariant under affine transformations we conclude that
\[
\xi'_i\stackrel{d}{=} f_0(T_{\eta_i})-2.
\]

In order to prove a Berry--Esseen bound for the random variable
\[
\xi'=\sum_{i=1}^\ell \xi_i'+ \ell,
\]
where the additional summand $+\ell$ is coming from the fact that we excluded in the definition of $\xi_1',\,\ldots,\,\xi_\ell'$ the points $Z_1,\,\ldots,\,Z_\ell$, we will use the following lemma.


\begin{lemm}\label{lm:GlueBerryEsseen}
Let $X_1,\,\ldots,\,X_k$ be independent random variables with $\mu_i:=\bbE X_i<\infty$, $\sigma_i:=\sqrt{\var X_i}\in(0,\infty)$, $1\leq i\leq k$ and let $G_1,\,\ldots,\,G_k$ be independent standard Gaussian random variables. Let $\varepsilon_i>0$, $1\leq i\leq k$ be such that
\begin{equation}\label{eq:conditions}
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{X_i-\mu_i}{\sigma_i}\leq x\right)-\bbP(G_i\leq x)\right|\leq \varepsilon_i,\qquad 1\leq i\leq k.
\end{equation}
Then for $X:=X_1+\ldots+X_k$ we have
\[
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{X-\bbE X}{\sqrt{\var X}}\leq x\right)-\Phi(x)\right|\leq \sum_{i=1}^k\varepsilon_i.
\]
\end{lemm}

\begin{proof}
Let $\clF$ be the space of cumulative distribution functions, namely
\begin{multline*}
\clF:=\big\{F:\bbR\to[0,1]\colon F \text{ right-continuous, monotone increasing},
\\ F(-\infty)=0, F(\infty)=1\big\},
\end{multline*}
where $F(\pm\infty)$ has to be interpreted as the appropriate limit. First of all let us recall that the classical Kolmogorov (or uniform) metric $d:\clF\times\clF\to [0,\infty)$ on the space $\clF$ is defined as
\[
d(F_1,F_2):=\sup_{x\,\in\,\bbR}|F_1(x)-F_2(x)|.
\]
Given two random variables $X,Y$ with cumulative distribution functions $F_X,F_Y$, respectively, we write
\[
d(X,Y):=d(F_X,F_Y)=\sup_{x\,\in\,\bbR}\left|\bbP(X\leq x)-\bbP(Y\leq x)\right|.
\]
Using this notation and the fact that $d(aX+b,aY+b)=d(X,Y)$ for any $a>0,b\in\bbR$ the conditions in~\eqref{eq:conditions} can be written in the form
\begin{equation}\label{eq:conditionsNew}
d(X_i-\mu_i,\sigma_i G_i)\leq \varepsilon_i,\qquad 1\leq i\leq k.
\end{equation}
Further, we apply the so-called semi-additivity property of the Kolmogorov metric, which says that for any independent $Y_1,\ldots,Y_k$ and any independent $Y'_1,\,\ldots,\,Y'_k$ one has that
\[
d(Y_1+\ldots+Y_k,Y_1'+\ldots+Y_k')\leq \sum_{i=1}^k d(Y_i,Y_i'),
\]
see~\cite[Section~2.3, Equation~(1.2)]{Zol76}. Recalling that $X=X_1+\ldots+X_k$, and taking $Y_i=X_i-\mu_i$ and $Y_i'=\sigma_i G_i$ we conclude by~\eqref{eq:conditionsNew} that
\begin{align*}
d\big(X-\bbE X, \sigma_1 G_1+\ldots+\sigma_k G_k\big)&=d\left(\frac{X-\bbE X}{\sqrt{\var X}}, \frac{\sigma_1 G_1+\ldots+\sigma_k G_k}{(\sigma_1^2+\ldots+\sigma_k^2)^{1/2}}\right)\leq \sum_{i=1}^k\varepsilon_i.
\end{align*}
Finally, we need to observe that $(\sigma_1 G_1+\ldots+\sigma_k G_k)/(\sigma_1^2+\ldots+\sigma_k^2)^{1/2}$ has the standard Gaussian distribution. This completes the proof of Lemma~\ref{lm:GlueBerryEsseen}.
\end{proof}

We apply Lemma~\ref{lm:GlueBerryEsseen} with $k=\ell$ to the random variables $X_1:=\xi_1',\,\ldots,\,X_\ell:=\xi_\ell'$.

\begin{coro}\label{cor:PoissonWithZ}
There exists a constant $c>0$ such that for any $n\geq 4$ we have that
\[
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{\xi'-\bbE\xi'}{\sqrt{\var \xi'}}\leq x\right)-\Phi(x)\right|\leq \frac{c}{\sqrt{\log n}}.
\]
\end{coro}

\begin{proof}
Lemma~\ref{lm:BerryEssenPoissonChain} yields~\eqref{eq:conditions} for the random variables $X_i=\xi'_i$ with $\varepsilon_i=c(\log M)^{-1/2}$, $1\leq i\leq \ell$. It is clear that for $X := \xi'-\ell = \sum_{i=1}^\ell \xi'_i$ we have $X-\bbE X=\xi'-\bbE\xi'$ and $\var X=\var\xi'$. Moreover, according to~\eqref{eq:Delta>n-12} we have ${\area}(\Delta_i) \geq n^{- \frac 12}$ and
\begin{equation}\label{eq:estM}
M= n\,{\area}(\Delta_i)\ge n^{\frac12} \geq 2
\end{equation}
given $\clE$. Thus $\varepsilon_i \leq c (\frac 12 \log n)^{-1/2}$ for all $1\leq i\leq \ell$, and the proof of Corollary~\ref{cor:PoissonWithZ} is complete.
\end{proof}


Note that the obtained bound is independent of the exact position of the points $Z_1,\,\ldots,\,Z_\ell$ if we condition on the event $\clE$. This already suggests that the same bound holds for the random variable $f_0(P_\eta)$ conditionally on $\clE$ only. Our next result ensures that this is indeed the case.

\begin{lemm}\label{lem:RemovePointsZ}
There exists a constant $c>0$ such that for any $n\geq 2$ we have
\[
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{(f_0(P_{\eta})|\clE)-\mu}{\sigma}\leq x\right)-\Phi(x)\right|\leq \frac{c}{\sqrt{\log n}}
\]
with
\begin{align*}
\mu &=\bbE \left(f_0(P_{\eta})\,\middle|\,\clE\right) = \frac{2 \ell}{3}\log n + \frac 23 \sum_{i=1}^\ell \log F_i + \frac{2 \gamma \ell }3 + O\left(n^{- \frac 14}\log n\right)
\intertext{and}
\sigma^2 &=\var \left(f_0(P_{\eta})\,\middle|\,\clE\right) = \frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log F_i +
\frac{\left(10 \gamma -2 \pi^2\right)\ell }{27} + O\left(n^{-\frac 14} (\log n)^2\right).
\end{align*}
\end{lemm}

\begin{proof}
Let ${\bfZ}=(Z_1,\,\dots,\,Z_\ell)$. Recall that $\xi=(f_0(P_{\eta})|\clE)$ and $\xi'=(f_0(P_{\eta})|\clE, {\bfZ})$. Moreover, we define $\mu:=\bbE\xi$, $\mu':=\bbE\xi'$ and $\sigma^2:=\var \xi$, ${\sigma'}^2:=\var \xi' $ (in this proof we suppress the dependence on the parameter $n$ in our notation). Using the representation $\xi'=\sum_{i=1}^\ell \xi_i'+\ell $ together with Lemma~\ref{lm:estimatesPoissonChain}, thanks to the condition $\clE$, we obtain
\begin{align*}
\mu' &= \frac{2 \ell}{3}\log n +\frac{2}{3}\sum_{i=1}^\ell \log{\area}(\Delta_i)+\frac{(2 \gamma + 4)\ell }3 + O\left(n^{-\frac 14}\right),\qquad
\\
\sigma'^2 &= \frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log{\area}(\Delta_i) +
\frac{\left(10 \gamma + 2\pi^2 - 28\right)\ell }{27} + O\left(n^{-\frac 14}\right).
\end{align*}
Note that we used the estimate~\eqref{eq:estM} for $M$ here. Corollary~\ref{cor:moments-logarea} shows that
\begin{align*}
\mu =
\bbE (\mu' | \clE) &=
\bbE \left(\frac{2 \ell}{3}\log n +\frac{2}{3}\sum_{i=1}^\ell \log{\area}(\Delta_i)+\frac{(2 \gamma + 4)\ell }3 + O(n^{-\frac 14}) \, \middle|\,\clE \right)
\\ &= \frac{2 \ell}{3}\log n + \frac 23 \sum_{i=1}^\ell \log F_i + \frac{2 \gamma \ell }3 + O\left(n^{- \frac 14} \log n\right),
\end{align*}
which already coincides with the expectation~\eqref{eq:expPolUnif} by Rényi and Sulanke. Analogously, the expected conditional variance is given by
\begin{align*}
\bbE ({\sigma'}^2 | \clE) &=
\bbE \left(\frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log{\area}(\Delta_i) +
\frac{\left(10 \gamma + 2\pi^2 - 28\right)\ell }{27} + O\left(n^{-\frac 14}\right) \,\middle|\,\clE \right)
\\ &= \frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log F_i +
\frac{\left(10 \gamma + 2\pi^2 - 48\right)\ell }{27} + O\left(n^{-\frac 14} \log n\right).
\end{align*}
And for the variance of the expectation we use Corollary~\ref{cor:moments-logarea} again,
\begin{align*}
\var (\mu' | \clE) = &
\frac 49 \var \left(\sum_{i=1}^\ell \log {\area} (\Delta_i) +O(n^{-\frac 14})\, \middle|\,\clE \right)
\\ = &
\frac 49 \sum_{i=1}^\ell \var \left(\log {\area} (\Delta_i) \, \middle|\,\clE \right)
\\ & +
\frac 89 \sum_{i=1}^\ell \cov \left(\log {\area} (\Delta_i), \log {\area} (\Delta_{i+1}) \ \Big|\clE \right) + O\left(n^{- \frac 14 } \log n\right)
\\ = &
\frac 89 \ell \left(2- \frac{\pi^2}6 \right) + O\left(n^{-\frac 14} (\log n)^2\right).
\end{align*}
Hence, by the law of total variance,
\begin{align*}
\sigma^2 &=\bbE_ {\bfZ}\left(\underbrace{\var \left(f_0(P_{\eta})\,\middle|\,\clE, {\bfZ}\right)}_{= {\sigma '}^2}\, \middle|\, \clE\right) + \var_{\bfZ} \left(\underbrace{\bbE \left(f_0(P_{\eta})\,\middle|\,\clE, {\bfZ}\right)}_{= \mu'}\,\middle|\,\clE\right)\\
&= \frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log F_i +
\frac{\left(10 \gamma -2 \pi^2\right)\ell }{27} + O\left(n^{-\frac 14} (\log n)^2\right).
\end{align*}
This also shows that
\begin{align}
\mu-\mu'& = \frac{2}{3}\sum_{i=1}^\ell \log{\area}(\Delta_i)+O(1),\label{eq:moments}
\\
\sigma'^2-\sigma^2 & = \frac{10}{27}\sum_{i=1}^\ell \log{\area}(\Delta_i)+O(1),\label{eq:variance1}
\\
\sigma+\sigma'& \geq \sigma = \left(\frac{10 \ell}{27}\log n\right)^{\frac 12} +O(1)\label{eq:variance2}
.
\end{align}

Next, we observe that
\begin{multline}\label{eq:14.05.21_2}
\sup_{x\,\in\,\bbR} \left| \bbP\left(\frac {\xi-\mu}{\sigma}\leq x\right) - \Phi(x)\right|\\
\begin{aligned}
& = 
\sup_{y\,\in\,\bbR} \left| \bbP(\xi\leq y) - \Phi\left(\frac {y-\mu}{\sigma}\right)\right|
\\ &\leq
\sup_{y\,\in\,\bbR} \left| \bbE \bbP(\xi'\leq y) - \bbE \Phi\left(\frac {y-\mu'}{\sigma'}\right)\right| +
\sup_{y\,\in\,\bbR}\left|\bbE \Phi\left(\frac {y-\mu'}{\sigma'}\right) - \Phi\left(\frac {y-\mu}{\sigma}\right)\right| \\
& \leq
\bbE \sup_{y\,\in\,\bbR} \left| \bbP(\xi'\leq y) - \Phi\left(\frac {y-\mu'}{\sigma'}\right)\right| +
\bbE \sup_{y\,\in\,\bbR} \left|\Phi\left(\frac {y-\mu'}{\sigma'}\right) - \Phi\left(\frac {y-\mu}{\sigma}\right)\right|,
\end{aligned}
\end{multline}
where the expectation is taken with respect to the law of the random vector ${\bfZ}$. From Corollary~\ref{cor:PoissonWithZ} we have
\begin{multline}\label{eq:14.05.21_3}
\bbE \sup_{y\,\in\,\bbR} \left| \bbP(\xi'\leq y) - \Phi\left(\frac {y-\mu'}{\sigma'}\right)\right|\\
=\bbE \sup_{x\,\in\,\bbR} \left| \bbP\left(\frac {\xi'-\mu'}{\sigma'}\leq x\right) - \Phi(x)\right| = O\left(\frac{1}{\sqrt{\log n}}\right).
\end{multline}
It remains to deal with the random variable
\[
Y_{\bfZ}:=\sup_{y\,\in\,\bbR} \left|\Phi\left(\frac {y-\mu'}{\sigma'}\right) - \Phi\left(\frac {y-\mu}{\sigma}\right)\right|.
\]
Assume that the supremum is attained at $y_0$. Then, with $\phi(t):=\frac{1}{\sqrt{2\pi}}e^{-t^2/2}$ the density of the standard normal distribution,
\begin{multline}\label{eq:18-05-21a}
Y_{\bf{Z}}=\left|\Phi\left(\frac {y_0-\mu'}{\sigma'}\right) - \Phi\left(\frac {y_0-\mu}{\sigma}\right)\right|\\
\leq \sup_{t\,\in\,\bbR}|\phi(t)|\cdot \left|\frac {y_0-\mu'}{\sigma'} - \frac {y_0-\mu}{\sigma}\right|\leq\left|\frac {y_0-\mu'}{\sigma'} - \frac {y_0-\mu}{\sigma}\right|,
\end{multline}
where $y_0$ is such that
\[
\frac{1}{\sigma'}\phi\left(\frac {y_0-\mu'}{\sigma'}\right) = \frac{1}{\sigma}\phi\left(\frac {y_0-\mu}{\sigma}\right).
\]
By taking logarithms on both sides and putting all terms to the left hand side of the equality, we see that the last equation is equivalent to
\[
y_0^2\left(\sigma^2-\sigma'^2\right)-2y_0\left(\mu'\sigma^2-\mu\sigma'^2\right)+\mu'^2\sigma^2-\mu^2\sigma'^2-\sigma^2\sigma'^2\log\left(\frac{\sigma}{\sigma'}\right)=0.
\]
This quadratic equation has the following solutions:
\[
y^{\pm}_0=\frac{\mu'\sigma^2-\mu\sigma'^2\pm\sigma\sigma'\sqrt{(\mu'-\mu)^2+\log(\sigma/\sigma')|\sigma'^2-\sigma^2|}}{\sigma^2-\sigma'^2}.
\]
Substituting this back into~\eqref{eq:18-05-21a} leads to the bound
\begin{align*}
Y_{\bf{Z}} & \leq \frac{|\mu'-\mu|+\sqrt{(\mu'-\mu)^2+\log(\sigma/\sigma')|\sigma'^2-\sigma^2|}}{\sigma+\sigma'}
\\ & \leq 2\frac{|\mu'-\mu|}{\sigma+\sigma'} + \frac{\sqrt{|\log(\sigma/ \sigma')||\sigma'^2-\sigma^2|}}{\sigma+\sigma'},
\end{align*}
where we used the fact that $\sqrt{a+b} \leq \sqrt a + \sqrt b$ for all $a,b>0$. Observe that given $\clE$ we have ${\area}(\Delta_i) \geq c\, n^{- \frac 12}$ and thus $\sigma'^2 \geq \frac{5 \ell}{27}\log n +O(1)$. Hence, there exist constants $c_1,c_2>0$ such that $c_1<\sigma/\sigma'<c_2$. Further, $\sqrt{|\sigma'^2-\sigma^2|} \leq |\sigma'^2-\sigma^2|+1$. Thus, using~\eqref{eq:moments}, \eqref{eq:variance1}, and~\eqref{eq:variance2} we conclude that, for some constant $C_1>0$,
\[
Y_{\bf{Z}}\leq C_1\left(\sum_{i=1}^\ell \frac{|\log{\area}(\Delta_i)|}{\sqrt{\log n}}+\frac{1}{\sqrt{\log n}}\right),
\]
and Corollary~\ref{cor:moments-logarea} yields, for another constant $C_2>0$,
\[
\bbE Y_{\bf{Z}}\leq C_2\left(\frac{\bbE|\log{\area}(\Delta_1)|}{\sqrt{\log n}}+\frac{1}{\sqrt{\log n}} \right) = \frac{O(1)}{\sqrt{\log n}}.
\]
Together with~\eqref{eq:14.05.21_2} and~\eqref{eq:14.05.21_3} this completes the proof of Lemma~\ref{lem:RemovePointsZ}.
\end{proof}



\subsection{Step 5: Removing the condition \texorpdfstring{$\clE$}{E}}
\label{step5}

In order to remove the remaining condition $\clE$ in Lemma~\ref{lem:RemovePointsZ} we use Lemma~\ref{lm:transference} again. We will apply this lemma to the random variables $\xi'_n=f_0(P_{\eta}|\clE)$ and $\xi_n=f_0(P_{\eta})$. Note that condition~\eqref{lemm3.6.4} then follows from Lemma~\ref{lem:RemovePointsZ}. Checking the other conditions requires a more careful analysis.

\begin{lemm}\label{lm:condition}
The random variables $\xi_n:=f_0(P_{\eta})$ and $\xi_n':=f_0(P_{\eta}|\clE)$ satisfy conditions~\eqref{lemm3.6.1}--\eqref{lemm3.6.3} of Lemma~\ref{lm:transference} with $\varepsilon_1(n)=c_1n^{-\frac 14} (\log n)^{\frac 32}$, $\varepsilon_2(n)=c_2n^{-\frac 14} (\log n)^3$, $\varepsilon_3(n)=c_3 n^{-\frac 14}$, where $c_1,c_2,c_3>0$ are positive constants not depending on $n$.
\end{lemm}


\begin{proof}
The proof of this lemma will basically follow the lines of the proof of~\cite[Lemma~8.2]{BR10}. We will start by estimating $\bbE(f_0(P_{\eta})^k|\overline{\clE})$ for $k=1,2$. For this we assume additionally that the event $\clA^\pi_n \cap \clB^\pi_n$ holds, that is
\[
P\left(v\ge b_0 n^{-1} \log n\right)\subset P_{\eta}\quad\text{and}\quad \ \#\left(\eta \cap P\left(v< b_0 n^{-1} \log n\right)\right)\leq c_0 (\log n)^2,
\]
and make use of Lemma~\ref{le:floating-Peta}. It follows that $ f_0(P_{\eta})^k \ind (\clA^\pi_n \cap \clB^\pi_n) \leq c_0^{k} (\log n)^{2k}. $ As a consequence,
\begin{align*}
\bbE\left(f_0(P_{\eta})^k\,\middle|\,\overline{\clE}\right)&=\bbE\left(f_0(P_{\eta})^k \ind (\overline{\clA^\pi_n \cap \clB^\pi_n})\,\middle|\,\overline{\clE}\right)+ \bbE\left(f_0(P_{\eta})^k\ind (\clA^\pi_n \cap \clB^\pi_n)\,\middle|\,\overline{\clE}\right)
\\ &\leq
\frac{\bbE((\# \eta)^k \ind (\overline{\clA^\pi_n \cap \clB^\pi_n}) \ind (\overline{\clE}))}{\bbP(\overline{\clE})}+ C\,(\log n)^{2k}
\\ &\leq
\frac{\left(\bbE(\# \eta)^{2k}\right)^{\frac12} \bbP (\overline{\clA^\pi_n \cap \clB^\pi_n})^{\frac 12}}{\bbP(\overline{\clE})}+ C\,(\log n)^{2k},
\end{align*}
by H\"older's inequality, where $C>0$ is some constant. Because for $m \geq 1$, $\bbE N^{m} \leq m^m (n^m+1)$ for a Poisson random variable $N$ with mean at most $n$,
\[
\left(\bbE(\# \eta)^{2k}\right)^{\frac12} = O(n^{k}),
\]
and from Corollary~\ref{cor:complcE} and Lemma~\ref{le:floating-Peta} we see that
\[
\frac{\bbP(\overline{\clA^\pi_n \cap \clB^\pi_n})^{\frac 12}}{\bbP(\overline{\clE})} = O\left(n^{- 2}\right).
\]
Thus, for $k=1,2$
\begin{equation}\label{eq:3}
\bbE(f_0(P_{\eta})^k|\overline{\clE}) = O\left((\log n)^{2k}\right).
\end{equation}

In order to verify conditions~\eqref{lemm3.6.1}--\eqref{lemm3.6.3} in Lemma~\ref{lm:transference} we will use the following simple inequality from~\cite[Claim~8.3]{BR10}, which says that
\[
|\bbE(\zeta)-\bbE(\zeta|A)|\leq \left(\bbE(\zeta|A)+\bbE(\zeta|\overline{A})\right)\bbP(\overline{A}),
\]
for any non-negative random variable $\zeta$ and any event $A$.

For condition~\eqref{lemm3.6.1} we take $\zeta = f_0(P_{\eta})$ and $A=\clE$. By Lemma~\ref{lem:RemovePointsZ} we get $ \bbE (f_0(P_{\eta})|\clE) = O(\log n)$. Using this and~\eqref{eq:3}, and the estimate in Corollary~\ref{cor:complcE} for $\bbP (\overline{\clE})$, we conclude that
\begin{equation}\label{eq:4}
\left|\bbE\left(f_0(P_{\eta})\right)-\bbE\left(f_0(P_{\eta})|\clE\right)\right| = O\left(n^{-\frac 14}(\log n)^2\right),
\end{equation}
and, thus,
\begin{equation}\label{eq:EE-Poiss}
\bbE f_0(P_{\eta}) = \frac{2 \ell}{3}\log n + \frac 23 \sum_{i=1}^\ell \log F_i + \frac{2 \gamma \ell }3 + O\left(n^{-\frac 14}(\log n)^2\right).
\end{equation}
In the same way, for condition~\eqref{lemm3.6.2} we take $\zeta = f_0(P_{\eta})^2$ and $A=\clE$. Then
\begin{multline*}
\left|\var(f_0(P_{\eta}))-\var(f_0(P_{\eta})|\clE)\right|\\
\leq \left|\bbE f_0(P_{\eta})^2-\bbE\left(f_0(P_{\eta})^2|\clE\right)\right| +\left|\left(\bbE f_0(P_{\eta})\right)^2-\left(\bbE(f_0(P_{\eta})|\clE)\right)^2\right|.
\end{multline*}
For the first term we get from Lemma~\ref{lem:RemovePointsZ} the bound
\[
\bbE\left(f_0(P_{\eta})^2|\clE\right) =
\var (f_0(P_{\eta})|\clE) + (\bbE (f_0(P_{\eta})|\clE))^2 = O\left((\log n)^2\right),
\]
and combine this with~\eqref{eq:3} and Corollary~\ref{cor:complcE}, in order to obtain
\begin{align*}
\left|\bbE f_0(P_{\eta})^2-\bbE\left(f_0(P_{\eta})^2|\clE\right)\right|&\leq \left(\bbE\left(f_0(P_{\eta})^2\,\middle|\,\clE\right)+\bbE\left(f_0(P_{\eta})^2\,\middle|\,\overline{\clE}\right)\right)\bbP(\overline{\clE})\\
& = O\left(n^{-\frac 14}(\log n)^4\right).
\end{align*}
For the second term we note that again by Lemma~\ref{lem:RemovePointsZ} and~\eqref{eq:4} we get
\begin{align*}
\left|\left(\bbE f_0(P_{\eta})\right)^2-\left(\bbE(f_0(P_{\eta})|\clE)\right)^2\right| & = O\left(n^{-\frac 14} (\log n)^3\right).
\end{align*}
Putting these estimates together we conclude that
\[
\left|\var(f_0(P_{\eta}))-\var(f_0(P_{\eta})|\clE)\right| = O\left(n^{-\frac 14} (\log n)^4\right),
\]
implying that
\begin{equation}\label{eq:Var-Poiss}
\var(f_0(P_{\eta})) = \frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log F_i +
\frac{\left(10 \gamma -2 \pi^2\right)\ell }{27} + O\left(n^{-\frac 14} (\log n)^4\right).
\end{equation}
This shows that $\varepsilon_2(n)=c_2n^{-\frac 14}(\log n)^3$, where $c_2>0$ is some absolute constant. Analogously, by~\eqref{eq:4} we set $\varepsilon_1(n)=c_1n^{-\frac 14}(\log n)^{\frac 32}$, where $c_1>0$ is some absolute constant.


Finally, taking $\zeta = \ind (f_0(P_{\eta})\leq x)$ and $A=\clE$ we obtain from Corollary~\ref{cor:complcE} that
\[
\left|\bbP(f_0(P_{\eta}|\clE)\leq x)-\bbP(f_0(P_{\eta})\leq x)\right|\leq 2\bbP(\overline{\clE}) = O\left(n^{-\frac 14}\right).
\]
This completes the argument.
\end{proof}

By Lemma~\ref{lm:transference} and Lemma~\ref{lm:condition} together with Lemma~\ref{lem:RemovePointsZ} we conclude the required Berry--Esseen bound for the Poisson model of random polygons. Expectation and variance have been obtained in~\eqref{eq:EE-Poiss} and~\eqref{eq:Var-Poiss}.


\begin{theo}\label{thm:BerryEsseenPoisson}
Consider the Poisson random polygon $P_\eta$ induced by a homogeneous Poisson point process $\eta$ in a polygon $P$ of unit area with $\bbE(\# \eta)=n$. Then, for any $n\geq 2$,
\[
\sup_{x\,\in\,\bbR}\left|\bbP\left(\frac{f_0(P_{\eta})-\bbE f_0(P_{\eta})}{\sqrt{\var f_0(P_{\eta})}}\leq x\right)-\Phi(x)\right|\leq \frac{c}{\sqrt{\log n}}
\]
for some constant $c>0$ independent of $n$, with
\begin{align*}
\bbE f_0(P_{\eta}) &= \frac{2 \ell}{3}\log n + \frac 23 \sum_{i=1}^\ell \log F_i + \frac{2 \gamma \ell }3 + O\left(n^{-\frac 14}(\log n)^2\right)
\intertext{and}
\var(f_0(P_{\eta})) &= \frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log F_i +
\frac{\left(10 \gamma -2 \pi^2\right)\ell }{27} + O\left(n^{-\frac 14} (\log n)^4\right).
\end{align*}
\end{theo}

\subsection{Step 6: Going back to uniform model}\label{sec:dePoisson}

This is the last step in the proof of Theorem~\ref{thm:main}, in which we apply Lemma~\ref{lm:transference} with $\xi'_n=f_0(P_{\eta})$ and $\xi_n=f_0(P_n)$. Condition~\eqref{lemm3.6.4} there holds due to Theorem~\ref{thm:BerryEsseenPoisson} with $\varepsilon_4(n)=c_4/\sqrt{\log n}$ for some $c_4>0$. Condition~\eqref{lemm3.6.1} with $\varepsilon_1(n)=c_1/\sqrt{\log n}$, $c_1>0$, follows from~\eqref{eq:expPolUnif} and Theorem~\ref{thm:BerryEsseenPoisson}.

\begin{proof}
In order to check condition~\eqref{lemm3.6.2} we need a more precise asymptotics for the variance of $f_0(P_n)$ compared to the one given by~\eqref{eq:varPolUnif}. In particular we need a result of the form $\var(f_0(P_{n}))=\frac{10 \ell}{27} \log n +O(1)$. First note that by the trivial estimate $f_0(P_\eta) \leq \# \eta$ and using Lemma~\ref{le:floating-Peta} we have
\begin{equation}\label{eq:diff-expP_eta-cA}
\begin{split}
\left| \bbE \left(f_0(P_\eta)^k \ind(\clA^\pi_n)\right) - \bbE f_0(P_\eta)^k \right| &= \left| \bbE \left(f_0(P_\eta)^k (\ind (\clA^\pi_n) -1)\right) \right|\\ 
&\leq \left(\bbE f_0(P_\eta)^{2k}\right)^{\frac 12} \left(\bbE \left(\ind (\clA^\pi_n) -1\right)^2\right)^{\frac 12}
\\ & = O\left(n^k \, \bbP(\overline{\clA^\pi_n})^{\frac 12}\right) = O\left(n^{k-3}\right)
\end{split}
\end{equation}
for $k=1,2$, say. The definition of the variance gives
\begin{multline*}
\left| \var \left(f_0(P_\eta)  \ind (\clA^\pi_n)\right) - \var f_0(P_\eta) \right|
\\  \leq \left| \bbE \left(f_0(P_\eta)^2 \ind(\clA^\pi_n)\right) - \bbE f_0(P_\eta)^2 \right| + \left|\big(\bbE (f_0(P_\eta) \ind(\clA^\pi_n))\big)^2 - (\bbE f_0(P_\eta))^2 \right|.
\end{multline*}
We use now~\eqref{eq:diff-expP_eta-cA} to bound the first term by $n^{-1}$. Theorem~\ref{thm:BerryEsseenPoisson} and~\eqref{eq:diff-expP_eta-cA} show, that
\begin{multline*}
\left| \big(\bbE (f_0(P_\eta) \ind(\clA^\pi_n))\big)^2  - (\bbE f_0(P_\eta))^2 \right|\\
 = \left| \bbE (f_0(P_\eta) \ind(\clA^\pi_n)) - \bbE f_0(P_\eta) \right|\, \left| \bbE (f_0(P_\eta) \ind(\clA^\pi_n)) + \bbE f_0(P_\eta) \right|
\end{multline*}
is bounded (up to multiplicative constant) by $n^{-2} \log n = O(n^{-1})$. Hence
\begin{align*}
\left| \var (f_0(P_\eta) \ind(\clA^\pi_n)) - \var f_0(P_\eta) \right| & = O(n^{-1}).
\end{align*}
The same result holds for $ \var (f_0(P_n) \ind(\clA_n)) - \var f_0(P_n) $ with a slightly simpler proof because $f_0(P_n) \leq n$, where $n$ is non-random. These estimates show that

\begin{multline*}
\left| \var f_0(P_\eta)  - \var f_0(P_n)\right|\\
\begin{aligned}
&\leq  \big| \var (f_0(P_\eta) \ind(\clA^\pi_n)) - \var (f_0(P_n) \ind(\clA_n)) \big| + O(n^{-1})\\
&\leq \left| \bbE (f_0(P_\eta) \ind(\clA^\pi_n))^2 - \bbE (f_0(P_n) \ind(\clA_n))^2 \right|\\
& \quad + \big| \bbE (f_0(P_\eta) \ind(\clA^\pi_n)) - \bbE (f_0(P_n) \ind(\clA_n)) \big| \left(\bbE f_0(P_\eta) + \bbE f_0(P_n) \right) + O(n^{-1}).
\end{aligned}
\end{multline*}
For $P_\eta$ we have that $\# \eta \cap P(v< b_0 n^{-1} \log n)$ is Poisson distributed with mean
\[
np := n\, {\area}\left(P(v< b_0 n^{-1} \log n)\right) = O\left((\log n)^2\right),
\]
by~\eqref{eq:wetpart}, and for $P_n$ the number of points in $P(v< b_0 n^{-1} \log n)$ is binomially distributed with mean $np$. Denote by $E_m$ the event that precisely $m$ points of the Poisson or binomial process are in $P(v< b_0 n^{-1} \log n)$. Coupling both processes in the canonical way yields
\begin{multline*}
\big| \bbE (f_0(P_\eta) \ind(\clA^\pi_n))^k - \bbE (f_0(P_n) \ind(\clA_n))^k\big|
\\
\begin{aligned}
& =\sum_{m=0}^\infty \bbE \left((f_0(P_n) \ind(\clA_n))^k \,\middle|\, E_m\right) \left|\frac {(np)^m}{m!} e^{-np} - \binom{n}{m} p^m (1-p)^{n-m} \right|
\\ & \leq
\sum_{m=0}^\infty m^k \left|\frac {(np)^m}{m!} e^{-np} - \binom{n}{m} p^m (1-p)^{n-m} \right|.
\end{aligned}
\end{multline*}
Lemma~\ref{le:diff-Poisson-binom} and the fact that the expected number of vertices in both models is of order $\log n$ implies
\begin{gather*}
\left| \var f_0(P_\eta) - \var f_0(P_n) \right|  = O\left(n^{-1} (\log n)^5\right).
\\
\intertext{Thus,}
\var f_0(P_n) = \frac{10 \ell}{27}\log n +\frac{10}{27}\sum_{i=1}^\ell \log F_i +\frac{\left(10 \gamma -2 \pi^2\right)\ell }{27} + O\left(n^{-\frac 14} (\log n)^4\right),
\end{gather*}
which proves the variance expansion in Theorem~\ref{thm:main} and shows that condition~\eqref{lemm3.6.2} holds with $\varepsilon_2(n)=c_2 n^{-1} (\log n)^4$ for some $c_2>0$ independent of $n$.

The slightly better estimate for the expectation in Theorem~\ref{thm:main} (in comparison to~\eqref{eq:expPolUnif}) follows analogously from Theorem~\ref{thm:BerryEsseenPoisson} and the estimate
\begin{align*}
| \bbE f_0(P_\eta) - \bbE f_0(P_n) |
\leq & \big| \bbE (f_0(P_\eta) \ind(\clA^\pi_n)) - \bbE (f_0(P_n) \ind(\clA_n)) \big| + O(n^{-1})
\\ & = O\left(n^{-1}(\log n)^4\right).
\end{align*}


Condition~\eqref{lemm3.6.3} can be verified following the same approach, which was already used in Lemma~\ref{lm:BerryEssenPoissonChain}. By Lemma~\ref{le:floating-Pn}, Lemma~\ref{le:floating-Peta} and Lemma~\ref{le:diff-Poisson-binom},
\begin{align*}
\big|\bbP(f_0(P_n)\leq x)- & \bbP(f_0(P_{\eta})\leq x)\big|
\\ &= \big|\bbP\left(f_0(P_n)\leq x, \clA_n\right)-\bbP\left(f_0(P_{\eta})\leq x, \clA_n^\pi\right)\big| + O(n^{-6})
\\ & \leq
\sum_{m=0}^\infty \left| \frac{(np)^m}{m!} e^{- np} - \binom{n}{m} p^m (1-p)^{n-m} \right| + O(n^{-6})
\\ & \leq 2p + O(n^{-6})\\
& = O\left(n^{-1} (\log n)^2\right).
\end{align*}
Thus, \eqref{lemm3.6.3} holds with $\varepsilon_3(n)=c_3 n^{-1} (\log n)^2$ for some constant $c_3>0$ independent of $n$. Finally, by combining all estimates and using Lemma~\ref{lm:transference} we complete the proof of Theorem~\ref{thm:main}.
\end{proof}


\subsection*{Acknowledgements}
We would like to thank Sam Johnston (Bath) for an enlightening discussion on the content of Lemma~\ref{lm:GlueBerryEsseen}. Moreover, we thank two anonymous referees for careful reading and for stimulating comments.


\bibliography{gusakova}
\end{document}
