%~Mouliné par MaN_auto v.0.31.0 2024-02-12 14:10:12
\documentclass[CRMATH, Unicode, XML]{cedram}

\TopicFR{\'Equations aux dérivées partielles, Théorie du contrôle}
\TopicEN{Partial differential equations, Control theory}

\usepackage{amssymb}
%\usepackage{ulem}%TO DELETE

% Shorthand notation used throughout the paper.
% \RR : the real line (blackboard bold R).
\newcommand{\RR}{{\mathbb R}}
% \R : calligraphic R; rearrangement classes \R(g) (see Section 2).
\newcommand{\R}{{\mathcal R}}
% \Lcal : calligraphic L; the linear functionals \Lcal_i in Section 3.
\newcommand{\Lcal}{{\mathcal L}}
% \Kcal : calligraphic K; the sets in the decomposition of Lemma "three".
\newcommand{\Kcal}{{\mathcal K}}
% Upright differentials preceded by a thin space, e.g. \int_D g \dx.
\newcommand{\dx}{\hspace{0.5mm}\mathrm{d}x}
\newcommand{\ds}{\hspace{0.5mm}\mathrm{d}s}

% ":=" with correct relation spacing.
% NOTE(review): \coloneqq comes from mathtools -- confirm the cedram class
% loads it, since mathtools is not loaded explicitly here.
\newcommand{\defeq}{\coloneqq}

\makeatletter
% Override of the cedram class hook \@setafterauthor, which typesets the
% material accumulated in \@afterauthor (presumably the address/email
% block shown under the author list -- confirm against the class source).
% This version sets it ragged-right in \Small, indented by
% \abstractmarginL so it lines up with the abstract.
% NOTE(review): redefining a class internal is fragile; re-check this
% whenever the cedram class is updated.
\def\@setafterauthor{%
  \vglue3mm%
%  \hspace*{0pt}%
\begingroup\hsize=12cm\advance\hsize\abstractmarginL\raggedright
\noindent
%\hspace*{\abstractmarginL}\begin{minipage}[t]{10cm}
   \leftskip\abstractmarginL
  \normalfont\Small
  \@afterauthor\par
\endgroup
\vskip2pt plus 3pt minus 1pt
}

%Better widebar
% The following defines \widebar, a width-adaptive \overline whose bar is
% shifted and shortened according to the skew/italic-correction data that
% amsmath computes for math accents (\macc@kerna).  It appears to be the
% widely circulated \widebar implementation by Hendrik Vogt (from
% tex.stackexchange) -- kept verbatim; do not edit the kern arithmetic.
\let\save@mathaccent\mathaccent
% \if@single{<arg>}{<single>}{<multi>}: executes <single> if <arg> is a
% single symbol, <multi> otherwise.  It decides by comparing the heights of
% an accented box with and without a leading \kern0pt, which suppresses the
% single-symbol skew correction.
\newcommand*\if@single[3]{%
  \setbox0\hbox{${\mathaccent"0362{#1}}^H$}%
  \setbox2\hbox{${\mathaccent"0362{\kern0pt#1}}^H$}%
  \ifdim\ht0=\ht2 #3\else #2\fi
  }
%The bar will be moved to the right by a half of \macc@kerna, which is computed by amsmath:
\newcommand*\rel@kern[1]{\kern#1\dimexpr\macc@kerna}
%If there's a superscript following the bar, then no negative kern may follow the bar;
%an additional {} makes sure that the superscript is high enough in this case:
\newcommand*\widebar[1]{\@ifnextchar^{{\wide@bar{#1}{0}}}{\wide@bar{#1}{1}}}
%Use a separate algorithm for single symbols:
\newcommand*\wide@bar[2]{\if@single{#1}{\wide@bar@{#1}{#2}{1}}{\wide@bar@{#1}{#2}{2}}}
% \wide@bar@{<arg>}{<kern-flag>}{<single-flag>} does the actual work by
% locally redefining \mathaccent so that amsmath's accent machinery
% (\macc@nested@a) ends up calling the code below with \macc@kerna set.
\newcommand*\wide@bar@[3]{%
  \begingroup
  \def\mathaccent##1##2{%
%Enable nesting of accents:
    \let\mathaccent\save@mathaccent
%If there's more than a single symbol, use the first character instead (see below):
    \if#32 \let\macc@nucleus\first@char \fi
%Determine the italic correction:
    \setbox\z@\hbox{$\macc@style{\macc@nucleus}_{}$}%
    \setbox\tw@\hbox{$\macc@style{\macc@nucleus}{}_{}$}%
    \dimen@\wd\tw@
    \advance\dimen@-\wd\z@
%Now \dimen@ is the italic correction of the symbol.
    \divide\dimen@ 3
    \@tempdima\wd\tw@
    \advance\@tempdima-\scriptspace
%Now \@tempdima is the width of the symbol.
    \divide\@tempdima 10
    \advance\dimen@-\@tempdima
%Now \dimen@ = (italic correction / 3) - (Breite / 10)
    \ifdim\dimen@>\z@ \dimen@0pt\fi
%The bar will be shortened in the case \dimen@<0 !
    \rel@kern{0.6}\kern-\dimen@
    \if#31
      \overline{\rel@kern{-0.6}\kern\dimen@\macc@nucleus\rel@kern{0.4}\kern\dimen@}%
      \advance\dimen@0.4\dimexpr\macc@kerna
%Place the combined final kern (-\dimen@) if it is >0 or if a superscript follows:
      \let\final@kern#2%
      \ifdim\dimen@<\z@ \let\final@kern1\fi
      \if\final@kern1 \kern-\dimen@\fi
    \else
      \overline{\rel@kern{-0.6}\kern\dimen@#1}%
    \fi
  }%
  \macc@depth\@ne
  \let\math@bgroup\@empty \let\math@egroup\macc@set@skewchar
  \mathsurround\z@ \frozen@everymath{\mathgroup\macc@group\relax}%
  \macc@set@skewchar\relax
  \let\mathaccentV\macc@nested@a
%The following initialises \macc@kerna and calls \mathaccent:
  \if#31
    \macc@nested@a\relax111{#1}%
  \else
%If the argument consists of more than one symbol, and if the first token is
%a letter, use that letter for the computations:
    \def\gobble@till@marker##1\endmarker{}%
    \futurelet\first@char\gobble@till@marker#1\endmarker
    \ifcat\noexpand\first@char A\else
      \def\first@char{}%
    \fi
    \macc@nested@a\relax111{\first@char}%
  \fi
  \endgroup
}
\makeatother

% Make \bar adaptive.  The four \mathchoice slots are display, text,
% script and scriptscript style: use \widebar (defined above) in the first
% three and fall back to the original fixed-width \bar in
% \scriptscriptstyle.  The extra outer braces keep the choice grouped.
\let\oldbar\bar
\renewcommand*{\bar}[1]{{\mathchoice{\widebar{#1}}{\widebar{#1}}{\widebar{#1}}{\oldbar{#1}}}}

% Figure files are resolved relative to ./figures/.
\graphicspath{{./figures/}}

% Small manual math kerns for fine-tuning:
% \mk / \Mk pull back by 1mu / 2mu; \mK / \MK push forward by 1mu / 2mu.
\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}

% Link colours for hyperref (presumably loaded by the cedram class, since
% it is not loaded here -- confirm).
\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}

% Helpers to switch the numbering style of the first enumerate level.
% \relabel restores the parenthesised "(label)" form after \theenumi has
% been changed; \romanenumi etc. set roman/Roman/alph/Alph numbering.
% (Used e.g. as \begin{enumerate}\alphenumi in Section 4.)
\newcommand*{\relabel}{\renewcommand{\labelenumi}{(\theenumi)}}
\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}\relabel}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}\relabel}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}\relabel}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}\relabel}
% Make \tilde and \hat adaptive: wide variants in display and text style,
% the original fixed-size accents in the two script styles.
\let\oldtilde\tilde
\renewcommand*{\tilde}[1]{\mathchoice{\widetilde{#1}}{\widetilde{#1}}{\oldtilde{#1}}{\oldtilde{#1}}}
\let\oldhat\hat
\renewcommand*{\hat}[1]{\mathchoice{\widehat{#1}}{\widehat{#1}}{\oldhat{#1}}{\oldhat{#1}}}
% Typeset \forall as a relation so it receives relation spacing on both
% sides (used as "\forall\,\alpha\in\RR" in Definition 2.1).
\let\oldforall\forall
\renewcommand*{\forall}{\mathrel{\oldforall}}


\title{A general existence theorem for a multi-valued control problem}

\author{\firstname{Behrouz} \lastname{Emamizadeh}}
\address{Department of Mathematical Sciences, University of Nottingham Ningbo China, Ningbo, China}
\address{Institute for Research in Fundamental Sciences (IPM), Tehran, Iran}
\email{behrouz.emamizadeh@nottingham.edu.cn}
\email{behrouz.emamizadeh@ipm.ir}

\author{\firstname{Yichen} \lastname{Liu}\IsCorresp}
\address{Department of Applied Mathematics, School of Mathematics and Physics, Xi'an Jiaotong-Liverpool University, Suzhou, China}
\email{yichen.liu01@xjtlu.edu.cn}

\author{\firstname{Mohsen} \lastname{Zivari-Rezapour}}
\address{Department of Mathematics, Faculty of Mathematical Sciences and Computer, Shahid Chamran University of Ahvaz, Ahvaz, Iran}
\email{mzivari@scu.ac.ir}

\thanks{Y. Liu is supported by the Natural Science Foundation of Jiangsu Province (grant number: BK20200249). M. Zivari-Rezapour is grateful to the Research Council of Shahid Chamran University of Ahvaz for the research grant (SCU.MM1402.441).}

\CDRGrant[Natural Science Foundation of Jiangsu Province]{BK20200249}
\CDRGrant[Research Council of Shahid Chamran University of Ahvaz]{SCU.MM1402.441}

\begin{abstract}
In this paper, we propose a general existence theorem for a multi-valued control problem. The proof of the theorem is based on a decomposition result of the weak$^\star$ closure of the set containing all the multi-valued controls and the bathtub principle. We also obtain the optimality condition for the optimal control.
\end{abstract}

\keywords{\kwd{multi-valued control}
\kwd{existence}
\kwd{uniqueness}
\kwd{decomposition}
\kwd{optimality condition}}
\subjclass{49J20, 49K20, 35J20}

\begin{document}
\maketitle

\section{Introduction} In this paper, we consider optimal control problems in the following form
\begin{equation}\label{min1}
\inf_{g\in A_n}\Psi(g),
\end{equation}
where $\Psi$ is a (nonlinear) energy functional which is related to a state equation (usually a differential equation), and $A_n$ is an admissible set comprising $n$-valued control functions.

Let us describe the problem. To begin with, we let $D$ be a bounded domain in $\mathbb{R}^N$ throughout this article. The set of admissible functions is given by
\begin{equation}\label{eqn7}
A_n=\left\{\sum_{k=1}^n c_k\chi_{E_k}: \; E_k\subseteq D, \{E_k\}\;\text{are mutually disjoint}, |E_k|=\alpha_k\right\},
\end{equation}
where $c_k$ are fixed non-negative constants different from each other. The notation $\chi_{E_k}$ indicates the characteristic function of $E_k$, and $\alpha_k$ are prescribed strictly positive constants, satisfying $\sum_{k=1}^n\alpha_k= |D|$. Here, $|\cdot|$ denotes the Lebesgue measure in $\mathbb{R}^N$. In this paper, we shall present a general existence theorem that can be applied to a vast class of control problems, including the minimization problem~\eqref{min1}. Before that, let us briefly review the related literature.

When $n=2$, the problem~\eqref{min1} has been widely discussed in many papers, see for example~\cite{CM90,HM01}. It is well known that the weak$^\star$ closure of $A_2$ in $L^\infty(D)$, identified as $(L^1(D))^\star$, is the following set
\[
\bar{A}:=\left\{g\in L^\infty(D):\min\{c_1,c_2\}\le g(x) \le \max\{c_1,c_2\},\,\int_{D}g(x)\dx=c_1\alpha_1+c_2\alpha_2\right\},
\]
see for example Proposition~2.4 in~\cite{CM90}. With this notation, we consider a relaxed version of the problem~\eqref{min1}
\begin{equation}\label{eqn8}
\inf_{g\in \bar{A}}\Psi(g).
\end{equation}
The minimization problem~\eqref{eqn8} has been investigated by many authors before, see for example~\cite{HM01,KS08}. On the other hand, see~\cite{CGIKO00,HP18,Pir84} for discussing shape optimization problems related to~\eqref{min1}.

Let us now return to the general case. We set $M=\max_{1\le k\le n}c_k$, $m=\min_{1\le k\le n}c_k$, and
\[
A_{m,M}:=\left\{g\in L^\infty (D):m\le g(x)\le M,\,\int_{D}g\dx=\sum_{k=1}^{n}c_k\alpha_k\right\}.
\]
Note that when $n=2$, we have $\bar{A_n}=A_{m,M}$, where $\bar{A_n}$ denotes the weak$^\star$ closure of $A_n$ in $L^\infty$, but this property ceases to be true when $n\ge 3$ (see Lemma~\ref{three} hereafter). The main result is as follows:

\begin{theo*}\label{existence1}
Let $\Psi:A_{m,M}\to\mathbb{R}$ be weak$^\star$ lower semicontinuous in $L^\infty(D)$, and strictly convex. Suppose for any $g_i\in A_{m,M}$, $i=1,2$, the following formula holds:
\begin{equation}\label{psi_derivative}
\lim_{t\to 0^+}\frac{\Psi(g_1+t(g_2-g_1))-\Psi(g_1)}{t}=\int_D(g_2-g_1)F(g_1)\dx,
\end{equation}
where $F:A_{m,M}\to L^1(D)$ is an operator. Suppose the following condition holds:
\begin{equation}\label{C1}
\tag{C1}
\text{for each $g\in A_{m,M}$, every level set of $F(g)$ has measure zero on $S(g)$,}
\end{equation}
where $S(g):=\{x\in D:g(x)>0\}$. Then, the minimization problem~\eqref{min1} has a unique solution $\hat{g}\in A_n$. Moreover, by rearranging $\{c_k\}$ in an increasing order, there exist $\gamma_1>\gamma_2>\dots>\gamma_{n-1}$ such that $\hat{g}=\sum_{k=1}^{n}c_k\chi_{\hat{E}_k}$, where
$\hat{E}_1=\{x\in D:F(\hat{g})(x)\ge \gamma_1\}$, $\hat{E}_n=\{x\in D:F(\hat{g})(x)< \gamma_{n-1}\}$, and
\[
\hat{E}_k=\{x\in D:\gamma_{k}\le F(\hat{g})(x)<\gamma_{k-1}\}\,\text{ for all } \,2\le k\le n-1.
\]
\end{theo*}

\begin{rema}
The hypothesis~\eqref{psi_derivative} is a ``restricted'' version of G\^ateaux differentiation of $\Psi$ at $g_1$. This differentiation only requires the limits to be valid in the directions $g_2-g_1$ with $g_2\in A_{m,M}$, so it is a weaker assumption compared with the G\^ateaux one. In some applications, the G\^ateaux derivative may not exist, but this version of derivative may exist which suffices for our purpose here. Moreover, in the proof of the Theorem, the hypothesis~\eqref{psi_derivative} will help us to reduce the nonlinear minimization problem~\eqref{min1} to a linear minimization problem which is easier to handle.
\end{rema}

\begin{rema}
In many cases, the condition \eqref{C1} is satisfied easily from the regularity of the solution of the partial differential equation which gives rise to the function $\Psi$, see Section~\ref{sec4}, where an example of this situation is presented.
\end{rema}

In the proof of the Theorem, we will use a decomposition result of the weak$^\star$ closure of the admissible set $A_n$ from~\cite{ZLE} and the bathtub principle, see~\cite{LL01}. In the last section of the paper, we will revisit a known example and demonstrate how the Theorem applies to it. More applications of our main result are cited in Remark~\ref{rem2}.

A surprising consequence of the Theorem is that the multi-valued control problem in~\cite{EL20}, where the authors had the impression that the most efficient way to prove the existence and uniqueness of the optimal solution was derived from the classical rearrangement optimization theory, now can be obtained by multiple applications of the bathtub principle.

\subsection*{Structure of the paper}

In Section~\ref{sec2}, we recall some background knowledge about the rearrangement of functions~\cite{Bur87} and the bathtub principle~\cite{LL01}. In addition, we will state an important decomposition lemma obtained recently by the authors, see~\cite{ZLE}. Section~\ref{sec3} is devoted to the proof of the Theorem. The final section contains applications of the Theorem, in which the partial differential equation is the Dirichlet Poisson problem; however, other differential operators can replace the Laplacian, for example the $p$-Laplacian operator.

\section{Preliminaries}\label{sec2}
We begin this section with the following

\begin{defi}\label{basicrearrange1}
Suppose $g_0,g:D\to\RR$ are two measurable functions. We say $g_0$ is a rearrangement of $g$ if and only if
\[
\lambda_{g_0}(\alpha)\equiv\left|\left\{x\in D:g_0(x)\ge \alpha\right\}\right|=\left|\left\{x\in D:g(x)\ge \alpha\right\}\right|\equiv\lambda_g(\alpha),\quad\forall\,\alpha\in\RR.
\]
\end{defi}

\begin{defi}\label{basicrearrange2}
Let $g$ be as in Definition~\ref{basicrearrange1}. The rearrangement class generated by $g$ on $D$, denoted $\R(g)$, is defined by
\[
\R(g)=\left\{h:D\to\RR\;\text{measurable}:h\;\text{is a rearrangement of}\; g\right\}.
\]
Moreover, the decreasing rearrangement of $g$ on $(0,|D|)$ is defined by $g^\Delta(s)=\max{\left\{\alpha:\lambda_g(\alpha)\ge s\right\}}$.
\end{defi}

For $E\subseteq L^\infty(D)$, we will denote the closure of $E$ with respect to the weak$^\star$ topology $\sigma(L^\infty,L^1)$ by $\bar{E}$. It is well known that $\bar{\R(g)}$ is convex, and weak$^\star$ (sequentially) compact, see Theorem~6 and Lemma~6 in~\cite{Bur87}.

Next, we present the following key decomposition lemma from~\cite{ZLE}, whose proof we include for the convenience of the reader.

\begin{lemm}\label{three}
Let $A_n$ be defined as in~\eqref{eqn7} and suppose $\{c_i\}$ is strictly increasing. Then, the following equation holds
\begin{equation}\label{eqnd3}
\bar{A_n}=\sum_{i=1}^{n}\Kcal_i,
\end{equation}
where
\begin{equation*}
\Kcal_1=\left\{c_1\right\}\quad\text{and}\quad \Kcal_i=\left\{f\in L^\infty(D):0\le f\le c_i-c_{i-1},\,\int_Df\dx=(c_i-c_{i-1})\sum_{k=i}^{n}\alpha_k\right\},
\end{equation*}
for all $i=2,\dots,n$. Here, $c_1$ denotes the constant function.
\end{lemm}
\begin{proof}
By setting $c_0=0$, we define
\begin{equation*}
f_i=(c_i-c_{i-1})\chi_{\bigcup_{k=i}^nE_k},
\end{equation*}
$i=1,2,\dots,n$. We then set
\begin{equation}\label{eqnd2}
f=\sum_{i=1}^{n}f_i.
\end{equation}
Clearly, $A_n=\R(f)$. On the other hand, for almost every $s\in(0,|D|)$, we have
\[
f_i^\vartriangle(s)=(c_{i}-c_{i-1})\chi_{(0,\sum_{k=i}^{n}\alpha_{k})}(s),
\]
$i=1,2,\ldots,n$. For $i,j\in\{1,2,\ldots,n\}$ with $i\leq j$, we deduce, by direct computation, that
\begin{equation}\label{eqq}
\int_D f_i(x)f_j(x)\dx=(c_{i}-c_{i-1})(c_{j}-c_{j-1})\sum_{k=j}^{n}\alpha_{k}=\int_0^{|D|}f_i^\vartriangle(s)f_j^\vartriangle(s)\ds.
\end{equation}
From~\eqref{eqnd2}, \eqref{eqq} and Theorem~3 in~\cite{AC22}, we infer that
\[
\bar{A_n}=\bar{\R(f)}=\bar{\R\left(\sum_{i=1}^n f_i\right)}=\sum_{i=1}^{n}\bar{\R(f_i)}.
\]
Moreover, by Proposition~2.4 in~\cite{CM90}, we have
\[
\Kcal_i:=\bar{\R(f_i)}=\left\{g\in L^\infty(D):0\leq g\leq c_{i}-c_{i-1},\,\int_D g\dx=(c_{i}-c_{i-1})\sum_{k=i}^n\alpha_{k}\right\},
\]
$i=1,2,\dots,n$. Notice that $\R(f_1)=\{f_1\}=\{c_1\}$, hence we infer $\Kcal_1=\bar{\R(f_{1})}=\left\{c_1\right\}$. Therefore, the decomposition~\eqref{eqnd3} follows.
\end{proof}

We will use the following version of the bathtub principle to prove the Theorem:

\begin{prop}[Bathtub principle]\label{bathtub_principle}
Let $f$ be a real-valued, measurable function on $D$, and $\beta$ be a positive constant. Set
\[
\tilde{A}=\left\{g\in L^\infty(D):0\le g(x)\le \beta,\,\int_{D}g(x)\dx=\gamma\right\},
\]
where $0< \gamma <\beta|D|$. Then, the minimization problem
\begin{equation}\label{eqnadd1}
I=\inf_{g\in \tilde{A}}\int_D f(x)g(x)\dx
\end{equation}
is solved by
\begin{equation*}
g(x)=\beta\chi_{\left\{f<s\right\}}(x)+c\chi_{\left\{f=s\right\}}(x)
\end{equation*}
where
\begin{equation*}
s=\sup\left\{t:|\{x\in D:f(x)<t\}|\le \frac{\gamma}{\beta}\right\}
\end{equation*}
and
\begin{equation*}
c\left|\left\{x\in D:f(x)=s\right\}\right|=\gamma-\beta\left|\left\{x\in D:f(x)<s\right\}\right|.
\end{equation*}
The minimizer of the problem~\eqref{eqnadd1} is unique if $\frac{\gamma}{\beta}=\lvert\{x\in D:f(x)<s\}\rvert$ or $\frac{\gamma}{\beta}=\lvert\{x\in D:f(x)\le s\}\rvert$.
\end{prop}

\begin{proof}
By a simple scaling of the admissible functions, we can transform the problem~\eqref{eqnadd1} into the following one:
\begin{equation}\label{eqnadd2}
\inf_{g\in\tilde{B}}\int_{D}f(x)g(x)\dx,
\end{equation}
where $\tilde{B}:=\frac{\tilde{A}}{\beta}=\bigl\{g\in L^\infty (D):0\le g(x)\le 1,\,\int_{D}g(x)\dx=\frac{\gamma}{\beta}\bigr\}$. Then, we use Theorem~1.14 in~\cite{LL01} to solve~\eqref{eqnadd2}. By scaling back, we obtain the solution for~\eqref{eqnadd1}.
\end{proof}


\section{Proof of the Theorem}\label{sec3}

Let $g_0\in A_n$, and note that $A_n=\R(g_0)$. We next relax the minimization problem~\eqref{min1} by considering
\begin{equation}\label{min2}
\inf_{g\in\bar{\R(g_0)}}\Psi(g).
\end{equation}
As $\bar{\R(g_0)}\subseteq A_{m,M}$ (see the proof of Lemma~2.3 in~\cite{LE16} and Lemma~2.3 in~\cite{Bur89}) is weak$^\star$ compact and $\Psi$ is weak$^\star$ lower semicontinuous, the problem~\eqref{min2} is solvable. The uniqueness of the solution follows from the convexity of $\bar{\R(g_0)}$ and the strict convexity of $\Psi$. Let us denote the minimizer of~\eqref{min2} by $\hat{g}$, and we show that $\hat{g}\in A_n$.

Now, rearranging $c_1,c_2,\dots,c_n$ in an increasing order and using the decomposition result of Lemma~\ref{three}, we infer
\begin{equation*}
\bar{\R(g_0)}=\bar{A_n}=\sum_{i=1}^{n}\Kcal_i,
\end{equation*}
where
\begin{equation*}
\Kcal_1=\left\{c_1\right\}\quad\text{and}\quad\Kcal_i=\left\{f\in L^\infty(D):0\le f\le c_i-c_{i-1},\,\int_Df\dx=(c_i-c_{i-1})\sum_{k=i}^{n}\alpha_k\right\},
\end{equation*}
for all $i=2,\dots,n$. Moreover, there exist $\hat{g}_i\in \Kcal_i$, $i=1,\dots,n$, such that $\hat{g}=\sum_{i=1}^{n}\hat{g}_i$. Note that $\hat{g}_1=c_1$. By setting $\Psi_i(h)=\Psi(h+\sum_{j=1,\,j\neq i}^{n}\hat{g}_j)$, we can deduce that $\hat{g}_i$ is the unique minimizer of the following problem
\begin{equation*}
\inf_{h\in\Kcal_i}\Psi_i(h),
\end{equation*}
for all $i=2,\dots,n$. Fix $i\in \{2,3,\dots,n\}$ and $h\in \Kcal_i$. Observing that $h+\sum_{j=1,\,j\neq i}^{n}\hat{g}_j\in \bar{A_n}\subseteq A_{m,M}$, we apply the formula~\eqref{psi_derivative} to obtain
\begin{multline}\label{eqn1}
\lim_{t\to0^+}\frac{\Psi_i(\hat{g}_i+t(h-\hat{g}_i))-\Psi_i(\hat{g}_i)}{t} =\lim_{t\to0^+}\frac{\Psi(\hat{g}+t(h-\hat{g}_i))-\Psi(\hat{g})}{t}\\
=\lim_{t\to0^+}\frac{\Psi(\hat{g}+t(h+\sum_{j=1,\,j\neq i}^{n}\hat{g}_j-\hat{g}))-\Psi(\hat{g})}{t} =\int_{D}(h-\hat{g}_i)F(\hat{g})\dx.
\end{multline}
Since $\hat{g}_i$ is the minimizer and $\hat{g}_i+t(h-\hat{g}_i)\in \Kcal_i$ for $t\in(0,1)$, \eqref{eqn1} implies
\begin{equation*}
\int_{D}(h-\hat{g}_i)F(\hat{g})\dx\ge 0.
\end{equation*}
Whence, $\hat{g}_i$ minimizes the linear functional $\Lcal_i(h)=\int_{D}hF(\hat{g})\dx$ relative to $h\in \Kcal_i$.

From the condition \eqref{C1}, we know that the level sets of $F(\hat{g})$ have measure zero on $S(\hat{g})$. As $\hat{g}=\sum_{i=1}^{n}\hat{g}_{i}$ and $\hat{g}_i$ are non-negative functions, we have $S(\hat{g}_i)\subseteq S(\hat{g})$ and hence every level set of $F(\hat{g})$ has measure zero on $S(\hat{g}_i)$. Now, we apply the bathtub principle (Proposition~\ref{bathtub_principle}) to deduce that the unique minimizer of $\Lcal_{i}(h)$ is $\hat{g}_{i}=(c_{i}-c_{i-1})\chi_{\left\{F(\hat{g})<\gamma_{i-1}\right\}}$ for some constant $\gamma_{i-1}$ satisfying
\begin{equation}\label{eqn3}
\left|\left\{F(\hat{g})<\gamma_{i-1}\right\}\right| =\sum_{k=i}^{n}\alpha_{k}.
\end{equation}
On the other hand, by using~\eqref{eqn3}, we obtain the following order
\begin{equation*}
\gamma_1>\gamma_2>\dots>\gamma_{n-1}.
\end{equation*}
Moreover, we have
\begin{equation*}
\hat{g}=\sum_{i=1}^{n}\hat{g}_{i}=c_1 +
\sum_{i=2}^{n}(c_{i}-c_{i-1})\chi_{\left\{F(\hat{g})<\gamma_{i-1}\right\}}=\sum_{k=1}^{n}c_k\chi_{\hat{E}_k},
\end{equation*}
where
\[
\hat{E}_1=\{x\in D:F(\hat{g})(x)\ge \gamma_1\},\quad\hat{E}_n=\{x\in D:F(\hat{g})(x)< \gamma_{n-1}\},
\]
and,
\[
\hat{E}_k=\{x\in D:\gamma_{k}\le F(\hat{g})(x)<\gamma_{k-1}\}\quad \text{for all } 2\le k\le n-1.
\]
This completes the proof of the Theorem.

\begin{rema}
We would like to mention that the Theorem can be proved by using technical lemmas from~\cite{Bur89,LE17}, see Section~4 in~\cite{EL20}. However, the proof we provided here only applies the bathtub principle and a decomposition result of Lemma~\ref{three}, which together make the proof drastically simpler.
\end{rema}


\section{Applications}\label{sec4}

Consider the boundary value problem
\begin{equation}\label{bvp1}
\begin{cases}
-\Delta u=g(x), &\text{in } D\\
u=0, & \text{on } \partial D,
\end{cases}
\end{equation}
where $D$ is a bounded smooth ($C^{1,1}$ is enough) domain in $\mathbb{R}^{N}$. We can apply the Theorem to prove the existence and uniqueness of the optimal solution of the following multi-valued control problem
\begin{equation}\label{min3}
\inf_{g\in A_n}J(g):=\int_{D}gu_g\dx,
\end{equation}
where $u_g\in H_0^1(D)$ denotes the unique solution of~\eqref{bvp1} with the right hand side control $g$. Indeed, we have the following

\begin{prop}\label{prop1}
The minimization problem~\eqref{min3} is uniquely solvable. Moreover, by denoting the minimizer by $\hat{g}$, there exist $\gamma_1>\gamma_2>\dots>\gamma_{n-1}$ such that $\hat{g}=\sum_{k=1}^{n}c_k\chi_{\hat{E}_k}$, where $\hat{E}_1=\{x\in D:u_{\hat{g}}(x)\ge \gamma_1\}$, $\hat{E}_n=\{x\in D:u_{\hat{g}}(x)< \gamma_{n-1}\}$, and $\hat{E}_k=\{x\in D:\gamma_{k}\le u_{\hat{g}}(x)<\gamma_{k-1}\}$ for all $2\le k\le n-1.$
\end{prop}

Although the above result has been shown in the literature, see Remark~\ref{rem1} for details, we include a sketch of its proof for the sake of completeness.

\begin{proof}[Sketch of the proof of Proposition~\ref{prop1}]
Let us examine the conditions of the Theorem:
\begin{enumerate}\alphenumi
\item $J$ is weak$^\star$ continuous: let $g_n\overset{\star}{\rightharpoonup} g$ in $L^{\infty}(D)$. To show that $J (g_n)\rightarrow J (g)$, it suffices to prove that $u_n\equiv u_{g_n}\rightarrow u\equiv u_g$ in $L^{1}(D)$. Indeed, from~\eqref{bvp1}, we find
\begin{equation*}
\int_{D}|\nabla u_n|^{2}\dx=\int_{D}g_{n}u_n\dx\le \left\|g_n\right\|_2\left\|u_n\right\|_2
\le C\left\|g_n\right\|_2\left\|\nabla u_n\right\|_2,
\end{equation*}
where we have used H\"older's inequality in the first inequality, and Poincar\'e inequality in the second one. So, $\{u_n\}$ is bounded in $H_0^1(D)$. By passing to a subsequence, if necessary, we have
\begin{equation*}
u_n\rightharpoonup\bar{u} \;\text{ in } \;H_0^1(D),\quad\text{and}\quad u_n\to \bar{u}\;\text{ in }\;L^2(D).
\end{equation*}
Letting $n$ go to infinity in the following identity:
\begin{equation*}
\int_{D}\nabla u_n\cdot\nabla\varphi \dx=\int_{D}g_n\varphi \dx\quad\text{for all }\; \varphi\in C_c^\infty(D),
\end{equation*}
we deduce $\bar{u}=u$ by uniqueness. Thus, $u_n\to u$ in $L^2(D)$, which implies $u_n\to u$ in $L^1(D)$ (remembering that $D$ is bounded), as desired.
\item $J$ is strictly convex: first, by observing that
\begin{equation}\label{eqn4}
J(g)=-2\inf_{w\in H_0^1(D)}\left(\frac{1}{2}\int_D|\nabla w|^2\dx-\int_{D}gw\dx\right) =\sup_{w\in H_0^1(D)}\left(2\int_{D}gw\dx-\int_{D}|\nabla w|^2\dx\right),
\end{equation}
$J$ is convex as it is the supremum of a class of affine functions. To show $J$ is strictly convex, we argue by contradiction and suppose there exist different $g_1,g_2\in A_{m,M}$ and $t\in (0,1)$ such that $g_t=tg_1+(1-t)g_2$ and
\begin{equation*}
J(g_t)=tJ(g_1)+(1-t)J(g_2).
\end{equation*}
Rearranging terms, we find
\begin{equation*}
t\overbrace{\left\{J(g_1)-\left(2\int_{D}g_1u_{g_t}\dx-\int_{D}|\nabla u_{g_t}|^2\dx\right)\right\}}^A+ (1-t)\overbrace{\left\{J(g_2)-\left(2\int_{D}g_2u_{g_t}\dx-\int_{D}|\nabla u_{g_t}|^2\dx\right)\right\}}^B=0.
\end{equation*}
As the maximizer is uniquely attained in~\eqref{eqn4} by the solution $u_g$ corresponding to the control $g$, we have $A,B\ge0$ and so $A=B=0$ which implies $u_{g_1}=u_{g_t}=u_{g_2}$. By using~\eqref{bvp1}, we can show $g_1=g_2$ which is a contradiction.
\item $J$ satisfies the formula~\eqref{psi_derivative} with $F(g_1)=2u_{g_1}$: fix $g_1,g_2\in A_{m,M}$. Setting $g_t=g_1+t(g_2-g_1)$ with $t\in (0,1)$ and using the variational formulation~\eqref{eqn4}, we find
\begin{align}\label{eqn5}
\begin{split}
J(g_t)&=2\int_{D}g_tu_{g_t}\dx-\int_{D}|\nabla u_{g_t}|^2\dx
\ge 2\int_{D}g_tu_{g_1}\dx-\int_{D}|\nabla u_{g_1}|^2\dx\\
&=2\int_{D}g_1u_{g_1}\dx-\int_{D}|\nabla u_{g_1}|^2\dx+2t\int_{D}(g_2-g_1)u_{g_1}\dx=J(g_1)+2t\int_{D}(g_2-g_1)u_{g_1}\dx.
\end{split}
\end{align}
Similarly, we can derive
\begin{equation}\label{eqn6}
J(g_1)\ge J(g_t)-2t\int_{D}(g_2-g_1)u_{g_t}\dx.
\end{equation}
From~\eqref{eqn5} and~\eqref{eqn6}, we obtain
\begin{equation*}
2\int_{D}(g_2-g_1)u_{g_1}\dx \le\frac{J(g_t)-J(g_1)}{t}\le 2\int_{D}(g_2-g_1)u_{g_t}\dx.
\end{equation*}
Thus, $J$ satisfies the formula~\eqref{psi_derivative} with $F(g_1)=2u_{g_1}$, since $u_{g_t}\to u_{g_1}$ in $L^1(D)$ as $t\to 0^+$.
\item the condition \eqref{C1} is satisfied: fix $g\in A_{m,M}$. In order to derive a contradiction, we suppose there exists $\beta\in\mathbb{R}$ such that $|\tilde{E}|\neq 0$ with $\tilde{E}:=\{x\in D:u_g(x)=\beta\}\cap S(g)$. However, as $u_g\in H^2_{loc}(D)$, by using the differential equation in~\eqref{bvp1} and Lemma~7.7 in~\cite{GT01}, we deduce $g=0$ a.e. in $\tilde{E}$ which is clearly a contradiction.
\end{enumerate}
As all the conditions for the Theorem are verified, the assertions clearly follow.
\end{proof}

\begin{rema}\label{rem1}
The problem~\eqref{min3} has been investigated by Burton and McLeod in~\cite{BM91} in the following form
\begin{equation*}
\inf_{g\in \R(g_0)}J(g)
\end{equation*}
where $g_0\in L^\infty(D)$ is a prescribed function. Later, it has been generalized to the corresponding $p$-Laplacian problem in~\cite{EL15,LE16,Mar10}. Recently, a more generalized case is discussed in~\cite{EL20}, and the Theorem can be directly applied to solve multi-valued control problems in~\cite{EL20}.
\end{rema}

\begin{rema}\label{rem2}
The Theorem can also be applied to the extremal eigenvalue problems in~\cite{CM90,CEP09,MPV13}, and the details are left to the readers.
\end{rema}

\bibliographystyle{crplain}
\bibliography{CRMATH_Liu_20230506}
\end{document}