\documentclass[CRMATH,Unicode,biblatex,XML]{cedram}

\TopicFR{Algorithmes et outils informatiques}
\TopicEN{Algorithmic and computer tools}
\TopicFR{Analyse numérique}
\TopicEN{Numerical analysis}


\addbibresource{CRMATH_Wang_20220706.bib}

%% Insert here your own symbols, as the following ones:
% Upright (roman) notation macros used throughout the paper.
% NOTE(review): \rank and \tr are used as operators (e.g. "\tr AB" on the
% matrix inner-product definition); \DeclareMathOperator would give correct
% operator spacing — confirm the cedram class loads amsmath before this point
% (mathtools is only loaded further below) before switching.
\newcommand{\rank}{\mathrm{rank}}
\newcommand{\tr}{\mathrm{tr}}
% Problem-name tags used in math mode, e.g. $(\ETR_2)$, $(\SOCP_1(\beta))$.
\newcommand{\ETR}{\mathrm{ETR}}
\newcommand{\SOC}{\mathrm{SOC}}
\newcommand{\SOCP}{\mathrm{SOCP}}
\newcommand{\NSOCP}{\mathrm{NSOCP}}

\usepackage{algorithm}
\usepackage{amssymb}
\usepackage{mathtools}

%\renewcommand{\thealgorithm}{\arabic{algorithm}.} 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% --------------------------------------------------------------------------
% \widebar{<arg>}: a "wide bar" accent that, unlike \overline, is positioned
% and kerned like a real math accent (slanted with the italic correction of
% the symbol).  Built on amsmath's internal accent machinery (\macc@...).
% Used below to replace \bar at display/text/script sizes.
% --------------------------------------------------------------------------
\makeatletter
% Save the primitive so it can be restored inside the redefinition below.
\let\save@mathaccent\mathaccent
% \if@single{<arg>}{<single>}{<multi>}: runs <single> if <arg> is one symbol,
% <multi> otherwise.  Detection: accenting <arg> with and without a leading
% \kern0pt gives boxes of equal height only for multi-symbol arguments.
\newcommand*\if@single[3]{%
 \setbox0\hbox{${\mathaccent"0362{#1}}^H$}%
 \setbox2\hbox{${\mathaccent"0362{\kern0pt#1}}^H$}%
 \ifdim\ht0=\ht2 #3\else #2\fi
 }
%The bar will be moved to the right by a half of \macc@kerna, which is computed by amsmath:
\newcommand*\rel@kern[1]{\kern#1\dimexpr\macc@kerna}
%If there's a superscript following the bar, then no negative kern may follow the bar;
%an additional {} makes sure that the superscript is high enough in this case:
\newcommand*\widebar[1]{\@ifnextchar^{{\wide@bar{#1}{0}}}{\wide@bar{#1}{1}}}
%Use a separate algorithm for single symbols:
\newcommand*\wide@bar[2]{\if@single{#1}{\wide@bar@{#1}{#2}{1}}{\wide@bar@{#1}{#2}{2}}}
% \wide@bar@{<arg>}{<kern flag>}{<1=single symbol / 2=multi symbol>}:
% the actual bar construction; locally redefines \mathaccent so that the
% amsmath accent code computes the skew (\macc@kerna) for us.
\newcommand*\wide@bar@[3]{%
 \begingroup
 \def\mathaccent##1##2{%
%Enable nesting of accents:
 \let\mathaccent\save@mathaccent
%If there's more than a single symbol, use the first character instead (see below):
 \if#32 \let\macc@nucleus\first@char \fi
%Determine the italic correction:
 \setbox\z@\hbox{$\macc@style{\macc@nucleus}_{}$}%
 \setbox\tw@\hbox{$\macc@style{\macc@nucleus}{}_{}$}%
 \dimen@\wd\tw@
 \advance\dimen@-\wd\z@
%Now \dimen@ is the italic correction of the symbol.
 \divide\dimen@ 3
 \@tempdima\wd\tw@
 \advance\@tempdima-\scriptspace
%Now \@tempdima is the width of the symbol.
 \divide\@tempdima 10
 \advance\dimen@-\@tempdima
%Now \dimen@ = (italic correction / 3) - (width / 10)
 \ifdim\dimen@>\z@ \dimen@0pt\fi
%The bar will be shortened in the case \dimen@<0 !
 \rel@kern{0.6}\kern-\dimen@
 \if#31
 \overline{\rel@kern{-0.6}\kern\dimen@\macc@nucleus\rel@kern{0.4}\kern\dimen@}%
 \advance\dimen@0.4\dimexpr\macc@kerna
%Place the combined final kern (-\dimen@) if it is >0 or if a superscript follows:
 \let\final@kern#2%
 \ifdim\dimen@<\z@ \let\final@kern1\fi
 \if\final@kern1 \kern-\dimen@\fi
 \else
 \overline{\rel@kern{-0.6}\kern\dimen@#1}%
 \fi
 }%
 \macc@depth\@ne
 \let\math@bgroup\@empty \let\math@egroup\macc@set@skewchar
 \mathsurround\z@ \frozen@everymath{\mathgroup\macc@group\relax}%
 \macc@set@skewchar\relax
 \let\mathaccentV\macc@nested@a
%The following initialises \macc@kerna and calls \mathaccent:
 \if#31
 \macc@nested@a\relax111{#1}%
 \else
%If the argument consists of more than one symbol, and if the first token is
%a letter, use that letter for the computations:
 \def\gobble@till@marker##1\endmarker{}%
 \futurelet\first@char\gobble@till@marker#1\endmarker
 \ifcat\noexpand\first@char A\else
 \def\first@char{}%
 \fi
 \macc@nested@a\relax111{\first@char}%
 \fi
 \endgroup
}
\makeatother

% \bar: use \widebar at display/text/script sizes and the original accent at
% scriptscript size (\mathchoice slots: display, text, script, scriptscript).
\let\oldbar\bar
\renewcommand*{\bar}[1]{\mathchoice{\widebar{#1}}{\widebar{#1}}{\widebar{#1}}{\oldbar{#1}}}

% \tilde: wide version at display/text sizes, original accent at script sizes.
\let\oldtilde\tilde
\renewcommand*{\tilde}[1]{\mathchoice{\widetilde{#1}}{\widetilde{#1}}{\oldtilde{#1}}{\oldtilde{#1}}}
%\let\tilde\widetilde

%\let\hat\widehat
% \hat: wide version at display/text sizes, original accent at script sizes.
\let\oldhat\hat
\renewcommand*{\hat}[1]{\mathchoice{\widehat{#1}}{\widehat{#1}}{\oldhat{#1}}{\oldhat{#1}}}
%\let\tilde\widetilde

\let\oldcheck\check
% FIX: the display/text slots previously used \widehat (an apparent copy-paste
% from the \hat redefinition above), so \check typeset a hat (circumflex)
% instead of a check (caron) accent at display and text size.  No \widecheck
% is provided by the packages loaded here (amssymb, mathtools), so fall back
% to the standard \check accent at every size; if a wide check is wanted,
% load a package that defines \widecheck (e.g. mathabx or stix) and put it
% in the first two \mathchoice slots.
\renewcommand*{\check}[1]{\oldcheck{#1}}

\renewcommand*{\to}{\mathchoice{\longrightarrow}{\rightarrow}{\rightarrow}{\rightarrow}}

% \mapsto: long version in display style, original arrow at smaller sizes.
\let\oldmapsto\mapsto
\renewcommand*{\mapsto}{\mathchoice{\longmapsto}{\oldmapsto}{\oldmapsto}{\oldmapsto}}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Fine math-kern helpers for manual spacing fixes in formulas:
% \mk/\Mk remove 1mu/2mu, \mK/\MK add 1mu/2mu.
\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}

\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}


% Switch the label style of the first-level enumerate counter.
% NOTE(review): the document also uses enumitem-style options
% ([label=(\arabic*)]); the same effect is available per list that way.
\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%



\title{New results on eliminating the duality gap of the second-order-cone reformulation for extended trust-region subproblem with two intersecting cuts}

\alttitle{Nouveaux résultats sur l'élimination de l'écart de dualité de la reformulation du cône du second ordre pour le sous-problème de la région de confiance étendue avec deux coupes intersectées}

\author{\firstname{Meiling} \lastname{Wang}}

\address{School of Statistics and Data Science, Beijing Wuzi University, Beijing 101149, China}

%% Support for the first author

\email{wangmeiling@bwu.edu.cn}


\thanks{This work was supported by the National Natural Science Foundation of China (Grant No. 11871115).}

\CDRGrant[National Natural Science Foundation of China]{11871115}

\subjclass{90C20, 90C22, 90C25, 90C26, 90C30}

\keywords{\kwd{Second-order-cone reformulation} \kwd{extended trust-region subproblem} \kwd{linear inequality constraints} \kwd{duality gap} \kwd{semidefinite programming relaxation}}

\altkeywords{\kwd{Reformulation de cône de second ordre} \kwd{sous-problèmes de domaine de confiance étendu} \kwd{contraintes d'inégalité linéaire} \kwd{écart de dualité} \kwd{relaxation de planification semi-définie}}


\begin{abstract}
In this paper, we consider the nonconvex extended trust-region subproblem with two intersecting linear inequality constraints, $(\mathrm{ETR}_2)$, and use a sequence of semi-definite programming (SDP) problems with second-order-cone (SOC) constraints to eliminate the duality gap of the SOC reformulation for $(\mathrm{ETR}_2)$. We first narrow the duality gap of the SOC reformulation by adding a new appropriate SOC constraint, and a sufficient condition is presented to characterize when the new SOC constraint is valid. Then we establish an iterative algorithm and the results of numerical experiments show that the iterative algorithm works efficiently in eliminating the SDPR-SOCR gap of $(\mathrm{ETR}_2)$.
\end{abstract}

\begin{altabstract}
Dans cet article, nous considérons le sous-problème non convexe de la région de confiance étendue avec deux contraintes d'inégalité linéaires qui se croisent, $(\mathrm{ETR}_2)$, et nous utilisons une suite de problèmes de programmation semi-définie (SDP) avec des contraintes de cône de second ordre (SOC) pour éliminer l'écart de dualité de la reformulation SOC pour $(\mathrm{ETR}_2)$. Nous réduisons d'abord l'écart de dualité de la reformulation SOC en ajoutant une nouvelle contrainte SOC appropriée, et une condition suffisante est présentée pour caractériser quand la nouvelle contrainte SOC est valide. Ensuite, nous établissons un algorithme itératif et les résultats des expériences numériques montrent que l'algorithme itératif fonctionne efficacement pour éliminer l'écart SDPR-SOCR de $(\mathrm{ETR}_2)$.
\end{altabstract}


\begin{document}


% Use the \maketitle command after the abstract
\maketitle

\section{Introduction}
\label{introduction}
In this paper, we consider the nonconvex extended trust-region subproblem with $m$ linear cuts as follows:
\begin{equation}
\label{eq:subproblem}
\begin{array}{lcl}
(\ETR_m)\quad&\mbox{min} &d^TQ_0d+2b_0^Td\\
&\mbox{s.t.} &\|d\|^2\leq 1,\\
&& b_1^Td+c_1\geq0,\\
&& b_2^Td+c_2\geq0,\\
&&\multicolumn{1}{c}{\vdots}\\
&&b_m^Td+c_m\geq0.
\end{array}
\end{equation}
$Q_0$ is an $n\times n$ symmetric matrix, $d, b_i\,(i=0,1,2,\ldots,m)\in\mathcal{R}^n$, and $c_i\,(i=1,2,\ldots,m)$ are constants. This model arises from the trust-region method to solve constrained optimization problems and can be traced back to two papers presented by Yuan~\cite{Yuan1994_2,Yuan1994_1} in 1994. Many papers have involved studying the implicit convexity under some conditions when the problem is nonconvex~\cite{Jeyakumar2014,Locatelli2016}. It is well known that even for $m=1$, the problem~\eqref{eq:subproblem} may have a positive duality gap with its Lagrangian dual problem; moreover, it keeps the same gap with its SDP relaxation~\cite{AZ09,YuanAi,Jin2010}.

The second-order-cone (SOC) reformulation technique has played an important role in narrowing or even eliminating the duality gaps of $(\ETR_m)$~\cite{StZh03,Burer-Anstreicher-2013,Burer-Yang-2014,YuanAi} and other quadratically constrained quadratic optimization problems, e.g., the Celis--Dennis--Tapia (CDT) problem~\cite{Burer-Anstreicher-2013,YuanAi2}.
For the SOC reformulation of $(\ETR_m)$, the linear constraints $b_i^Td+c_i\geq0~(i=1,2,\ldots,m)$ are replaced with SOC constraints $\|(b_i^Td+c_i)d\| \leq b_i^Td+c_i~(i=1,2,\ldots,m)$ and redundant quadratic inequalities $(b_i^Td+c_i)(b_j^Td+c_j)\ge0~(1\leq i< j\leq m)$ are added which are valid in the SDP relaxation model. Naturally, the SOC reformulation is equivalent to $(\ETR_m)$. The SDP relaxation model of the SOC reformulation is called the SDP relaxation with second-order-cone reformulation (SDPR-SOCR).

\looseness-1
To our knowledge, Sturm and Zhang in 2003 first used one SOC constraint to reformulate $(\ETR_1)$ and proved that SDPR-SOCR is an exact relaxation of $(\ETR_1)$, that is, the SOC reformulation is an implicit convex optimization problem~\cite{StZh03}. It was the first time that the duality gap was thoroughly eliminated by the SOC reformulation technique.
In 2003, Ye and Zhang first reformulated $(\ETR_2)$ using SOC and proved that if at least one of both linear cuts is active then its SDPR-SOCR is a tight relaxation~\cite{YeZh03}.
In 2013, Burer and Anstreicher~\cite{Burer-Anstreicher-2013} further proved that the SDPR-SOCR is a tight relaxation if the two linear cuts are parallel.
In 2015, Burer and Yang~\cite{Burer-Yang-2014} showed that the tightness of the SDPR-SOCR still holds in the nonintersecting case for any positive integer $m$, where {\em the nonintersecting case means that any two linear cuts are nonintersecting in the trust-region ball}.
Dai et al.~\cite{Dai-Xing-2018} provided a recovery algorithm that constructs an optimal solution of nonintersecting $(\ETR_3)$ from its SDPR-SOCR solution based on Burer and Yang's work~\cite{Burer-Yang-2014}.
Karbasy et al.~\cite{Karbasy2021} proposed an algorithm to solve nonintersecting $(\ETR_m)$ by finding global and local non-global minimizers of trust-region subproblem.
Yuan et al.~\cite{YuanAi} have presented a sufficient and necessary condition under which the SDPR-SOCR is not tight for $(\ETR_2)$, which totally answered the question: when is the SOC reformulation of $(\ETR_2)$ an implicit convex optimization problem.

\looseness-1
When $(\ETR_2)$ admits a positive duality gap with its SDPR-SOCR, some algorithms have been proposed to solve $(\ETR_2)$ via the traditional trust region subproblem~\cite{Deng2019,karbasy2022efficient}.
Some kinds of branch and bound algorithms have been presented for solving $(\ETR_m)$~\cite{Beck2017,Dai2020,Karbasy2022}.
In each iteration of the branch and bound algorithm in~\cite{Beck2017}, Beck and Pan generate a trust region subproblem and
find its global and local non-global minimizers.
In~\cite{Karbasy2022}, Karbasy and Salahi apply the branch and bound algorithm in~\cite{Beck2017} to solve intersecting $(\ETR_m)$.
In~\cite{Dai2020}, Dai proposes two branch and bound algorithms based on the SDP relaxation problems with SOC constraints for solving $(\ETR_m)$, i.e., r-BW algorithm and r-ED algorithm.
In each iteration of r-BW algorithm of~\cite{Dai2020}, one needs to solve an SDP problem with $m$ SOC constraints and $n+1$ linear constraints.
In each iteration of r-ED algorithm of~\cite{Dai2020}, one needs to solve an SDP problem with $m$ SOC constraints and at most $n+1$ linear constraints.
The numerical results of~\cite{Dai2020} show that the average iteration numbers of r-BW algorithm and r-ED algorithm both exceed $8.4$ for solving $(\ETR_2)$.
In other words, one always needs at least $9$ SDP problems with SOC constraints to obtain the global optimal solution of $(\ETR_2)$.

In this paper, we focus on the nonconvex $(\ETR_2)$ which admits a positive duality gap with its SDPR-SOCR, that is, we only consider the nonconvex and intersecting $(\ETR_2)$ satisfying the sufficient and necessary condition presented by~\cite{YuanAi}. We call this gap the SDPR-SOCR gap of $(\ETR_2)$. To dig deeper into the implicit convexity of $(\ETR_2)$, we try to narrow and even eliminate its SDPR-SOCR gap by SDP problems with SOC constraints.
Inspired by the results on narrowing the duality gap of the extended CDT problem by adding an appropriate new SOC constraint by~\cite{YuanAi}, we try to narrow and even eliminate the duality gap of the SOC reformulation for $(\ETR_2)$ by adding an appropriate SOC constraint, which may lead to dividing the problem into two separate subproblems.
In theory, a sufficient condition is presented to narrow the SDPR-SOCR gap of $(\ETR_2)$ by adding an appropriate SOC constraint.
Then an iterative algorithm is presented to eliminate the SDPR-SOCR gap by a sequence of SDP problems with SOC constraints.
In each iteration of our iterative algorithm, one needs only to solve an SDP problem with $4$ SOC constraints and $3$ linear constraints, which is much simpler than the SDP problems in~\cite{Dai2020}.
Finally, numerical experiments are conducted to show the effectiveness of our iterative algorithm.

Throughout this paper, $\mathcal{S}^{n\times n}$ and $\mathcal{S}^{n\times n}_{+}$ denote the set of all real $n\times n$ symmetric matrices and the set of all real $n\times n$ positive semi-definite matrices, respectively. For $A,B\in \mathcal{S}^{n\times n},$ the notation $A\bullet B\coloneqq \tr AB$ denotes the matrix inner-product between $A$ and $B.$
$\SOC$ denotes Second-Order Cone, that is, an $n$-dimensional vector $x=(x_1,\ldots,x_n)^T\in \SOC$ iff $\sqrt{x_2^2+\cdots+x_{n}^2}\le x_1$; moreover, $x=(x_1,\ldots,x_n)^T\in \partial(\SOC)$ iff $\sqrt{x_2^2+\cdots+x_{n}^2}=x_1.$
The notations $v(*)$ and $\Omega(*)$ denote the optimal objective value and the feasible region of a problem $(*)$, respectively. Sometimes we may use $(x)_1$ to denote the first component of a vector $x$.

\section{Main theoretical results}
\label{main-result}

\subsection{Some preliminary knowledge}\label{Pre-knowledge}
In this paper, we consider the nonconvex extended trust-region subproblem with two intersecting linear cuts as follows:
\begin{equation}
\label{eq:m=2-subproblem}
\begin{array}{lcl}
(\ETR_2)\quad&\mbox{min} & d^TQ_0d+2b_0^Td \\
&\mbox{s.t.} & \|d\|^2 \leq 1,\\
&& b_1^Td+c_1\geq 0,\\
&& b_2^Td+c_2\leq 0,
\end{array}
\end{equation}
where both planes $b_1^Td+c_1=0$ and $b_2^Td+c_2=0$ intersect in the open unit ball $\{d~|~\|d\|^2 < 1\}$.
Here, we always say such $(\ETR_2)$ is intersecting.
In the model~\eqref{eq:m=2-subproblem}, the second linear inequality constraint is rewritten for expediently adding an appropriate SOC constraint to narrow and even eliminate the duality gap of the SOC reformulation for $(\ETR_2)$.

The classical second-order-cone reformulation of the problem $(\ETR_2)$ is as follows:
\begin{equation}
\label{eq:reformulation-form}
\begin{array}{lcl}
(QP)\quad&\mbox{min} & d^TQ_0d+2b_0^Td \\
&\mbox{s.t.} & \|d\|^2 \leq 1,\\
&& (b_1^Td+c_1)(b_2^Td+c_2)\leq0,\\
&& \|(b_1^Td+c_1)d\| \leq b_1^Td+c_1,\\
&& \|(-b_2^Td-c_2)d\| \leq -b_2^Td-c_2.
\end{array}
\end{equation}
The model $(QP)$ is just a variant equivalent to the model $(\ETR_2)$.
Its semidefinite programming relaxation is
\begin{equation}\label{eq:SDPR-SOCR}
\begin{array}{lcl}
(SP)\quad& \mbox{min} & M_0\bullet X \\
& \mbox{s.t.} & M_1\bullet X\le 0, \\
& & M_2\bullet X=a_1^TXa_2\le 0, \\
&& Xa_1\in \SOC,\\
&& -Xa_2\in \SOC,\\
& & E_{00}\bullet X=1,\\
& & X\succeq0,
\end{array}
\end{equation}
\begin{equation}\label{eq:M1M2-etc-definition}
%\begin{array}{lll}
M_0\Mk \coloneqq \Mk \left[\begin{matrix}
0& b_0^T \\
b_0& Q_0
\end{matrix}
\right],\,
M_1\Mk \coloneqq \Mk \left[\begin{matrix}
-1& \mathbf{0}^T \\
\mathbf{0}&I
\end{matrix}
\right],M_2\Mk \coloneqq \Mk \frac12\left(a_1a_2^T+a_2a_1^T\right),~~
a_1\coloneqq \left[\begin{matrix}
c_1\\
b_1
\end{matrix}
\right],\,
a_2\Mk \coloneqq \Mk \left[\begin{matrix}
c_2\\
b_2
\end{matrix}
\right],\,
E_{00}\Mk \coloneqq\Mk \left[\begin{matrix}
1& \mathbf{0}^T \\
\mathbf{0}&O
\end{matrix}
\right].
%\end{array}
\end{equation}
The model $(SP)$ is also called the SDPR-SOCR of $(\ETR_2)$.
The dual problem of $(SP)$ is
\begin{equation}\label{eq:dual-SDPR-SOCR}
\begin{array}{lcl}
(SD)\quad & \mbox{max} & y_0 \\
& \mbox{s.t.} & Z\displaystyle =M_0-y_0 E_{00}+y_1M_1+y_2M_2-\frac12\left(u_1a_1^T+a_1u_1^T\right)+\frac12\left(u_2a_2^T+a_2u_2^T\right)\succeq0, \\
& & y_1\ge0,~y_2\ge0,~u_1\in \SOC,~u_2\in \SOC.
\end{array}
\end{equation}

The following proposition is easily verified and presented in~\cite{YuanAi}.
\begin{proposition} [{\cite[Proposition~2.1]{YuanAi}}]
	If $(\ETR_2)$ satisfies the Slater condition, then both $(SP)$ and $(SD)$ have interior feasible points.
\label{th:interior-poit-existence}
\end{proposition}

For the intersecting $(\ETR_2)$, a sufficient and necessary condition of the tightness of $(SP)$ has been proposed in~\cite{YuanAi}. Here we recall the conclusion and it will be used in this paper.

\begin{theorem}[{\cite[Theorem~2.7]{YuanAi}}]
Suppose that $(\ETR_2)$ satisfies the Slater condition. Let $\hat{X}$ and $(\hat{y}_0,\hat{y}_1,\hat{y}_2,\hat u_1,\hat u_2)$ be an optimal pair for $(SP)$ and $(SD)$.
Let $\hat{Z}\coloneqq M_0-\hat{y}_0 E_{00}+\hat{y}_1M_1+\hat{y}_2M_2-\frac12(\hat u_1a_1^T+a_1\hat u_1^T)+\frac12(\hat u_2a_2^T+a_2\hat u_2^T)$.
Then $v(SP)\ne v(QP)$ $\left(=v(\ETR_2)\right)$ $\Longleftrightarrow$ $(SP)$ and $(SD)$ have Property ${\mathcal I}$ as follows:
	\begin{enumerate}[label=(\arabic*)]
		\item\label{theo2_1} $\rank(\hat X)=3 \mbox{ and } \rank(\hat Z)=n-2$;
		\item\label{theo2_2} $\hat y_1>0$;
		\item\label{theo2_3} $a_1^T \hat X a_2<0$;
		\item\label{theo2_4} $\hat u_1\neq0$, $\hat u_2\neq0$ and $\hat Xa_1\nparallelslant\hat Xa_2$.
	\end{enumerate}
\label{gap-theorem}
\end{theorem}

Based on Proposition~\ref{th:interior-poit-existence} and Theorem~\ref{gap-theorem}, Lemma 2.5 of~\cite{YuanAi} can be restated below, which will be used in later discussions.
\begin{lemma}[{\cite[Lemma~2.5]{YuanAi}}]
Suppose that $(\ETR_2)$ satisfies the Slater condition and $v(\ETR_2)>v(SP)$. Let $\hat{X}$ and $(\hat{y}_0,\hat{y}_1,\hat{y}_2,\hat u_1,\hat u_2)$ be an optimal pair for $(SP)$ and $(SD)$, respectively.
Then
\begin{equation}\label{eq:Xa-in-boundary-SOC}
\begin{aligned}
0&\ne\hat u_1\in \partial(\SOC),&0&\neq\hat Xa_1\in \partial(\SOC),\\
0&\ne\hat u_2\in \partial(\SOC),&0&\neq-\hat Xa_2\in \partial(\SOC).
\end{aligned}
\end{equation}
$\hat X$ has a rank-one decomposition $\hat X=\hat x_1\hat x_1^T+\hat x_2\hat x_2^T+\hat x_3\hat x_3^T$ satisfying
\begin{equation}\label{eq:hatX-special-decompositon-Xa1}
\begin{aligned}
\hat x_1&=\dfrac{\hat Xa_1}{\sqrt{a_1^T\hat Xa_1}}\in\partial(\SOC),~M_1\bullet\hat x_2\hat x_2^T=M_1\bullet\hat x_3\hat x_3^T=\dfrac{M_1\bullet\hat X}{2}=0,\\
\hat x_2^T a_1&=\hat x_3^T a_1=0,\ (\hat x_2)_1\hat x_2^Ta_2<0,\ (\hat x_3)_1\hat x_3^Ta_2>0;
\end{aligned}
\end{equation}
similarly, $\hat X$ has another rank-one decomposition $\hat X=\tilde{x}_1\tilde{x}_1^T+\tilde{x}_2\tilde{x}_2^T+\tilde{x}_3\tilde{x}_3^T$ satisfying
\begin{equation}\label{eq:hatX-special-decompositon-Xa2}
\begin{aligned}
\tilde{x}_1&=\dfrac{-\hat X a_2}{\sqrt{a_2^T\hat X a_2}}\in\partial(\SOC),~M_1\bullet\tilde{x}_2\tilde{x}_2^T=M_1\bullet\tilde{x}_3\tilde{x}_3^T=\dfrac{M_1\bullet\hat X}{2}=0,\\
\tilde{x}_2^T a_2&=\tilde{x}_3^T a_2=0,\ (\tilde{x}_2)_1\tilde{x}_2^Ta_1>0,~(\tilde{x}_3)_1\tilde{x}_3^Ta_1<0.
\end{aligned}
\end{equation}
\label{SOC-lemma} 	
\end{lemma}

\subsection{Narrowing the SDPR-SOCR gap of \texorpdfstring{$(\ETR_2)$}{(ETR2)}}
\label{narrow-gap}
In this paper, we focus on the case when $(\ETR_2)$ admits a positive SDPR-SOCR gap (i.e., Property ${\mathcal I}$ holds), and try to narrow the gap. Let
\begin{equation}\label{a3}
a_3(\beta)\coloneqq (1-\beta)a_1+\beta a_2
\end{equation}
be a vector with the parameter $\beta\in [0,1]$.
Denote $a_3(\beta)=[c_3(\beta),\ (b_3(\beta))^T]^T$, then
$c_3(\beta)=(1-\beta)c_1+\beta c_2$ and $b_3(\beta)=(1-\beta)b_1+\beta b_2$.
Let
\[
\Omega\coloneqq \left\{d\in \mathcal{R}^n|\|d\|^2\leq 1,~b_1^Td+c_1\geq0,~b_2^Td+c_2\leq0\right\}
\]
be the feasible region of $(\ETR_2)$. The hyperplane,
\begin{equation}\label{hyperplane}
[1,\ d^T]a_3(\beta)=b_3(\beta)^Td+c_3(\beta)= (1-\beta)(b_1^Td+c_1)+\beta(b_2^Td+c_2)=0,
\end{equation}
divides $\Omega$ into two following parts:
\[
\begin{aligned}
 \Omega_1&\coloneqq 
\left\{d\in \mathcal{R}^n
\;\middle|\;
 \|d\|^2\leq 1,~b_1^Td+c_1\geq0,~b_2^Td+c_2\leq0,~b_3(\beta)^Td+c_3(\beta)\geq0
 \right\},
\\
 \Omega_2&\coloneqq 
\left\{d\in \mathcal{R}^n
\;\middle|\;
 \|d\|^2\leq 1,~b_1^Td+c_1\geq0,~b_2^Td+c_2\leq0,~b_3(\beta)^Td+c_3(\beta)\leq0
\right\}.
 \end{aligned}
\]
For any $\beta\in [0,1]$, we obtain $\Omega=\Omega_1\cup\Omega_2$.
Note that when $\beta=0$,
\begin{equation}\label{beta=0}
\Omega_1=\Omega,~~
\Omega_2=\{d\in \mathcal{R}^n\mid \|d\|^2\leq 1,~b_1^Td+c_1=0,~b_2^Td+c_2\leq0\}\subseteq\Omega;
\end{equation}
when $\beta=1$,
\begin{equation}\label{beta=1}
\Omega_1=\{d\in \mathcal{R}^n \mid \|d\|^2\leq 1,~b_1^Td+c_1\leq0,~b_2^Td+c_2=0\}\subseteq\Omega,~~
\Omega_2=\Omega;
\end{equation}
when $0<\beta<1$, $\Omega_1$ and $\Omega_2$ are two different sets, and both are different from $\Omega$.

Two new problems derived from $\Omega_1$ and $\Omega_2$ are presented as follows:
\begin{equation}
\label{eq:QP1}
\begin{array}{lcl}
(QP_1(\beta))\quad&\mbox{min} &d^TQ_0d+2b_0^Td\\
&\mbox{s.t.} &\|d\|^2\leq 1,\\
&& b_1^Td+c_1\geq0,\\
&& b_2^Td+c_2\leq0,\\
&& b_3(\beta)^Td+c_3(\beta)\geq0,\\
\end{array}
\end{equation}
and
\begin{equation}
\label{eq:QP2}
\begin{array}{lcl}
(QP_2(\beta))\quad&\mbox{min} &d^TQ_0d+2b_0^Td\\
&\mbox{s.t.} &\|d\|^2\leq 1,\\
&& b_1^Td+c_1\geq0,\\
&& b_2^Td+c_2\leq0,\\
&& b_3(\beta)^Td+c_3(\beta)\leq0.\\
\end{array}
\end{equation}
Apparently we have
\[
v(\ETR_2)=v(QP)=\min\{v(QP_1(\beta)),\ v(QP_2(\beta))\},
\]
for any $\beta\in [0,1]$.
Both problems can be equivalently reformulated, by the SOC reformulation technique, into the following forms:
\begin{equation}
\label{eq:SOC-reformulation-QP1}
\begin{array}{lcl}
(QP_1(\beta))\quad&\mbox{min} &d^TQ_0d+2b_0^Td\\
&\mbox{s.t.} & \|d\|^2\leq 1,\\
&& \left(b_2^Td+c_2\right)\left(b_3(\beta)^Td+c_3(\beta)\right)\le0,\\
&& \|(b_1^Td+c_1)d\| \leq b_1^Td+c_1,\\
&& \|(-b_2^Td-c_2)d\| \leq -b_2^Td-c_2,\\
&&\|\left(b_3(\beta)^Td+c_3(\beta)\right)d\| \leq b_3(\beta)^Td+c_3(\beta),
\end{array}
\end{equation}
and
\begin{equation}
\label{eq:SOC-reformulation-QP2}
\begin{array}{lcl}
(QP_2(\beta))\quad&\mbox{min} &d^TQ_0d+2b_0^Td\\
&\mbox{s.t.} &\|d\|^2\leq 1,\\
&& \left(b_1^Td+c_1\right)\left(b_3(\beta)^Td+c_3(\beta)\right)\le0,\\
&& \|(b_1^Td+c_1)d\| \leq b_1^Td+c_1,\\
&& \|(-b_2^Td-c_2)d\| \leq -b_2^Td-c_2,\\
&&\|\left(-b_3(\beta)^Td-c_3(\beta)\right)d\| \leq -b_3(\beta)^Td-c_3(\beta).
\end{array}
\end{equation}
The SDP relaxation models of $(QP_1(\beta))$ and $(QP_2(\beta))$ are
\begin{equation}\label{SOC1}
\begin{array}{lcl}
(\SOCP_1(\beta)) \quad& \mbox{min} & M_0\bullet X \\
& \mbox{s.t.} & M_1\bullet X\le 0, \\
& & a_2^TXa_3(\beta)\le 0, \\
&& Xa_1\in \SOC,\\
&& -Xa_2\in \SOC,\\
&& Xa_3(\beta)\in \SOC,\\
& & E_{00}\bullet X=1,\\
& & X\succeq0,
\end{array}
\end{equation}
and
\begin{equation}\label{SOC2}
\begin{array}{lcl}
(\SOCP_2(\beta))\quad & \mbox{min} & M_0\bullet X \\
& \mbox{s.t.} & M_1\bullet X\le 0, \\
& & a_1^TXa_3(\beta)\le 0, \\
&& Xa_1\in \SOC,\\
&& -Xa_2\in \SOC,\\
&& -Xa_3(\beta)\in \SOC,\\
& & E_{00}\bullet X=1,\\
& & X\succeq0,
\end{array}
\end{equation}
respectively.

For simplification, we denote $v_1(\beta)\coloneqq v(\SOCP_1(\beta))$, $v_2(\beta)\coloneqq v(\SOCP_2(\beta))$, $\Omega_1(\beta)\coloneqq \Omega(\SOCP_1(\beta))$ and $\Omega_2(\beta)\coloneqq \Omega(\SOCP_2(\beta))$. It is easily verified that $v_1(\beta)$ and $v_2(\beta)$ are functions of $\beta$, where $\beta\in [0,1]$.

\begin{remark}{}
\label{note-1}
For any $\beta\in (0,1)$, the constraint $b_1^Td+c_1\geq0$ is redundant for $(QP_1(\beta))$, and
the constraint $b_2^Td+c_2\leq0$ is redundant for $(QP_2(\beta))$.
For any $\beta\in (0,1)$, the constraints $-Xa_2\in \SOC$ and $Xa_3(\beta)\in \SOC$ deduce that
\[
{X}a_1=\frac{\beta}{1-\beta}(-{X}a_2)+\frac{1}{1-\beta}{X}a_3(\beta)\in \SOC,
\]
thus the constraint $Xa_1\in \SOC$ is redundant for $(\SOCP_1(\beta))$;
and simultaneously the constraints $Xa_1\in \SOC$ and $-Xa_3(\beta)\in \SOC$ deduce that
\[
-Xa_2=\frac{1-\beta}{\beta}Xa_1+\frac{1}{\beta}\left(-Xa_3(\beta)\right)\in \SOC,
\]
thus the constraint $-Xa_2\in \SOC$ is redundant for $(\SOCP_2(\beta))$.
\end{remark}

The following lemma reveals that there always are interior feasible points to $(\SOCP_1(\beta))$ and $(\SOCP_2(\beta))$ for any $\beta\in[0,1]$, if $(\ETR_2)$ satisfies the Slater condition.
\begin{lemma}{}
 Suppose that $(\ETR_2)$ satisfies the Slater condition. If $0\leq\beta\leq1$, then $(\SOCP_1(\beta))$ and $(\SOCP_2(\beta))$ have interior feasible points, $\Omega_1(\beta)\cup\Omega_2(\beta)\subseteq\Omega(SP)$ and $\min\{v_1(\beta),v_2(\beta)\}\geq v(SP)$.
\label{interior-feasible}
\end{lemma}
\begin{proof}{}
As $(\ETR_2)$ is intersecting and satisfies the Slater condition, for any $\beta\in (0,1)$, $(QP_1(\beta))$ and $(QP_2(\beta))$ also satisfy the Slater condition. When $0<\beta<1$, by Remark~\ref{note-1} and Proposition~\ref{th:interior-poit-existence}, both $(\SOCP_1(\beta))$ and $(\SOCP_2(\beta))$ have interior feasible points when ignoring their redundant constraints.
Furthermore, for the model $(\SOCP_1(\beta)),$ we obtain
\[
a_2^TXa_3(\beta)=(1-\beta)a_1^TXa_2+\beta a_2^TXa_2
\Longrightarrow a_1^TXa_2=\frac{1}{1-\beta}a_2^TXa_3(\beta)+\frac{\beta}{1-\beta}(-a_2^TXa_2),
\]
and the inequality $a_2^TXa_3(\beta)\le 0$ implies $a_1^TXa_2\le 0$. Hence, $\Omega_1(\beta)\subseteq \Omega(SP)$ and one obtains $v_1(\beta)\ge v(SP)$.
For the model $(\SOCP_2(\beta)),$ we obtain
\[
a_1^TXa_3(\beta)=(1-\beta)a_1^TXa_1+\beta a_1^TXa_2
\Longrightarrow a_1^TXa_2=\frac{1}{\beta}a_1^TXa_3(\beta)+\frac{\beta-1}{\beta}a_1^TXa_1,
\]
and the inequality $a_1^TXa_3(\beta)\le 0$ implies $a_1^TXa_2\le 0$. Hence, $\Omega_2(\beta)\subseteq \Omega(SP)$ and one obtains $v_2(\beta)\ge v(SP)$.

Moreover, when $\beta=0$ or $\beta=1$, we have
\begin{equation}\label{Omega01}
\begin{aligned}
 \Omega_1(0)&\Mk =\Mk
 \left\{X\Mk \in \Mk \mathcal{S}^{(n\Mk + \Mk1)\Mk\times\Mk (n\Mk+\Mk 1)}_{+}
 \;\middle|\;
 M_1\Mk \bullet \Mk X\Mk \le \Mk 0,~E_{00}\Mk \bullet \Mk X\Mk =\Mk 1,~
 a_1^TXa_2\Mk \le \Mk 0,~
 Xa_1\Mk \in \Mk \SOC,~-Xa_2\Mk \in\Mk \SOC
 \right\}\\
 & =\Mk \Omega(SP),
 \\
\Omega_2(0)&\Mk =\Mk
 \left\{X\Mk \in \Mk \mathcal{S}^{(n\Mk + \Mk1)\Mk\times\Mk (n\Mk+\Mk 1)}_{+}
 \;\middle|\;
 M_1\Mk \bullet \Mk X\Mk \le \Mk 0,~~E_{00}\Mk \bullet \Mk X\Mk =\Mk 1,~~
 Xa_1\Mk =\Mk 0,~~-Xa_2\Mk \in \Mk \SOC
 \right\}\subseteq \Omega(SP),
 \\
\Omega_1(1)&\Mk =\Mk
 \left\{X\Mk \in \Mk \mathcal{S}^{(n\Mk + \Mk1)\Mk\times\Mk (n\Mk+\Mk 1)}_{+}
 \;\middle|\;
 M_1\Mk \bullet \Mk X\Mk \le \Mk 0,~~E_{00}\Mk \bullet \Mk X=1,~~
 Xa_1\Mk \in \Mk \SOC,~~Xa_2\Mk =\Mk 0
\right\}\subseteq \Omega(SP),
\\
 \Omega_2(1) &\Mk =\Mk
 \left\{X\Mk \in \Mk \mathcal{S}^{(n\Mk + \Mk1)\Mk\times\Mk (n\Mk+\Mk 1)}_{+}
 \;\middle|\;
 M_1\Mk \bullet \Mk X\Mk \le \Mk 0,~E_{00}\Mk \bullet \Mk X\Mk =\Mk 1,~
 a_1^TXa_2\Mk \le \Mk 0,~
 Xa_1\Mk \in \Mk \SOC,~-Xa_2\Mk \in \Mk \SOC
 \right\}\\
 &=\Mk \Omega(SP).
\end{aligned}
\end{equation}
It is easily verified that $(\SOCP_1(0)),~(\SOCP_2(0)),~(\SOCP_1(1)),~(\SOCP_2(1))$ have interior feasible points, and
\begin{equation}\label{v1201}
\begin{gathered}
v_2(0)\geq v_1(0)=v(SP), \\
\min\{v_1(0),v_2(0)\}\geq v(SP),\\
\end{gathered}
\quad\begin{gathered}
v_1(1)\geq v_2(1)=v(SP),\\
\min\{v_1(1),v_2(1)\}\geq v(SP).\\
\end{gathered}
\end{equation}

Therefore, for any $\beta\in[0,1]$, $(\SOCP_1(\beta))$ and $(\SOCP_2(\beta))$ have interior feasible points, $\Omega_1(\beta)\cup\Omega_2(\beta)\subseteq\Omega(SP)$ and
$\min\{v_1(\beta),v_2(\beta)\}\geq v(SP).$
\end{proof}

From Remark~\ref{note-1}, the constraint $Xa_1\in \SOC$ is redundant for $(\SOCP_1(\beta))$ and the constraint $-Xa_2\in \SOC$ is redundant for $(\SOCP_2(\beta))$, for any $\beta\in (0,1)$.
However, from the formula~\eqref{Omega01}, we come to a conclusion that the constraint $Xa_1\in \SOC$ is indispensable for $(\SOCP_1(1))$, and the constraint $-Xa_2\in \SOC$ is indispensable for $(\SOCP_2(0))$.
The following example is a case in point.

\begin{example}
One instance of $(\ETR_2)$ is defined as follows:
\begin{equation*}
n=2,\,
Q_0=\left[\begin{matrix}
 -35& 5\\
 5& -88
\end{matrix}
\right],\,
b_0=\left[\begin{matrix}
8 \\
21
\end{matrix}
\right],\,
b_1=\left[\begin{matrix}
0.42 \\
0.29
\end{matrix}
\right],~
b_2=\left[\begin{matrix}
0 \\
-0.67
\end{matrix}
\right],\,
c_1=0.33,\,
c_2=-0.23. 
\end{equation*}
\label{example:indispensable}
\end{example}

One can check that
\[
\begin{aligned}
v(\ETR_2)&\approx -51.0957,~ &v(SP)&\approx-57.9590,\\
v_1(0)&\approx-57.9590,~&v_2(0)&\approx -45.6193,\\
v_1(1)&\approx-43.8601,~& v_2(1)&\approx -57.9590,\\
\end{aligned}
\]
and it coincides with
\begin{equation*}
\begin{gathered}
v_2(0)\geq v_1(0)=v(SP),\\
\min\{v_1(0),v_2(0)\}\geq v(SP),\\
\end{gathered}
\quad 
\begin{gathered}
v_1(1)\geq v_2(1)=v(SP),\\
\min\{v_1(1),v_2(1)\}\geq v(SP).\\
\end{gathered}
\end{equation*}

We refer to the model $(\NSOCP_1(\beta))$ as the model $(\SOCP_1(\beta))$ without the constraint $Xa_1\in \SOC$, and
the model $(\NSOCP_2(\beta))$ as the model $(\SOCP_2(\beta))$ without the constraint $-Xa_2\in \SOC$.
For Example~\ref{example:indispensable}, one obtains that
\[
\begin{gathered}
v(\NSOCP_1(0))\approx-57.9590,\quad v(\NSOCP_2(0))\approx -129.8765,\\
\min\{v(\NSOCP_1(0)),v(\NSOCP_2(0))\}\approx -129.8765<v(SP),\\
v(\NSOCP_1(1))\approx-67.4671,\quad v(\NSOCP_2(1))\approx -57.9590,\\
\min\left\{v(\NSOCP_1(1)),v(\NSOCP_2(1))\right\}\approx -67.4671<v(SP),
\end{gathered}
\]
and it violates the formula~\eqref{v1201}.

Then we obtain the main result of this paper: for an SDPR-SOCR-gap-existing $(\ETR_2)$, if \hbox{$0<\beta<1$}, then $a_3(\beta)$ defined in~\eqref{a3} is effective in narrowing the gap.

\begin{theorem}\label{narrow-theorem}

	Suppose that $(\ETR_2)$ satisfies the Slater condition and $v(\ETR_2)>v(SP)$. If $0<\beta<1$, then
\[
\min\left\{v_1(\beta),v_2(\beta)\right\}>v(SP).
\]
\end{theorem}

\begin{proof}{}
From Lemma~\ref{interior-feasible}, both $(\SOCP_1(\beta))$ and $(\SOCP_2(\beta))$ have interior feasible points, and $\min\{v_1(\beta),v_2(\beta)\}\geq v(SP).$
Suppose that $v_1(\beta)=v(SP)$.
Let $\tilde{X}$ be the optimal solution to $(\SOCP_1(\beta))$. Then $\tilde{X}$ is also an optimal solution to $(SP)$, and $\tilde{X}$ satisfies Property ${\mathcal I}$ by Theorem~\ref{gap-theorem}.
By Lemma~\ref{SOC-lemma}, we have
\[
0\neq\tilde{X}a_1\in \partial(\SOC),~~0\neq-\tilde{X}a_2\in \partial(\SOC).\]
As $\tilde{X}a_3(\beta)=(1-\beta)\tilde{X}a_1+\beta\tilde{X}a_2$, we have
\[
\tilde{X}a_1=\frac{1}{1-\beta}\tilde{X}a_3(\beta)+\frac{\beta}{1-\beta}(-\tilde{X}a_2)\in \partial(\SOC).
\]
Then $\tilde{X}a_3(\beta)\in \SOC$ and $-\tilde{X}a_2	\in \partial(\SOC)$ imply that
$\tilde{X}a_1 \parallel \tilde{X}a_2\parallel \tilde{X}a_3(\beta)$, which contradicts the condition $\tilde{X}a_1\nparallelslant \tilde{X}a_2$ of Property ${\mathcal I}$.
Similarly, one can prove $v_2(\beta)>v(SP)$. Therefore we come to a conclusion that $\min\{v_1(\beta),v_2(\beta)\}>v(SP).$
\end{proof}

%From Remark~\ref{note-1} and Theorem~\ref{narrow-theorem}, it is easily verified that, for any $\beta\in [0,1]$, $\Omega_1(\beta)\cup\Omega_2(\beta)\subseteq\Omega(SP)$ and $\min\{v_1(\beta),v_2(\beta)\}\geq v(SP)$.
By Theorem~\ref{narrow-theorem}, the vector $a_3(\beta)$ defined in~\eqref{a3} or the hyperplane defined in~\eqref{hyperplane} is effective in narrowing the SDPR-SOCR gap of $(\ETR_2)$ for any $\beta\in (0,1)$.
A natural question is: among these valid hyperplanes, which one is the best, in other words, which one can minimize the SDPR-SOCR gap? Can the SDPR-SOCR gap be eliminated?

To answer this question, some facts of $v_1(\beta)$ and $v_2(\beta)$ are presented.
The following lemma reveals the monotonicity of $v_1(\beta)$ and $v_2(\beta)$ on the interval $[0,1]$.
\begin{lemma}{}
 Suppose that $(\ETR_2)$ satisfies the Slater condition and $v(\ETR_2)>v(SP)$. Then $v_1(\beta)$ is nondecreasing from $\beta=0$ to $\beta=1$, and $v_2(\beta)$ is nonincreasing from $\beta=0$ to $\beta=1$.
%The functions $v_1(\beta)$ and $v_2(\beta)$ are monotonic on the interval $[0,1]$.
\label{monotonicity}
\end{lemma}
\begin{proof}{}
For an arbitrary pair of points $\beta, \hat{\beta}\in [0,1]$ such that $\beta<\hat{\beta}$, we have
\[
\begin{aligned}
\Omega_1(\beta)&=
 \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
 \;\middle|\;
 \begin{aligned}
 & M_1\bullet X\le 0,~~E_{00}\bullet X=1,~~(1-\beta)a_1^TXa_2+\beta a_2^TXa_2\le 0,\\
& Xa_1\in \SOC,~~-Xa_2\in \SOC,~~(1-\beta)Xa_1+\beta Xa_2\in \SOC
 \end{aligned}
\right\},\\
\Omega_1(\hat{\beta})&=
 \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
 \;\middle|\;
 \begin{aligned}
& M_1\bullet X\le 0,~~E_{00}\bullet X=1,~~(1-\hat{\beta})a_1^TXa_2+\hat{\beta} a_2^TXa_2\le 0,\\
& Xa_1\in \SOC,~~-Xa_2\in \SOC,~~(1-\hat{\beta})Xa_1+\hat{\beta} Xa_2\in \SOC
 \end{aligned}
\right\},\\
&=
 \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
 \;\middle|\;
 \begin{aligned}
& M_1\bullet X\le 0,~~E_{00}\bullet X=1,~~Xa_1\in \SOC,~~-Xa_2\in \SOC,\\
& (1-\beta)a_1^TXa_2+\beta a_2^TXa_2\le(\hat{\beta}-\beta)(a_1^TXa_2-a_2^TXa_2),\\
&(1-\beta)Xa_1+\beta Xa_2-(\hat{\beta}-\beta)(Xa_1-Xa_2)\in \SOC
 \end{aligned}
\right\}.\\
\end{aligned}
\]
It is easily verified that $\Omega_1(\hat{\beta})\subseteq \Omega_1(\beta)$.
Then $v_1(\beta)$ is a nondecreasing function from $\beta=0$ to $\beta=1$.
Similarly, one can prove
$\Omega_2(\beta)\subseteq \Omega_2(\hat{\beta}),$ and $v_2(\beta)$ is a nonincreasing function from $\beta=0$ to $\beta=1$.
\end{proof}
%\begin{proof}{}
%For any $\beta\in(0,1)$ and an arbitrarily small positive number $\Delta\beta$, we have
%\[\begin{array}{l}
%\Omega_1(1)=
% \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
% \left|
% \begin{array}{l}
% M_1\bullet X\le 0,~~E_{00}\bullet X=1,~~
% Xa_1\in SOC,~~Xa_2= 0
% \end{array}
% \right. \right\},\\
%\Omega_1(1-\Delta\beta)=
% \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
% \left|
% \begin{array}{l}
% M_1\bullet X\le 0,~~E_{00}\bullet X=1,~~a_2^TXa_2+\Delta\beta(a_1^TXa_2-a_2^TXa_2)\le 0,\\
% Xa_1\in SOC,~~-Xa_2\in SOC,~~Xa_2+\Delta\beta(Xa_1-Xa_2)\in SOC
% \end{array}
% \right. \right\},\\
%\Omega_1(\beta+\Delta\beta)=
% \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
% \left|
% \begin{array}{l}
% M_1\bullet X\le 0,~~E_{00}\bullet X=1,~~Xa_1\in SOC,~~-Xa_2\in SOC,\\
% (1-\beta)a_1^TXa_2+\beta a_2^TXa_2-\Delta\beta(a_1^TXa_2-a_2^TXa_2)\le 0,\\
%(1-\beta)Xa_1+\beta Xa_2-\Delta\beta(Xa_1-Xa_2)\in SOC
% \end{array}
% \right. \right\},\\
%\Omega_1(\beta)=
% \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
% \left|
% \begin{array}{l}
% M_1\bullet X\le 0,~~E_{00}\bullet X=1,~~(1-\beta)a_1^TXa_2+\beta a_2^TXa_2\le 0,\\
% Xa_1\in SOC,~~-Xa_2\in SOC,~~(1-\beta)Xa_1+\beta Xa_2\in SOC
% \end{array}
% \right. \right\},\\
% \Omega_1(\Delta\beta)=
% \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
% \left|
% \begin{array}{l}
% M_1\bullet X\le 0,~~E_{00}\bullet X=1,~a_1^TXa_2-\Delta\beta(a_1^TXa_2-a_2^TXa_2)\le 0,\\
% Xa_1\in SOC,~~-Xa_2\in SOC,~Xa_1-\Delta\beta(Xa_1-Xa_2)\in SOC
% \end{array}
% \right. \right\},\\
% \Omega_1(0)=
% \left\{X\in\mathcal{S}^{(n+1)\times (n+1)}_{+}
% \left|
% \begin{array}{l}
% M_1\bullet X\le 0,~E_{00}\bullet X=1,~
% a_1^TXa_2\le 0,~
% Xa_1\in SOC,~-Xa_2\in SOC
% \end{array}
% \right.
% \right\}=\Omega(SP).\\
%\end{array}\]
%It is easily verified that
%\[\Omega_1(1)\subseteq \Omega_1(1-\Delta\beta),~~~ \Omega_1(\beta+\Delta\beta)\subseteq \Omega_1(\beta),~~~
%\Omega_1(\Delta\beta)\subseteq \Omega_1(0)=\Omega(SP).\]
%Then $v_1(\beta)$ is a nondecreasing function from $\beta=0$ to $\beta=1$.
%Similarly, one can prove
%\[\Omega_2(0)\subseteq \Omega_2(\Delta\beta),~~~ \Omega_2(\beta)\subseteq \Omega_2(\beta+\Delta\beta),~~~\Omega_2(1-\Delta\beta)\subseteq \Omega_2(1)=\Omega(SP),\]
%and $v_2(\beta)$ is a nonincreasing function from $\beta=0$ to $\beta=1$.
%\end{proof}

Based on Lemma~\ref{monotonicity}, the following lemma is presented to reveal the continuity of $v_1(\beta)$ and $v_2(\beta)$ on the interval $[0,1]$.
\begin{lemma}{}
Suppose that $(\ETR_2)$ satisfies the Slater condition and $v(\ETR_2)>v(SP)$. Then the functions $v_1(\beta)$ and $v_2(\beta)$ are continuous on the interval $[0,1]$.
\label{continuous}
\end{lemma}

\begin{proof}
For any $\beta_{\infty}\in(0,1]$ and any nondecreasing sequence $\{\beta_n\}$ converging to $\beta_{\infty}$, where $0<\beta_n\leq1$ for any $n\in N_{+}$, we have
\[
0<\beta_1\leq\beta_2\leq\beta_3\leq\cdots\leq\beta_n\leq\beta_{n+1}\leq\cdots\leq\beta_{\infty}\leq1.
\]
From Lemma~\ref{interior-feasible}, both $(\SOCP_1(\beta_n))$ and $(\SOCP_1(\beta_{\infty}))$ always have interior feasible points. Let $X_n$ be the optimal solution to $(\SOCP_1(\beta_n))$.
The compactness of the feasible sets
of the SDP implies that the sequence $\{X_n\}$ has a convergent subsequence, i.e. $\{X_{n_k}\}$, $k\in N_{+}$. Assume that
$\lim\limits_{k\to+\infty}X_{n_k}=X_{\infty}$, then $X_{\infty}$ is feasible for $(\SOCP_1(\beta_{\infty}))$.
It is easily verified (see Lemma~\ref{monotonicity}) that
\[
v_1(\beta_{\infty})\leq M_0\bullet X_{\infty}=\lim\limits_{k\to+\infty}M_0\bullet X_{n_k}=\lim\limits_{k\to+\infty}v_1(\beta_{n_k})= \lim\limits_{n\to+\infty}v_1(\beta_n).
\]
Since $v_1(\beta_n)\leq v_1(\beta_{\infty})$ for all $n\in N_{+}$, we also have
$\lim\limits_{n\to+\infty}v_1(\beta_n)\leq v_1(\beta_{\infty})$.
Then
$\lim\limits_{n\to+\infty}v_1(\beta_n) = v_1(\beta_{\infty}).$
Thus, $v_1(\beta)$ is left-continuous on the interval $(0,1]$.


For any $\beta\in[0,1)$, by Lemma~\ref{interior-feasible}, $(\SOCP_1(\beta))$ always has interior feasible points. Let $X$ be an interior feasible point of $(\SOCP_1(\beta))$. For any $\epsilon>0$, one can find another interior feasible point $\hat{X}$ such that $M_0\bullet\hat{X}<v_1(\beta)+\epsilon$.
For any $\hat{\beta}~(\hat{\beta}>\beta)$ sufficiently close to $\beta$, $\hat{X}$ is also feasible for $(\SOCP_1(\hat{\beta}))$.
Then
\[
v_1(\hat{\beta})\leq M_0\bullet\hat{X}<v_1(\beta)+\epsilon.
\]
As $v_1(\beta)$ is nondecreasing from $\beta=0$ to $\beta=1$, $v_1(\hat{\beta})\geq v_1(\beta).$ Thus
\[
|v_1(\hat{\beta})-v_1(\beta)|<\epsilon.
\]
It means that $v_1(\beta)$ is right-continuous on the interval $[0,1)$.

Therefore, $v_1(\beta)$ is continuous on the interval $[0,1]$.
Similarly, one can prove that $v_2(\beta)$ is also continuous on the interval $[0,1]$.
\end{proof}

From the formula~\eqref{v1201}, $v_1(0)\leq v_2(0)$ and $v_1(1)\geq v_2(1)$. By Lemma~\ref{continuous} and the Zero-Point Theorem for continuous functions on closed intervals, one gets immediately the following conclusion.

\begin{theorem}{}
Suppose that $(\ETR_2)$ satisfies the Slater condition and $v(\ETR_2)>v(SP)$. Then there exists some $\beta_0\in (0,1)$ satisfying $v_1(\beta_0)=v_2(\beta_0)$. Moreover, $v_1(\beta_0)=v_2(\beta_0)>v(SP)$.
\label{v1=v2-theorem}
\end{theorem}

From Lemma~\ref{monotonicity} and Theorem~\ref{v1=v2-theorem}, as $v_1(\beta)$ is nondecreasing and $v_2(\beta)$ is nonincreasing, $\min\{v_1(\beta_0),v_2(\beta_0)\}$ is the maximum of all the $\min\{v_1(\beta),v_2(\beta)\}~~~(\beta\in [0,1])$ and it is the closest to $v(\ETR_2)$.
That is to say, $a_3(\beta_0)$, with $\beta_0$ given in Theorem~\ref{v1=v2-theorem}, is the best choice when narrowing the SDPR-SOCR gap of $(\ETR_2)$ by Theorem~\ref{narrow-theorem}.
Moreover, if at least one of $(\SOCP_1(\beta_0))$ and $(\SOCP_2(\beta_0))$ is a tight relaxation of its original problem, then one can easily verify that
\[
\begin{array}{ll}
v(\ETR_2)&=\min\left\{v(QP_1(\beta_0)),\ v(QP_2(\beta_0))\right\}=\min\left\{v_1(\beta_0),v_2(\beta_0)\right\}=v_1(\beta_0)=v_2(\beta_0),
\end{array}
\]
i.e. the SDPR-SOCR gap is eliminated.
For the great majority of SDPR-SOCR-gap-existing instances, at least one of $(\SOCP_1(\beta_0))$ and $(\SOCP_2(\beta_0))$ is a tight relaxation of its original problem, i.e., the SDPR-SOCR model violates Property ${\mathcal I}$.
However, there exist instances for which both $(\SOCP_1(\beta_0))$ and $(\SOCP_2(\beta_0))$ satisfy Property ${\mathcal I}$ and we cannot eliminate their SDPR-SOCR gaps only by $(\SOCP_1(\beta_0))$ and $(\SOCP_2(\beta_0))$. The following example is a case in point.

\begin{example}
One instance of $(\ETR_2)$ is defined as follows:
\[
n=2,\,
Q_0=\left[\begin{matrix}
 -69& -4\\
 -4& -62
\end{matrix}
\right],\,
b_0=\left[\begin{matrix}
7 \\
14
\end{matrix}
\right],\,
b_1=\left[\begin{matrix}
0.2 \\
0.99
\end{matrix}
\right],\,
b_2=\left[\begin{matrix}
-0.7 \\
-0.6
\end{matrix}
\right],\,
c_1=0.94,\,
c_2=-0.75.
\]
\label{example:v1=v2-2gap}
\end{example}

One can check that $v(\ETR_2)\approx -86.8220$ with the optimal solution $d^*\approx( -0.3115,\ -0.8866)^T$,
and $v(SP)\approx -92.4781$ with the optimal solution
\[
X^*\approx\left[\begin{matrix}
 1 & -0.2914 & -0.8342\\
-0.2914 & 0.2033 & 0.2022\\
-0.8342 & 0.2022 & 0.7967
\end{matrix}
\right].
\]
Then we obtain $\beta_0\approx 0.5069$ and
\begin{equation*}
\begin{aligned}
v(QP_1(\beta_0))\approx -86.8220,\ v_1(\beta_0)&\approx -87.8057,\\
v(QP_2(\beta_0))\approx -86.8220,\ v_2(\beta_0)&\approx -87.8057.
\end{aligned}
\end{equation*}

In Example~\ref{example:v1=v2-2gap}, all three SDPR-SOCR models admit positive duality gaps with their original problems, i.e.,
\begin{equation*}
\begin{aligned}
v(\ETR_2)-v(SP)&\approx 5.6561,\\
v(QP_1(\beta_0))- v_1(\beta_0)&\approx 0.9837,\\
v(QP_2(\beta_0))- v_2(\beta_0)&\approx 0.9837.
\end{aligned}
\end{equation*}
Although the SDPR-SOCR gap $v(\ETR_2)-v(SP)\approx 5.6561$ is not completely eliminated, it is greatly reduced to $v(\ETR_2)-\min\{v_1(\beta_0),\ v_2(\beta_0)\}\approx 0.9837$.

From Example~\ref{example:v1=v2-2gap}, we know that the SDPR-SOCR gap of $(\ETR_2)$ cannot always be eliminated by $(\SOCP_1(\beta_0))$ and $(\SOCP_2(\beta_0))$.
That is to say, we can only make sure that the SDPR-SOCR gap of $(\ETR_2)$ can be narrowed by adding an appropriate SOC constraint. As for eliminating the SDPR-SOCR gap thoroughly, we should add several appropriate SOC constraints and use a sequence of SDP problems with SOC constraints to obtain the optimal solution of $(\ETR_2)$.
In the next section, an iterative algorithm will be presented to eliminate the SDPR-SOCR gap by a sequence of SDP problems with SOC constraints.


\section{An algorithm and numerical results}

In this section, an algorithm is proposed to eliminate the SDPR-SOCR gap of $(\ETR_2)$, and some nonconvex and intersecting examples are presented to illustrate how the algorithm works in eliminating the SDPR-SOCR gap.

\subsection{An algorithm for eliminating the SDPR-SOCR gap of \texorpdfstring{$(\ETR_2)$}{(ETR2)}}

We propose an algorithm to eliminate the SDPR-SOCR gap of $(\ETR_2)$ and use a sequence of SDP problems with SOC constraints to obtain the optimal solution of $(\ETR_2)$. The framework of the algorithm is depicted in Algorithm~\ref{Multiple-Bisection}.



\begin{algorithm}[htb]
\caption{An iterative algorithm to solve $n$-dimensional $(\ETR_2)$ with a positive SDPR-SOCR gap.}
\label{Multiple-Bisection}
\begin{description}
\item[Input]
Matrices $M_0,~M_1,~E_{00}$ and vectors $a_1,~a_2$ defined in~\eqref{eq:M1M2-etc-definition}; positive tolerances $\eta_1$, $\eta_2$, $\Delta$.\\
\item[Output]
The optimal value $\tilde{v}$, the optimal solution $\tilde{d}$ and the error $\tilde{\xi}$.
	\item[\hypertarget{A1_Step 1}{Step 1}] Set $ {a}_1^{(1)}\coloneqq \dfrac{a_1}{\|a_1\|},~~ {a}_2^{(1)}\coloneqq \dfrac{a_2}{\|a_2\|}.$
Denote $S_{a}^{(1)}\coloneqq \left\{ {a}_1^{(1)},~ {a}_2^{(1)}\right\}$.
Based on $S_{a}^{(1)}$, we solve the model $(\SOCP_1^{(1)})$ defined in~\eqref{SOCPj} and obtain its optimal value $v_1^{(1)}$ and optimal solution $X_1^{(1)}$. Calculate $P_1^{(1)}$ in Definition~\ref{Pj}.
Denote $S_{v}^{(1)}\coloneqq \left\{v_1^{(1)}\right\}$, $S_{X}^{(1)}\coloneqq \left\{X_1^{(1)}\right\}$, $S_{P}^{(1)}\coloneqq \left\{P_1^{(1)}\right\}$.
Set $j\coloneqq 1$ and $k\coloneqq 1$.

	\item[\hypertarget{A1_Step 2}{Step 2}] Pick up the $j$-th elements of the sets $S_{v}^{(k)}$, $S_{X}^{(k)}$, $S_{P}^{(k)}$, and obtain $v_{j}^{(k)},\ X_{j}^{(k)},\ P_{j}^{(k)}$.
Pick up the $j$-th and $j+1$-th elements of $S_{a}^{(k)}$, and obtain ${a}_{j}^{(k)},\ {a}_{j+1}^{(k)}$.
Put $M_0$, $M_1$, $X_{j}^{(k)},\ {a}_{j}^{(k)},\ {a}_{j+1}^{(k)}$ and $\Delta$ into Algorithm~\ref{decomposition} and obtain $v_a$ and $x_a$.
Compute $\xi\coloneqq |v_{j}^{(k)}-v_a|$.
	Let ${b}_j^{(k)}\coloneqq {a}_j^{(k)}(2:n+1)$ and ${b}_{j+1}^{(k)}\coloneqq {a}_{j+1}^{(k)}(2:n+1)$.

	\item[\hypertarget{A1_Step 3}{Step 3}]If any one of the three conditions, $P_j^{(k)}=0$, $\xi\leq \eta_1$, ${{b}_j^{(k)}}^T{b}_{j+1}^{(k)}\geq 1-\eta_2$, is satisfied, then output $\tilde{v}=v_a,~~\tilde{d}=x_a(2:n+1),~~\tilde{\xi}=\xi$ and stop; Otherwise, go to Step 4.

	\item[\hypertarget{A1_Step 4}{Step 4}]
Compute $ {a}_{s}^{(k)}\coloneqq \frac{1}{\left\| {a}_{j}^{(k)}+ {a}_{j+1}^{(k)}\right\|}\left( {a}_{j}^{(k)}+ {a}_{j+1}^{(k)}\right)$.
Add ${a}_{s}^{(k)}$ between ${a}_j^{(k)}$ and ${a}_{j+1}^{(k)}$ and obtain
\[
S_{a}^{(k+1)}\coloneqq \left\{ {a}_1^{(k)},\ldots,~ {a}_j^{(k)},~{\color{blue} {a}_{s}^{(k)}},~ {a}_{j+1}^{(k)},\ldots,~ {a}_{k+1}^{(k)}\right\}.
\]
Relabel its elements sequentially and rewrite it as
\[
S_{a}^{(k+1)}=\left\{ {a}_i^{(k+1)},i=1,2,\ldots,k+2\right\}.
\]
	
\item[\hypertarget{A1_Step 5}{Step 5}] Based on $S_{a}^{(k+1)}$, we solve the new problems $(\SOCP_j^{(k+1)})$ and $(\SOCP_{j+1}^{(k+1)})$ defined in~\eqref{SOCPj},
and obtain their optimal values $v_{j}^{(k+1)}$, $v_{j+1}^{(k+1)}$, and optimal solutions $X_{j}^{(k+1)}$, $X_{j+1}^{(k+1)}$.
Calculate $P_{j}^{(k+1)}$ and $P_{j+1}^{(k+1)}$ in Definition~\ref{Pj}.
Replacing the $j$-th elements of $S_{v}^{(k)}$, $S_{X}^{(k)}$, $S_{P}^{(k)}$ with them, we obtain
\[
\begin{array}{lll}
S_{v}^{(k+1)}: &=& \left\{v_1^{(k)},\ldots,v_{j-1}^{(k)},~{\color{blue}v_j^{(k+1)}},~{\color{blue}v_{j+1}^{(k+1)}},v_{j+1}^{(k)},\ldots,~v_{k}^{(k)}\right\}, \\
S_{X}^{(k+1)}: &=& \left\{X_1^{(k)},\ldots,X_{j-1}^{(k)},~{\color{blue}X_j^{(k+1)}},~{\color{blue}X_{j+1}^{(k+1)}},X_{j+1}^{(k)},\ldots,~X_{k}^{(k)}\right\}, \\
S_{P}^{(k+1)}: &=& \left\{P_1^{(k)},\ldots,P_{j-1}^{(k)},~{\color{blue}P_j^{(k+1)}},~{\color{blue}P_{j+1}^{(k+1)}},P_{j+1}^{(k)},\ldots,~P_{k}^{(k)}\right\}. \\
\end{array}
\]
Relabel the elements of these sets sequentially. Then we rewrite them as
\[
S_{v}^{(k+1)}=\left\{v_i^{(k+1)},i=1,2,\ldots,k+1\right\},
\]
\[
S_{X}^{(k+1)}=\left\{X_i^{(k+1)},i=1,2,\ldots,k+1\right\},
\]
\[
S_{P}^{(k+1)}=\left\{P_i^{(k+1)},i=1,2,\ldots,k+1\right\}.
\]

	\item[\hypertarget{A1_Step 6}{Step 6}] Let $j^*\coloneqq \text{argmin}\left\{v_i^{(k+1)},\ v_i^{(k+1)}\in S_{v}^{(k+1)}\right\}$.
Set $j=j^*$, $k=k+1$ and go to Step 2.

 \end{description}
\end{algorithm}

\begin{algorithm}[htb]
\caption{Finding an approximate optimal solution and the corresponding objective function value for $(QP_j^{(k)})$, which is defined in \eqref{eq:QPj}.}
\label{decomposition}
 \begin{description}
 	\item[Input] Matrices $M_0$, $M_1$, $X_j^{(k)}$ and vectors $a_j^{(k)}$, $a_{j+1}^{(k)}$; positive tolerance $\Delta$.
 	
 	\item[Output] An approximate optimal solution $x_a$, the corresponding objective function value $v_a$.

	\item[\hypertarget{A2_Step 1}{Step 1}] Calculate the eigenvector $x_{\lambda}$, which corresponds to the maximal eigenvalue of $X_j^{(k)}$.

 	\item[\hypertarget{A2_Step 2}{Step 2}] Let $x_1=X_j^{(k)}(:,1)$, $x_2=\dfrac{X_j^{(k)}a_j^{(k)}}{\left(X_j^{(k)}a_j^{(k)}\right)_1}$, and $x_3=\dfrac{-X_j^{(k)}a_{j+1}^{(k)}}{\left(-X_j^{(k)}a_{j+1}^{(k)}\right)_1}$. Set $\Phi=\{1,2,3\}$.

 \item[\hypertarget{A2_Step 3}{Step 3}] If $|(x_{\lambda})_1|>\Delta$, then set $x_4=\dfrac{x_{\lambda}}{(x_{\lambda})_1}$ and go to Step 4; Otherwise, go to Step 5.

	\item[\hypertarget{A2_Step 4}{Step 4}] If $x_4^TM_1x_4\leq\Delta$, $x_4^Ta_j^{(k)}\geq-\Delta$ and $x_4^Ta_{j+1}^{(k)}\leq\Delta$, then we add the index $4$ into the set $\Phi$.

	\item[Step 5] Find $h\coloneqq \text{argmin} \{M_0\bullet x_ix_i^T,\ i\in \Phi\}$ and output $x_a=x_h,~~v_a=M_0\bullet x_hx_h^T$.

 \end{description}
\end{algorithm}

In Algorithm~\ref{Multiple-Bisection}, we solve the following subproblems:
\begin{equation}\label{SOCPj}
\begin{array}{lcl}
(\SOCP_j^{(k)})& \mbox{min} & M_0\bullet X \\
& \mbox{s.t.} & M_1\bullet X\le 0, \\
& &{{a}_j^{(k)}}^{T} X {a}_{j+1}^{(k)}\le 0, \\
&& X {a}_j^{(k)}\in \SOC,\\
&& -X {a}_{j+1}^{(k)}\in \SOC,\\
&& Xa_1\in \SOC,\\
&& -X{a}_2\in \SOC,\\
& & E_{00}\bullet X=1,\\
& & X\succeq0,
\end{array}
\end{equation}
where ${a}_j^{(k)}$ and ${a}_{j+1}^{(k)}$ derive from $S_{a}^{(k)}$.
Compared with~\eqref{SOC1} and~\eqref{SOC2}, $(\SOCP_j^{(k)})$ adds a redundant SOC constraint $Xa_1\in \SOC$ or $-X{a}_2\in \SOC$ in a convenient way for later iteration.
In each iteration of Algorithm~\ref{Multiple-Bisection}, one needs only to solve $(\SOCP_j^{(k)})$, which is an SDP problem with $4$ SOC constraints and $3$ linear constraints. From Remark~\ref{note-1},
as ${a}_j^{(k)}$ and ${a}_{j+1}^{(k)}$ are the linear combinations of $a_1$ and $a_2$, two of the four SOC constraints are redundant.
$(\SOCP_j^{(k)})$ is much simpler than the SDP problems in each iteration of both algorithms in~\cite{Dai2020}.

As shown in~\hyperlink{A1_Step 2}{Step 2} of Algorithm~\ref{Multiple-Bisection}, ${b}_j^{(k)}={a}_j^{(k)}(2:n+1)$ and ${b}_{j+1}^{(k)}={a}_{j+1}^{(k)}(2:n+1)$.
Denote ${c}_j^{(k)}\coloneqq {a}_j^{(k)}(1)$ and ${c}_{j+1}^{(k)}\coloneqq {a}_{j+1}^{(k)}(1)$. We obtain the following problem:
\begin{equation}
\label{eq:QPj}
\begin{array}{lcl}
(QP_j^{(k)})\quad&\mbox{min} &d^TQ_0d+2b_0^Td\\
&\mbox{s.t.} &\|d\|^2\leq 1,\\
&& b_1^Td+c_1\geq0,\\
&& b_2^Td+c_2\leq0,\\
&& {b_j^{(k)}}^Td+c_j^{(k)}\geq0,\\
&& {b_{j+1}^{(k)}}^Td+c_{j+1}^{(k)}\leq0.\\
\end{array}
\end{equation}
$(QP_j^{(k)})$ is the original problem of $(\SOCP_j^{(k)})$, and $(\SOCP_j^{(k)})$ is the SDP relaxation of the SOC reformulation of $(QP_j^{(k)})$.
Similar with $(\SOCP_j^{(k)})$, as ${a}_j^{(k)}$ and ${a}_{j+1}^{(k)}$ are the linear combinations of $a_1$ and $a_2$, two of the four linear constraints are redundant for $(QP_j^{(k)})$.

\begin{remark}{}
\label{QPj}
$(QP_j^{(k)})$ is essentially the extended trust-region subproblem with two linear constraints, i.e., it belongs to the type of $(\ETR_2)$. Similarly, $(\SOCP_j^{(k)})$ belongs to the type of $(SP)$. Thus, we can use Property ${\mathcal I}$ in Theorem~\ref{gap-theorem} to determine whether $(\SOCP_j^{(k)})$ is an exact relaxation of $(QP_j^{(k)})$.
\end{remark}

\begin{definition}{}
We use $P_j^{(k)}$ to determine whether Property ${\mathcal I}$ in Theorem~\ref{gap-theorem} is satisfied for $(\SOCP_j^{(k)})$.
If $P_j^{(k)}=0$, it means that $(\SOCP_j^{(k)})$ violates Property ${\mathcal I}$, i.e., $(\SOCP_j^{(k)})$ is an exact relaxation of $(QP_j^{(k)})$;
If $P_j^{(k)}=1$, it means that $(\SOCP_j^{(k)})$ satisfies Property ${\mathcal I}$, i.e., $(QP_j^{(k)})$ admits a positive duality gap with $(\SOCP_j^{(k)})$.
Note that, when calculating $P_j^{(k)}$, we ignore redundant SOC constraints.
\label{Pj}
\end{definition}

\begin{remark}{}
\label{C-Pj}
When determining whether Property ${\mathcal I}$ in Theorem~\ref{gap-theorem} is satisfied, particular care is needed because the optimal solutions computed with the CVX package contain tiny numerical errors. For the first condition of Property ${\mathcal I}$, one needs to calculate the ranks of matrices. For a matrix $X\in\mathcal{S}^{n\times n}_{+}$, we first calculate its eigenvalues and, without loss of generality, arrange them in descending order, i.e., $\lambda_1\geq\lambda_2\geq\cdots\geq\lambda_n\geq0$.
Suppose that $\varepsilon_1,~\varepsilon_2,~\varepsilon_3,~\varepsilon_4,~\varepsilon_5$ are sufficiently small positive numbers.
If the maximum eigenvalue $\lambda_1\leq\varepsilon_1$, then we set the rank of $X$ to a value of zero. Otherwise, for $i=2,3,\ldots,n$, if $\frac{\lambda_i}{\lambda_1}<\varepsilon_2$ then we set $\lambda_i=0$ and then calculate the rank of $X$. Note that one should normalize $a_1$ and $a_2$ before evaluating the conditions (3) and (4). For the conditions (2) and (3), we set the tolerance to be $\varepsilon_3$. In the last condition, one should first judge whether $\hat u_1\neq0$, $\hat u_2\neq0$, $\hat Xa_1\neq0$ and $\hat Xa_2\neq0$. For a vector $x\in\mathcal{R}^{n}$, if $\|x\|>\varepsilon_4$, then we determine $x\neq0$. As for evaluating $\hat Xa_1\nparallelslant\hat Xa_2$, one needs to calculate the cosine coefficient of the two vectors, i.e., if $1-\frac{(\hat Xa_1)^T\hat Xa_2}{\|\hat Xa_1\|\|\hat Xa_2\|}>\varepsilon_5$, then the condition $\hat Xa_1\nparallelslant\hat Xa_2$ holds.
\end{remark}

In each iteration of Algorithm~\ref{Multiple-Bisection}, we aim to find one particular subproblem $(\SOCP_{j^*}^{(k)})$ corresponding to the minimum objective value among all these subproblems $(\SOCP_i^{(k)})~~~(i=1,2,\ldots,k)$.
If $(\SOCP_{j^*}^{(k)})$ is an exact relaxation, then the SDPR-SOCR gap is eliminated, i.e., $v(\ETR_2)=v(\SOCP_{j^*}^{(k)})$.
Otherwise, we add a new hyperplane and solve new subproblems, repeating this process until the stopping criterion is satisfied.
Note that the stopping criterion of Algorithm~\ref{Multiple-Bisection} has three cases:
$P_{j^*}^{(k)}=0$, or $\xi=|v_{j^*}^{(k)}-v_a|\leq \eta_1$, or ${{b}_{j^*}^{(k)}}^T{b}_{j^*+1}^{(k)}\geq 1-\eta_2$.
When $P_{j^*}^{(k)}=0$, the SDPR-SOCR gap is eliminated, i.e., $v(\ETR_2)=v(\SOCP_{j^*}^{(k)})$.
In each iteration, we apply Algorithm~\ref{decomposition} to find an approximate optimal solution $x_a$ and the corresponding objective function value $v_a$ for $(QP_{j^*}^{(k)})$.
Suppose that $\eta_1,~\eta_2$ are sufficiently small positive numbers.
The second stopping criterion $\xi=|v_{j^*}^{(k)}-v_a|\leq \eta_1$ represents that the gap between $(QP_{j^*}^{(k)})$ and $(\SOCP_{j^*}^{(k)})$ is too small to neglect, in other words, $(\SOCP_{j^*}^{(k)})$ can be regarded as an exact relaxation.
The third stopping criterion ${{b}_{j^*}^{(k)}}^T{b}_{j^*+1}^{(k)}\geq 1-\eta_2$ means that ${a}_{j^*}^{(k)}$ and ${a}_{j^*+1}^{(k)}$ are too close to insert a new hyperplane between them, in other words, $(QP_{j^*}^{(k)})$ reduces to the extended trust-region subproblem with only one linear constraint and $(\SOCP_{j^*}^{(k)})$ can be regarded as an exact relaxation.

The convergence of the sequence generated by Algorithm~\ref{Multiple-Bisection} is summarized as follows.

\begin{theorem}
\label{Convergence}
The sequence generated by Algorithm~\ref{Multiple-Bisection}, $\big\{\min\{S_{v}^{(k)}\},~k\in N_+\big\}$, is convergent.
\end{theorem}

\begin{proof}
In the $k$-th ($k\in N_+$) iteration of Algorithm~\ref{Multiple-Bisection}, $S_{a}^{(k)}$ denotes the set of $k+1$ hyperplanes. Every two adjacent hyperplanes of $S_{a}^{(k)}$ are associated to a SDP problem with SOC constraints. Based on $S_{a}^{(k)}$, we obtain $k$ problems, i.e., $\SOCP_1^{(k)},~\SOCP_2^{(k)},\ldots, \SOCP_k^{(k)}$.
The new hyperplane ${a}_{s}^{(k)}$, which is defined in~\hyperlink{A1_Step 4}{Step 4}, will be added between ${a}_j^{(k)}$ and ${a}_{j+1}^{(k)}$.
Then we obtain the new set $S_{a}^{(k+1)}$ as shown in~\hyperlink{A1_Step 4}{Step 4}. Based on $S_{a}^{(k+1)}$, $(\SOCP_j^{(k)})$ is replaced with two new problems $(\SOCP_j^{(k+1)})$ and $(\SOCP_{j+1}^{(k+1)})$. From Remark~\ref{QPj} and Theorem~\ref{narrow-theorem}, the SDPR-SOCR gap of $(QP_j^{(k)})$ can be narrowed, i.e.,
\[
v(QP_j^{(k)})=\min \left\{v(QP_j^{(k+1)}),v(QP_{j+1}^{(k+1)})\right\}\geq\min\left\{v(\SOCP_j^{(k+1)}),v(\SOCP_{j+1}^{(k+1)})\right\}>v(\SOCP_j^{(k)}).
\]
It is easily verified that $\min\left\{S_{v}^{(k+1)}\right\}>\min\left\{S_{v}^{(k)}\right\}$. Then the sequence $\left\{\min\{S_{v}^{(k)}\},~k\in N_+\right\}$ is monotonically increasing.
Moreover, for any $k\in N_+,$
\[
\begin{array}{ll}
v(\ETR_2)=\min\left\{v(QP_1^{(k)}),\ v(QP_2^{(k)}),\ldots,v(QP_k^{(k)})\right\}\geq\min\left\{S_{v}^{(k)}\right\}>v(SP).
\end{array}
\]
By the Monotone Sequence Theorem, the sequence $\left\{\min\{S_{v}^{(k)}\},~k\in N_+\right\}$ converges.
\end{proof}


In the following part, we manage to illustrate numerically how our Algorithm~\ref{Multiple-Bisection} works in eliminating the SDPR-SOCR gap of the nonconvex original problem $(\ETR_2)$. In our numerical experiments, we set tolerances $\varepsilon_1=10^{-3}$, $\varepsilon_2=\varepsilon_3=\varepsilon_4=\varepsilon_5=10^{-5}$, $\eta_1=\eta_2=\Delta=10^{-4}$.

\subsection{Numerical examples in the literature}
In this subsection, we consider the following two nonconvex and intersecting examples proposed in~\cite{YuanAi} and~\cite{Burer-Anstreicher-2013} to illustrate how our Algorithm~\ref{Multiple-Bisection} works in eliminating the SDPR-SOCR gap.

\begin{example}[{\cite[Example~3.1]{YuanAi}}]
Consider the following example of $(\ETR_2)$ from~\cite{YuanAi}, which is defined as follows:
\begin{equation*}
n=2,\,
Q_0=\left[\begin{matrix}
 -100& 10\\
 10& -61
\end{matrix}
\right],\,
b_0=\left[\begin{matrix}
45 \\
-14
\end{matrix}
\right],\,
b_1=\left[\begin{matrix}
0 \\
-0.4
\end{matrix}
\right],\,
b_2=\left[\begin{matrix}
-0.9 \\
-0.6
\end{matrix}
\right],\,
c_1=0.1,\,
c_2=0.2.
\end{equation*}
\label{example:YuanAi}
\end{example}

One can check that $v(\ETR_2)\approx -12.5791$ with the optimal solution $d^*\approx( 0.9682,\ 0.25)^T$, $v(SP)\approx-13.1898$ and the SDPR-SOCR gap is $v(\ETR_2)-v(SP)\approx 0.6106$. We adopt Algorithm~\ref{Multiple-Bisection} to solve this example. The algorithm stops at only one iteration and outputs $\tilde{v}\approx -12.5791$ and $\tilde{d}\approx( 0.9682,\ 0.2500)^T$.


\begin{example}[{\cite[Section~4]{Burer-Anstreicher-2013}}]
Consider the following example of $(\ETR_2)$ from~\cite{Burer-Anstreicher-2013}, which is defined as follows:
\begin{equation*}
n=3,\,
Q_0=\left[\begin{matrix}
 2& 3& 12\\
 3& -19& 6\\
 12& 6& 0\\
\end{matrix}
\right],\,
b_0=\left[\begin{matrix}
7 \\
7\\
4.5
\end{matrix}
\right],\,
b_1=\left[\begin{matrix}
1 \\
1.2\\
0
\end{matrix}
\right],\,
b_2=\left[\begin{matrix}
1 \\
0\\
0
\end{matrix}
\right],\,
c_1=0.5,\,
c_2=0.
\end{equation*}
\label{example:Burer}
\end{example}

One can check that 
\[
v(\ETR_2)\approx -12.9419
\] 
with the optimal solution $d^{*} \approx (-0.8536,\ 0.2947,\ 0.4294)^T$, $v(SP)\approx-13.8410$ and the SDPR-SOCR gap is $v(\ETR_2)-v(SP)\approx 0.8991$. Based on the Karush--Kuhn--Tucker complementarity conditions, one can verify that the optimal value is $-12.9420$ and the optimal solution is $(-0.8534,\ 0.2945,\ 0.4301)^T$.
We adopt Algorithm~\ref{Multiple-Bisection} to solve this example. The algorithm stops at only one iteration and outputs $\tilde{v}\approx -12.9420$ and $\tilde{d}\approx(-0.8534,\ 0.2945,\ 0.4302)^T$. The result of our algorithm is the same as the result obtained by the global optimization software SCIP and the result in~\cite{Deng2019}.

The results of Example~\ref{example:YuanAi} and Example~\ref{example:Burer} show that Algorithm~\ref{Multiple-Bisection} performs very well in eliminating the SDPR-SOCR gap of $(\ETR_2)$ and it terminates within only one iteration.

\subsection{Randomly generated instances}

In this subsection, we test randomly generated nonconvex and intersecting instances to illustrate numerically how our Algorithm~\ref{Multiple-Bisection} works in eliminating the SDPR-SOCR gap of $(\ETR_2)$.

For the $n\times n$ matrix $Q_0$, all the upper triangular entries (including the principal-diagonal entries) are generated uniform-randomly on the interval $[-50, ~50]$ (the lower part takes the values by symmetry) and then, to ensure the ``nonconvexity'', we subtract $60$ from each of the principal-diagonal entries.
The entries of the vector $b_0$ are generated uniform-randomly on the interval $[-50, ~50]$.
For the two $n$-dimensional vectors $b_1$ and $b_2$, all entries are generated uniform-randomly on the interval $[-1,\ 1]$. Then we set $c_1=-b_1^Td_0$ and $c_2=-b_2^Td_0$, where the $n$-dimensional vector $d_0$ is generated uniform-randomly in the open unit ball $\{d~|~\|d\|^2 < 1\}$. Finally, we should normalize $b_1$ and $c_1$ with dividing them by $\|b_1\|$, and normalize $b_2$ and $c_2$ with dividing them by $\|b_2\|$ (thus it is guaranteed that $a_1$ and $a_2$ are normalized).
Any instance generated by the above method is always ``nonconvex'' and ``intersecting'' (the intersection point is exactly $d_0$).

We test $10000$ nonconvex and intersecting instances of $(\ETR_2)$ randomly generated for each of $n=2,3,\ldots,10$, where $n$ denotes the dimension of $(\ETR_2)$. For each instance, we use Property ${\mathcal I}$ in Theorem~\ref{gap-theorem} to check whether it has a positive SDPR-SOCR gap, as shown in Remark~\ref{C-Pj}.
We should draw attention to the phenomenon that when $n$ is greater than $10$, there are no SDPR-SOCR-gap-existing instances left in our experiments. For the SDPR-SOCR-gap-existing instances, we adopt Algorithm~\ref{Multiple-Bisection} to solve them and obtain the error $\tilde{\xi}$ for each instance.
Throughout the paper, all the numerical instances are computed in MATLAB R2017b on a PC with Intel Core i7-7700HQ CPU of 2.8 GHz and 8G memory, using the CVX package (version 1.21) based solver SDPT3 for solving SDP problems.

The numerical results are shown in Table~\ref{numerical-results-2}.
For the table, the second column ``SDP-gap'' denotes the number of the $10000$ instances that admit positive duality gaps with their classical SDP relaxation. If $v(SDP)<v(SP)$ or if $v(SDP)=v(SP)$ but Property ${\mathcal I}$ holds, then we say the instance has a positive SDP gap, where $v(SDP)$ denotes the optimal value of the classical SDP relaxation of $(\ETR_2)$.
The third column ``SDPR-SOCR-gap'' denotes the number of the $10000$ instances that have positive gaps with their SDPR-SOCR models $(SP)$. The fourth column ``Average error'' denotes the average error of Algorithm~\ref{Multiple-Bisection} for all the SDPR-SOCR-gap-existing instances.
The fifth column ``Max error'' denotes the maximum error of Algorithm~\ref{Multiple-Bisection} for all the SDPR-SOCR-gap-existing instances.
Note that the error of the ``Average error'' and ``Max error'' refers to ``the error $\tilde{\xi}$'' in Algorithm~\ref{Multiple-Bisection}.
The sixth column ``Average iteration'' denotes the average iteration number of Algorithm~\ref{Multiple-Bisection} for all the SDPR-SOCR-gap-existing instances.
The last column ``Worst case'' denotes the maximum iteration number of Algorithm~\ref{Multiple-Bisection} for all the SDPR-SOCR-gap-existing instances.


\begin{table}[htb]\centering

\setlength{\tabcolsep}{3.9pt}
\renewcommand{\arraystretch}{1.2}
\caption{Calculating errors for SDPR-SOCR-gap-existing instances.}\label{numerical-results-2}
\begin{tabular}{ccccccc}
\hline
n & SDP-gap & SDPR-SOCR-gap & Average error & Max error & Average iteration & Worst case\\
\hline
2& 5103& 109 & $1.5189\times 10^{-5}$ & $9.7966\times 10^{-5}$ & 1.7064 & 6\\
3& 3735& 21 & $3.1324\times 10^{-5}$ & $9.3417\times 10^{-5}$ & 1.8571 & 4\\
4& 3075& 2 & $3.1731\times 10^{-9}$ & $5.8866\times 10^{-9}$ & 1 & 1\\
5& 2557& 5 & $2.3720\times 10^{-5}$ & $9.9777\times 10^{-5}$ & 2 & 3\\
6& 2155& 1 & $8.6787\times 10^{-9}$ & $8.6787\times 10^{-9}$ & 1 & 1\\
7& 2003& 0\\
8& 1789& 2 & $2.3411\times 10^{-8}$ & $4.5879\times 10^{-8}$ & 1 & 1\\
9& 1618& 0\\
10& 1420& 1 & $3.7420\times 10^{-9}$ & $3.7420\times 10^{-9}$ & 1 & 1\\
\hline
\end{tabular}
\end{table}

As is shown in Table~\ref{numerical-results-2}, the maximum error over all the SDPR-SOCR-gap-existing instances is less than $1\times 10^{-4}$, which indicates that the stopping criterion of Algorithm~\ref{Multiple-Bisection} is effective. When Algorithm~\ref{Multiple-Bisection} terminates, the SDPR-SOCR gap has already been eliminated. The SDPR-SOCR-gap-existing instances appear much less frequently than the SDP-gap-existing instances, and become very rare for larger $n$. For the SDPR-SOCR-gap-existing instances, Algorithm~\ref{Multiple-Bisection} performs very well in eliminating the SDPR-SOCR gap, and the average iteration numbers do not exceed $2$.
In other words, one always needs at most $5$ SDP problems with SOC constraints to obtain the global optimal solution of $(\ETR_2)$.
In the ``Worst case'', Algorithm~\ref{Multiple-Bisection} terminates within six iterations, which compares favorably with the r-BW algorithm and the r-ED algorithm of~\cite{Dai2020}, whose average iteration numbers both exceed $8.4$.

\section{Conclusion}
In this paper, we consider the nonconvex extended trust-region subproblem with two intersecting cuts. By adding a new appropriate SOC constraint, we narrow the SDPR-SOCR gap, and a sufficient condition is presented to characterize when a new SOC constraint is valid for narrowing the SDPR-SOCR gap. Under this condition, there exists a best SOC constraint which minimizes the SDPR-SOCR gap. However, the SDPR-SOCR gap cannot always be eliminated.
By using a sequence of SDP problems with SOC constraints, we establish an iterative algorithm to eliminate the SDPR-SOCR gap.
In theory, it has been proved that the sequence generated by our algorithm is convergent.
%the sequence generated by Algorithm~\ref{Multiple-Bisection}
%and it terminates at the optimal solution of $(\ETR_2)$ with precisions $\eta_1,~\eta_2$.
The numerical results show that our algorithm works efficiently in eliminating the SDPR-SOCR gap and the average iteration numbers do not exceed $2$.
For the ``Worst case'', one only needs six iterations to obtain the global optimal solution of $(\ETR_2)$.


\section*{Acknowledgments}

The author would like to thank Prof. Wenbao Ai for giving valuable suggestions on the algorithm design for this work.
The author is particularly grateful to the editor and the anonymous referee for their valuable comments that led to improving the quality of this paper.

\section*{Declaration of interests}

The authors do not work for, advise, own shares in, or receive funds from
any organization that could benefit from this article, and have declared no affiliations other than their research organizations.


\printbibliography


\end{document}










