% \documentstyle is obsolete LaTeX 2.09 syntax; \documentclass is the
% LaTeX2e equivalent and avoids running the kernel in 2.09 compatibility mode.
\documentclass[12pt]{article}

\pagestyle{empty}

%  shortcut definition

% \lineitem{label}: labelled list item; the trailing \mbox{} gives the item
% non-empty content so the label is typeset even if the body starts later.
\newcommand{\lineitem}[1]{\item[#1]\mbox{}}
% \oneto{n}: the index range "1, ..., n".
\newcommand{\oneto}[1]{1, \ldots, {#1}}



% roman text in mathmode commands

% \insmat{name}: upright operator name with \mathop spacing; in display
% style a subscript (e.g. \minimize_{x}) is set underneath like a limit.
\newcommand{\insmat}[1]{\mathop{\rm {#1}}}  % for mathmode (with underlines)
% \inmat{text}: plain upright text in math mode (no operator spacing).
\newcommand{\inmat}[1]{\mbox{\rm {#1}}}     % roman text in mathmode
% Named operators used by the problem-display macros below.
\newcommand{\maximize}{\insmat{maximize}}
\newcommand{\minimize}{\insmat{minimize}}
\newcommand{\diag}{\insmat{diag}}

% common definitions needed

\newcommand{\lp}{Linear Program}
\newcommand{\lpp}{Linear Programming Problem }
% \geeq / \leeq: greater/less-than-or-equal relations.  The previous
% definitions hand-stacked > over = with the obsolete plain-TeX \atop
% primitive, producing a misaligned four-row pile; the standard relation
% symbols have correct spacing and vertical centering.
\newcommand{\geeq}{\geq}
\newcommand{\leeq}{\leq}
\newcommand{\infinity}{\infty}
\newcommand{\KKT}{Karush--Kuhn--Tucker}
\newcommand{\KKTpt}{Karush--Kuhn--Tucker point }
\newcommand{\KKTconds}{Karush--Kuhn--Tucker conditions}
%\newcommand{\ip}[2]{\left\langle #1,#2 \right\rangle}
% \ip{x}{y}: inner product written as x^T y (angle-bracket form commented out).
\newcommand{\ip}[2]{{#1}^T{#2}}
\newcommand{\inv}[1]{#1^{-1}}
% \norm{x}: ||x||;  \pnorm{x}{p}: ||x||_p.
\newcommand{\norm}[1]{\left\| #1 \right\|}
\newcommand{\pnorm}[2]{\norm{#1}_#2}
% Projection operators: \pop{S} = P_S and \proj{S}{x} = P_S(x);
% \ppop/\pproj take an extra first argument appended in the subscript.
\newcommand{\pop}[1]{P_{#1}}
\newcommand{\ppop}[2]{P_{#2,#1}}
\newcommand{\pproj}[3]{P_{#2,#1}(#3)}
\newcommand{\proj}[2]{\pop{#1}(#2)}
\newcommand{\modulus}[1]{\left| #1 \right|}
% \real: blackboard-style R faked as "I R" with negative kern
% (works without amsfonts/amssymb).
\newcommand{\real}{\rm I\!R}
% \set{x}{P}: { x | P };  \seq{x}: { x } with auto-sized braces.
\newcommand{\set}[2]{\left\{ #1 \mid #2 \right\}}
\newcommand{\seq}[1]{\left\{ #1 \right\}}

% more common naming conventions

% \der and \grad are synonyms for the gradient operator.
\newcommand{\der}{\nabla}
\newcommand{\grad}{\nabla}
\newcommand{\half}{\frac{1}{2}}
\newcommand{\intsect}{\bigcap}
% NOTE(review): amsmath also defines \implies; if that package is ever
% loaded, this \newcommand will error here -- confirm before adding amsmath.
\newcommand{\implies}{\;\Longrightarrow\;}
% \summ{j=1}{n}: \sum with explicit lower and upper limits.
\newcommand{\summ}[2]{\sum_{#1}^{#2}}
\newcommand{\union}{\bigcup}
% \define: "is defined as" (colon-equals).
\newcommand{\define}{:=}
% \iff is already defined as \implies but with \Longleftrightarrow
% \iff is already defined as \implies but with \Longleftrightarrow

% convex analysis definitions

% \cl, \cone, \intr: closure, cone, interior of a set, with upright
% operator names via \insmat.
\newcommand{\cl}[1]{\insmat{cl} #1}
\newcommand{\cone}[1]{\insmat{cone} #1}
% \rec{C}: recession cone, written 0^+ C.
\newcommand{\rec}[1]{0^+ #1}
% \conj{f}: conjugate f^*.
\newcommand{\conj}[1]{#1^*}
% \indfn{C}: indicator function \psi_C.
\newcommand{\indfn}[1]{\psi_{#1}}
\newcommand{\intr}[1]{\insmat{int} #1}
% \ncone{C}{x} / \tcone{C}{x}: normal and tangent cones N_C(x), T_C(x);
% \pcone{K}: polar cone K^\circ.
\newcommand{\ncone}[2]{N_{#1}(#2)}
\newcommand{\pcone}[1]{{#1}^\circ}
\newcommand{\tcone}[2]{T_{#1}(#2)}

% shorter definitions for common usages

% Abbreviations for array/equation environments and \displaystyle.
\newcommand{\ba}{\begin{array}}
\newcommand{\ea}{\end{array}}
\newcommand{\ds}{\displaystyle}
% \arrayc{body}: single centered-column array.
\newcommand{\arrayc}[1]{\ba{c} #1 \ea}
\newcommand{\be}{\begin{equation}}
\newcommand{\ee}{\end{equation}}

% Deliberate overrides of the plain-TeX \matrix and \pmatrix macros:
% \matrix{body} = bracketed single-column array;
% \pmatrix{colspec}{body} = bracketed array with an explicit column spec.
\renewcommand{\matrix}[1] {\left[ \arrayc{#1} \right]}
\renewcommand{\pmatrix}[2] {\matrix{\ba{#1} #2 \ea}}

% problem definitions and mathematical constructions

% \eeqncond{relations}{side condition}{label}: numbered equation
% "relations   condition" (e.g. constraints with an index range).
\newcommand{\eeqncond}[3]
    	{\begin{equation} \ba{rl} #1 & \mbox{   #2} \ea \label{#3}
     	 \end{equation}}
% \eqncond: unnumbered variant of \eeqncond.
\newcommand{\eqncond}[2]
    	{\[ \ba{rl} #1 & \mbox{   #2} \ea \] }
% \simpleproblem{objective}{variable}{op}: "op_{variable}  objective",
% an unconstrained optimization display.
\newcommand{\simpleproblem}[3]
	{\ba{lc} {\ds #3_{#2}} & {#1} \ea}
% \epmin/\epmax: numbered (labelled) unconstrained min/max displays;
% \pmin/\pmax: unnumbered variants.
\newcommand{\epmin}[3]
   	{\begin{equation} \simpleproblem{#1}{#2}{\minimize} \label{#3}
    	 \end{equation}}
\newcommand{\pmin}[2]
   	{\[ \simpleproblem{#1}{#2}{\minimize} \] }
\newcommand{\epmax}[3]
   	{\begin{equation} \simpleproblem{#1}{#2}{\maximize} \label{#3}
     	 \end{equation}}
\newcommand{\pmax}[2]
   	{\[ \simpleproblem{#1}{#2}{\maximize} \] }
% \problem{variable}{objective}{constraints}{op}: two-row display
% "op_{variable} objective / subject to constraints".
\newcommand{\problem}[4]
	{\ba{lc} {\ds #4_{#1}} & {#2} \\*
		{\inmat{subject to}} & {\ba[t]{c} #3 \ea} 
	 \ea}
% \eprobmin/\eprobmax: numbered (labelled) constrained min/max displays;
% \probmin/\probmax: unnumbered variants.  All build on \problem.
\newcommand{\eprobmin}[4]
   	{\begin{equation} \problem{#1}{#2}{#3}{\minimize} \label{#4}
    	 \end{equation}}
\newcommand{\probmin}[3]
   	{\[ \problem{#1}{#2}{#3}{\minimize} \] }
\newcommand{\eprobmax}[4]
   	{\begin{equation} \problem{#1}{#2}{#3}{\maximize} \label{#4}
     	 \end{equation}}
\newcommand{\probmax}[3]
   	{\[ \problem{#1}{#2}{#3}{\maximize} \] }
% \compproblem{x}{F}: complementarity conditions F >= 0, x >= 0, x^T F = 0;
% the "b" variant parenthesizes the second factor of the inner product.
\newcommand{\compproblem}[2]
	{ \ba{rcl} #2 \ge 0 \\* #1 \ge 0 \\* \ip{#1}{#2} = 0 \ea }
\newcommand{\compproblemb}[2]
	{ \ba{rcl} #2 \ge 0 \\* #1 \ge 0 \\* \ip{#1}{(#2)} = 0 \ea }
% \ecomp/\ecompb: numbered (labelled) complementarity displays;
% \comp/\compb: unnumbered variants.
\newcommand{\ecomp}[3]
   	{\begin{equation}
		\compproblem{#1}{#2} \label{#3}
    	 \end{equation}}
\newcommand{\ecompb}[3]
   	{\begin{equation}
		\compproblemb{#1}{#2} \label{#3}
     	 \end{equation}}
\newcommand{\comp}[2]
   	{\[ \compproblem{#1}{#2} \] }
\newcommand{\compb}[2]
   	{\[ \compproblemb{#1}{#2} \] }

% \br: short alias for \real; \subpar: forced break plus double indent;
% \bigmid: large vertical bar for set-builder displays.
% \newcommand instead of the \def primitive: \def silently clobbers any
% existing meaning, while \newcommand errors on accidental redefinition.
\newcommand{\br}{\real}
\newcommand{\subpar}{\hfill\break\indent\indent}
\newcommand{\bigmid}{\Big\vert}
% \refe{key}: parenthesized equation reference, e.g. (3).
\newcommand{\refe}[1]{(\ref{#1})}
% \textspace: open up math display line spacing by 3 jots (invoked once here).
\newcommand{\textspace}{\openup 3\jot\relax}
\textspace
\renewcommand{\arraystretch}{1.4}

\begin{document}
{\bf MATHEMATICAL PROGRAMMING}

{\bf Instructions for Depth Exam Students}: Answer any 6 of the
8 following questions.

{\bf Instructions for Breadth Exam Students}: Answer any 3 of the
8 following questions.

\begin{enumerate}

\item Use the simplex method to solve the following problem:
\probmax{}{5x_1 - x_2 + 11x_3}
{{\begin{array}[t]{rcrcrcr}
x_1  &  -   &  x_2 & + & 2x_3  &  \leq & 3 \\
-2x_1  &  +   &  x_2 & - & 2x_3  &  \leq & 8 \end{array}}\\
{-2 \leq x_1 \leq 3},\;\;{-1 \leq x_2 \leq 1},\;\;{x_3 \leq 0}}



\item Consider
\eprobmin{}{c^Tx}{\ba{cc} A_i x \ge b_i & i=1,\ldots,m \ea}{eq:dlp}
Let $x^1$ be optimal for
\probmin{}{c^Tx}{\ba{cc} A_i x \ge b_i & i=2,\ldots,m \ea}
and $x^2$ be optimal for
\probmin{}{c^Tx}{\ba[t]{cc} A_1x = b_1 \\
A_i x \ge b_i & i=2,\ldots,m \ea}
\begin{enumerate}
\item Prove that \refe{eq:dlp} is solvable.
\item Give an upper and a lower bound for the minimum  value of \refe{eq:dlp}.
\item Prove that one of  $x^1$ or $x^2$ solves \refe{eq:dlp}.
\end{enumerate}
\eject 
\item Let
\begin{equation}  \theta(b) := \inmat{min} \{f(x) \mid g(x) \leq b \} \label{prob:1} \end{equation} 

where $f\colon \real^n \mapsto     \real$, $g\colon \real^n \mapsto    \real^m$ are convex functions on 
$\real^n$ and $ b\in\real^m$. Suppose that for each $b$ in some set $B$ in $\real^m$,
the minimization problem of \refe{prob:1} is solvable. Suppose now that  an optimal Lagrange 
multiplier $u(\bar{b}) \in \real^m_+$ exists for some fixed  $\bar{b} \in B$, such that  in addition
to satisfying  the Kuhn--Tucker saddlepoint conditions, $u(\bar{b})$ also satisfies
\[ u(\bar{b})(\bar{b}-b) \geq 0 \;\; \forall b \in B \]
What can you say about $\theta(\bar{b})$ relative to $\theta(b)$ for all $b \in B$?
Prove your claim.

\item Suppose that for some real number $\gamma > 0$, $x(\gamma) > 0$ solves the
interior penalty problem
\[  \inmat{min} \left \{ f(x) -\gamma \summ{j=1}{n} \inmat{log } x_j\;\;  \bigmid\;\;  Ax = b \right \}  \]
where $f\colon \real^n \mapsto     \real$, $A $ is an $m \times n$ real matrix, $b$ is an $m \times 1$ real
vector and $f$ is convex and differentiable on $\real^n$. Give a lower bound to 
\[ \inmat{inf} \left \{ f(x) \mid  Ax = b, \;\; x\geq 0 \right \}  \] 
in terms of $f(x(\gamma))$, $\gamma$ and $n$. Establish your claim.

\item Suppose the problem
\eprobmin{}{f(x)}{h(x) = 0}{eq:oprob}
(where $f\colon \real^n \mapsto \real$ and $h\colon \real^n \mapsto \real^m$
are continuous functions)
has a solution $x^*$.
Let $M$ be an optimistic estimate of $f(x^*)$, that is, $M \le f(x^*)$.
Consider the
unconstrained problem
\epmin{v(M,x) \define [f(x) - M]^2 + \norm{h(x)}^2}{x}{eq:pprob}
Consider the following algorithm.
Given $M_k \le f(x^*)$, a solution $x_k$ to 
problem \refe{eq:pprob} with $M = M_k$ is found, then $M_k$ is updated by
\begin{equation} 
M_{k+1} = M_k + [v(M_k, x_k)]^{1/2} 
\label{eq:update}
\end{equation}
and the process repeated.
\begin{enumerate}
\item Show that if $M = f(x^*)$, a solution of \refe{eq:pprob} is a solution of
\refe{eq:oprob}.
\item Show that if $x_M$ is a solution of \refe{eq:pprob}, then 
$f(x_M) \le f(x^*)$.
\item Show that if $M_k \le f(x^*)$ then $M_{k+1}$ determined by \refe{eq:update}
satisfies $M_{k+1} \le f(x^*)$.
\item Show that $M_k \longrightarrow f(x^*)$.
\end{enumerate}


\item Let $f$ be a semipositive vector in $\br^n$ (that is, each component
of $f$ is non-negative, and $f\neq0$), and define a function $\phi$
from $\br^n$ to the extended reals by 
\[\phi(x)=\inmat{inf}\,\{\,\alpha\mid x\le\alpha f\,\}.\]
\begin{enumerate}

\item  Show that $\phi$ is a closed proper convex function.
\item Compute the conjugate function $\phi^*$ and show that
$\phi^*$ is the indicator of a certain polyhedral convex set $S$.
Exhibit $S$ explicitly.
\item  Explain what the support function of a set is. Prove 
that $\phi$ is a support function, and identify the set of which
it is the support function.
\item Give a condition on $f$ (only) to ensure that the effective
domain of $\phi^*$ is compact. Justify your condition.
\end{enumerate}

\item Let
\probmin{x}{cx}{{Ax = b}\\{x\geq 0}}
be a min cost network flow problem
\begin{enumerate}
\item State the corresponding ``big $M$'' problem, being precise about how $M$ is obtained
from the data of the original problem.
\item Show that if the {\bf big $M$ problem is unbounded}, then the original problem is
either unbounded or infeasible.
\item What {\bf additional} procedure may be performed to identify which of the
alternatives in (b) holds?
\end{enumerate}


\item Write an integer program equivalent to 

\probmax{}{z = 3x_1 + 4x_2 -3x_3} 
{{\begin{array}[t]{rcrcrcr}
x_1  &  +  &  x_2 & + & 4x_3  &  \leq & 60 \\
-x_1  &  +   & 2x_2 & + & x_3  &  \geq & 12 \end{array}}\\
{\inmat{if } x_2 + x_3 > 0 \inmat{ then } x_1 + x_3 \leq 54}\\
{(x_1,x_2,x_3) \geq 0}}

\end{enumerate}
\end{document}
