\documentclass[11pt]{amsart}
\setlength{\topmargin}{-0.3in} % usually -0.25in
\addtolength{\textheight}{1.0in} % usually 1.25in
\addtolength{\oddsidemargin}{-0.45in}
\addtolength{\evensidemargin}{-0.45in}
\addtolength{\textwidth}{1.0in}
%\setlength{\parindent}{0pt}
\newcommand{\normalspacing}{\renewcommand{\baselinestretch}{1.05}\tiny\normalsize}
\normalspacing
% macros
\usepackage{amssymb,xspace,alltt,verbatim,pgfplots}
%\usepackage[final]{graphicx}
\usepackage{fancyvrb}
\newtheorem*{lem*}{Lemma}
\newtheorem*{thm}{Theorem}
\newcommand{\bs}{\mathbf{s}}
\newcommand{\bu}{\mathbf{u}}
\newcommand{\bv}{\mathbf{v}}
\newcommand{\bx}{\mathbf{x}}
\newcommand{\bbf}{\mathbf{f}}
\newcommand{\CC}{{\mathbb{C}}}
\newcommand{\RR}{{\mathbb{R}}}
\newcommand{\eps}{\epsilon}
\newcommand{\ZZ}{{\mathbb{Z}}}
\newcommand{\ZZn}{{\mathbb{Z}}_n}
\newcommand{\NN}{{\mathbb{N}}}
\newcommand{\ip}[2]{\left\langle #1,#2\right\rangle}
\newcommand{\spn}{\operatorname{span}}
\newcommand{\emach}{\epsilon_{\mathrm{mach}}}
\newcommand{\prob}[1]{\bigskip\noindent\large\textbf{\underline{#1.}} \, \normalsize}
\newcommand{\apart}[1]{\textbf{(#1)} \,}
\newcommand{\epart}[1]{\medskip\noindent \textbf{(#1)} \,}
\begin{document}
\thispagestyle{empty}
\Large \noindent \underline{\textbf{SAMPLE}} \hfill\underline{\textbf{SAMPLE}}
\scriptsize \noindent February 2025 \hfill \tiny [AUTHOR: Bueler]
\normalsize\bigskip
\centerline{\large \textbf{Optimization Comprehensive Exam}}
\bigskip
\centerline{\textsc{Do \textbf{SIX} of the following eight problems; clearly indicate which ones.}}
\smallskip
\thispagestyle{empty}
\prob{A} \apart{a} Consider the linear programming problem
\begin{alignat*}{2}
\text{minimize} && \qquad z = - 2 &x_1 - 3 x_2 \\
\text{subject to} && -x_1 + x_2 &\ge -3 \\
&& - 2 x_1 + x_2 &\le 1 \\
&& x_1 + x_2 & \le 7 \\
&& x_1, x_2 &\ge 0
\end{alignat*}
Sketch the feasible set $S$ for the above problem. Be sure to label the axes, and \textbf{give the coordinates for each extreme point of $S$}. 
\epart{b} Put the above problem in standard form.
\epart{c} Which extreme point solves the problem?
\epart{d} If you start at the extreme point $(0,0)$, and apply the usual simplex method, which will be the next extreme point to be visited?
\prob{B} Consider a feasible set defined by linear equality and inequality constraints:
$$S = \left\{x \in \RR^n\,:\quad \begin{matrix} a_i^T x = b_i \quad \text{for $i$ in $\mathcal{E}$} \\ a_i^T x \ge b_i \quad \text{for $i$ in $\mathcal{I}$} \end{matrix}\right\}$$
Here, for each $i$ in the finite index sets $\mathcal{E}$ and $\mathcal{I}$, we have $a_i\in \RR^n$ and $b_i\in\RR$. Let $\tilde x\in S$ be a feasible point.
\epart{a} By definition, $p\in\RR^n$ is a feasible direction at $\tilde x$ if there is $\eps>0$ so that $x = \tilde x + \alpha p$ is feasible ($x\in S$) for all $0\le \alpha \le \eps$. Identify necessary and sufficient conditions on $p\in\RR^n$ so that $p$ is a feasible direction.
\epart{b} Suppose $p\in\RR^n$ is a feasible direction at $\tilde x$. Identify necessary and sufficient conditions on $\alpha \ge 0$ so that $x = \tilde x + \alpha p$ is feasible, that is, $x\in S$. (\emph{Hint. The conditions will depend on $a_i,b_i,\tilde x,p$.})
\prob{C} Suppose $x$ is a point of a set $S = \{x \in \RR^n\,:\,Ax=b, x\ge 0\}$, where $A$ is an $m\times n$ matrix with $m\le n$, and $b\in\RR^m$. Show that if $x$ is a basic feasible solution then it is an extreme point of $S$.
\prob{D} Consider the 2D unconstrained minimization problem
$$\text{minimize} \quad f(x)=3\sin(x_1)+\cos(x_2) + \frac{1}{20} \left(x_1^2 - x_1 x_2 + 2 x_2^2\right).$$
\epart{a} Compute the gradient and the Hessian.
\epart{b} The surface $z=f(x)$ has many local maxima and minima. Choose an algorithm which would quickly find a local minimum, and write pseudocode for this algorithm. Choose an algorithm which uses the Hessian and which has guarantees of finding critical points. 
\epart{c} Describe an algorithm which would find all of the local minima in a closed rectangle, for example $R=\{-100\le x_1 \le 100, -100\le x_2 \le 100\}$. \prob{E} Assume $f:\RR^n\to\RR$ is smooth, $b\in \RR^m$, and $A\in \RR^{m\times n}$ with $m\le n$. Consider nonlinear optimization problems which have standard-form linear constraints: $$\begin{matrix} \text{minimize} \qquad & f(x) \\ \text{subject to} \qquad & Ax = b \\ & x \ge 0 \end{matrix}$$ In 2D ($n=2$) there are three possibilities for the dimension of the feasible set. The cartoons below illustrate these three possibilities in the cases where the feasible sets $S$ are non-empty, generic, and bounded when $m>0$. \bigskip \begin{tikzpicture}[scale=0.65] \filldraw [gray!50] (0,0) -- (4.9,0) -- (4.9,3.9) -- (0,3.9) -- cycle; \draw [->] (-0.5,0)--(5,0) node[right] {$x_1$}; \draw [->] (0,-0.5)--(0,4) node[above] {$x_2$}; \node at (2,2) {\Large $S$}; \node at (2,-1) {$m=0$}; \end{tikzpicture} \qquad \begin{tikzpicture}[scale=0.65] \draw [->] (-0.5,0)--(5,0) node[right] {$x_1$}; \draw [->] (0,-0.5)--(0,4) node[above] {$x_2$}; \draw [very thick] (0,3) -- (3.5,0); \draw [->] (2.1,2.1) node[xshift=2mm,yshift=2mm] {\Large $S$} -- (1.7,1.7); \node at (2,-1) {$m=1$}; \end{tikzpicture} \qquad \begin{tikzpicture}[scale=0.65] \draw [->] (-0.5,0)--(5,0) node[right] {$x_1$}; \draw [->] (0,-0.5)--(0,4) node[above] {$x_2$}; \draw [black,fill=black] (1.5,1.5) circle (0.5mm); \draw [->] (2.1,2.1) node[xshift=2mm,yshift=2mm] {\Large $S$} -- (1.7,1.7); \node at (2,-1) {$m=2$}; \end{tikzpicture} \bigskip \noindent For 3D ($n=3$) there are four possibilities $m=0,1,2,3$. Sketch the four corresponding cartoons. These cartoons should have the same annotations as the 2D versions above. 
\prob{F} Consider the problem
\begin{alignat*}{2}
\text{minimize} && f(x) &= (x_1-1)^2 + (x_2+1)^2 \\
\text{subject to} && \quad x_1^2 + x_2^2 &\le 4 \\
&& x_2 &\ge 0
\end{alignat*}
\epart{a} Sketch the feasible set and explain informally, perhaps using contours of $f$, why $x_*=(1,0)^\top$ is the solution.
\epart{b} Write the constraints in the form $g_i(x)\ge 0$. Compute the Lagrangian and its gradient.
\epart{c} For each of the points $A = (0,0)^\top$, $B=(0,2)^\top$, and $C=(1,0)^\top$, compute the values of the Lagrange multipliers $\lambda_i$ satisfying the zero-gradient condition. Do any of these points satisfy the first-order optimality conditions?
\prob{G} Suppose $c\in\RR^n$ is a nonzero vector and consider the problem
\begin{alignat*}{2}
\text{minimize} && z = c^\top &x \\
\text{subject to} && \quad \sum_{i=1}^n x_i^2 &= 1
\end{alignat*}
where $x\in\RR^n$. The single equality constraint can be written as $\|x\|^2=1$.
\epart{a} Compute the Lagrangian and its gradient, and state the first-order necessary conditions.
\epart{b} Solve the first-order conditions algebraically. How many points $(x_*,\lambda_*)$ satisfy the first-order necessary conditions? What is the solution to the problem?
\prob{H} Consider the nonlinear constrained optimization problem
\begin{alignat*}{2}
\text{minimize} && &f(x) \\
\text{subject to} && \qquad g_i(x) &= 0, \quad i=1,\dots,\ell \\
&& h_i(x) &\ge 0, \quad i=1,\dots,m
\end{alignat*}
\epart{a} Assume $f,g_i,h_i$ are all smooth. State the Lagrangian for this problem.
\epart{b} Suppose $x_*$ is a local minimizer. State the complete first-order necessary conditions for the above problem. That is, give the general KKT conditions.
\end{document}