@unpublished {36565, title = {Mean and Variance Estimation Complexity in Arbitrary Distributions via Wasserstein Minimization}, year = {2025}, url = {https://arxiv.org/abs/2501.10172}, author = {V. Iverson and S. Vavasis} } @article {35266, title = {MGProx: A nonsmooth multigrid proximal gradient method with adaptive restriction for strongly convex optimization}, journal = {SIAM J. Optimization}, year = {Accepted}, url = {https://arxiv.org/abs/2302.04077}, author = {Andersen Ang and Hans De Sterck and Stephen Vavasis} } @article {31681, title = {Nonlinear conjugate gradient for smooth convex functions}, journal = {Mathematical Programming Computation}, year = {Accepted}, abstract = {

The method of nonlinear conjugate gradients (NCG) is widely used in practice for unconstrained optimization, but it satisfies weak complexity bounds at best when applied to smooth convex functions. In contrast, Nesterov{\textquoteright}s accelerated gradient (AG) method is optimal up to constant factors for this class.

However, when specialized to quadratic functions, conjugate gradient is optimal in a strong sense among function-gradient methods. Therefore, there is seemingly a gap in the menu of available algorithms: NCG, the optimal algorithm for quadratic functions that also exhibits good practical performance for general functions, has poor complexity bounds compared to AG.

We propose an NCG method called C+AG ({\textquotedblleft}conjugate plus accelerated gradient{\textquotedblright}) to close this gap, that is, it is optimal for quadratic functions and still satisfies the best possible complexity bound for more general smooth convex functions. It takes conjugate gradient steps until insufficient progress is made, at which time it switches to accelerated gradient steps, and later retries conjugate gradient. The proposed method has the following theoretical properties: (i) It is identical to linear conjugate gradient (and hence terminates finitely) if the objective function is quadratic; (ii) Its running-time bound is $O(\epsilon^{-1/2})$ gradient evaluations for an $L$-smooth convex function, where $\epsilon$ is the desired residual reduction; (iii) Its running-time bound is $O(\sqrt{L/\ell}\,\ln(1/\epsilon))$ if the function is both $L$-smooth and $\ell$-strongly convex. We also conjecture and outline a proof that a variant of the method has the property: (iv) It is $n$-step quadratically convergent for a function whose second derivative is smooth and invertible at the optimizer. Note that the bounds in (ii) and (iii) match AG and are the best possible, i.e., they match lower bounds up to constant factors for the classes of functions under consideration. On the other hand, (i) and (iv) match NCG.

}, url = {https://arxiv.org/pdf/2111.11613.pdf}, author = {Sahar Karimi and Stephen A. Vavasis} } @report {35269, title = {A Primal-Dual Frank-Wolfe Algorithm for Linear Programming}, year = {2024}, abstract = {We present two first-order primal-dual algorithms for solving saddle point formulations of linear programs, namely FWLP (Frank-Wolfe Linear Programming) and FWLP-P. The former iteratively applies the Frank-Wolfe algorithm to both the primal and dual of the saddle point formulation of a standard-form LP. The latter is a modification of FWLP in which regularizing perturbations are used in computing the iterates. We show that FWLP-P converges to a primal-dual solution with error\ O(1/sqrt(k))\ after\ k\ iterations, while no convergence guarantees are provided for FWLP. We also discuss the advantages of using FWLP and FWLP-P for solving very large LPs. In particular, we argue that only part of the matrix\ A\ is needed at each iteration, in contrast to other first-order methods.}, url = {https://arxiv.org/abs/2402.18514}, author = {Matthew Hough and Stephen Vavasis} } @report {35268, title = {Accelerated gradient descent: A guaranteed bound for a heuristic restart strategy}, year = {2023}, abstract = {The\ O(1/k^2)\ convergence rate in function value of accelerated gradient descent is optimal, but there are many modifications that have been used to speed up convergence in practice. Among these modifications are restarts, that is, starting the algorithm with the current iteration being considered as the initial point. We focus on the adaptive restart techniques introduced by O{\textquoteright}Donoghue and Cand{\`e}s, specifically their gradient restart strategy. While the gradient restart strategy is a heuristic in general, we prove that applying gradient restarts preserves and in fact improves the\ O(1/k^2)\ bound, hence establishing function value convergence, for one-dimensional functions. Applications of our results to separable and nearly separable functions are presented.}, url = {https://arxiv.org/abs/2310.07674}, author = {Walaa Moursi and Viktor Pavlovic and Stephen Vavasis} } @article {31682, title = {Computational complexity of decomposing a symmetric matrix as a sum of positive semidefinite and diagonal matrices}, journal = {Foundations of Computational Mathematics}, volume = {2023}, year = {2023}, pages = {1-47}, abstract = {We study several variants of decomposing a symmetric matrix into a sum of a low-rank positive semidefinite matrix and a diagonal matrix. Such decompositions have applications in factor analysis and they have been studied for many decades. On the one hand, we prove that when the rank of the positive semidefinite matrix in the decomposition is bounded above by an absolute constant, the problem can be solved in polynomial time. On the other hand, we prove that, in general, these problems as well as their certain approximation versions are all NP-hard. Finally, we prove that many of these low-rank decomposition problems are complete in the first-order theory of the reals; i.e., given any system of polynomial equations, we can write down a low-rank decomposition problem in polynomial time so that the original system has a solution iff our corresponding decomposition problem has a feasible solution of certain (lowest) rank.}, url = {https://arxiv.org/abs/2209.05678}, author = {Levent Tun{\c c}el and Stephen A. 
Vavasis and Jingye Xu} } @report {35267, title = {Range of the displacement operator of PDHG with applications to quadratic and conic programming}, year = {2023}, abstract = {Primal-dual hybrid gradient (PDHG) is a first-order method for saddle-point problems and convex programming introduced by Chambolle and Pock. Recently, Applegate et al.\ analyzed the behavior of PDHG when applied to an infeasible or unbounded instance of linear programming, and in particular, showed that PDHG is able to diagnose these conditions. Their analysis hinges on the notion of the infimal displacement vector in the closure of the range of the displacement mapping of the splitting operator that encodes the PDHG algorithm. In this paper, we develop a novel formula for this range using monotone operator theory. The analysis is then specialized to conic programming and further to quadratic programming (QP) and second-order cone programming (SOCP). A consequence of our analysis is that PDHG is able to diagnose infeasible or unbounded instances of QP and of the ellipsoid-separation problem, a subclass of SOCP.}, url = {https://arxiv.org/abs/2309.15009}, author = {Tao Jiang and Walaa Moursi and Stephen Vavasis} } @report {35265, title = {Re-embedding data to strengthen recovery guarantees of clustering}, year = {2023}, abstract = {We propose a clustering method that involves chaining four known techniques into a pipeline yielding an algorithm with stronger recovery guarantees than any of the four components separately. Given n points in R^d, the first component of our pipeline, which we call leapfrog distances, is reminiscent of density-based clustering, yielding an n {\texttimes} n distance matrix. The leapfrog distances are then translated to new embeddings using multidimensional scaling and spectral methods, two other known techniques, yielding new embeddings of the n points in R^d{\textquoteright}, where d{\textquoteright} satisfies d{\textquoteright} << d in general. Finally, sum-of-norms (SON) clustering is applied to the re-embedded points. Although the fourth step (SON clustering) can in principle be replaced by any other clustering method, our focus is on provable guarantees of recovery of underlying structure. Therefore, we establish that the re-embedding improves the recovery guarantees of SON clustering, since SON clustering is a well-studied method that already has provable guarantees.}, url = {https://arxiv.org/abs/2301.10901}, author = {Tao Jiang and Samuel Tang and Stephen Vavasis} } @article {31680, title = {Low-rank matrix recovery with Ky Fan 2-k-norm}, journal = {Journal of Global Optimization}, volume = {82}, year = {2022}, pages = {727-751}, abstract = {The low-rank matrix recovery problem is difficult due to its non-convex properties and it is usually solved using convex relaxation approaches. In this paper, we formulate the non-convex low-rank matrix recovery problem exactly using novel Ky Fan 2-k-norm-based models. A general difference of convex functions algorithm (DCA) is developed to solve these models. A proximal point algorithm (PPA) framework is proposed to solve sub-problems within the DCA, which allows us to handle large instances. Numerical results show that the proposed models achieve high recoverability rates as compared to the truncated nuclear norm method and the alternating bilinear optimization approach.
The results also demonstrate that the proposed DCA with the PPA framework is efficient in handling larger instances.}, url = {https://link.springer.com/article/10.1007/s10898-021-01031-0}, author = {Xuan Vinh Doan and Stephen A. Vavasis} } @article {31683, title = {Certifying clusters from sum-of-norms clustering}, year = {2021}, abstract = {Sum-of-norms clustering is a clustering formulation based on convex optimization that automatically induces hierarchy. Multiple algorithms have been proposed to solve the optimization problem: subgradient descent by Hocking et al., ADMM and ADA by Chi and Lange, stochastic incremental algorithm by Panahi et al. and semismooth Newton-CG augmented Lagrangian method by Sun et al. All algorithms yield approximate solutions, even though an exact solution is demanded to determine the correct cluster assignment. The purpose of this paper is to close the gap between the output from existing algorithms and the exact solution to the optimization problem. We present a clustering test that identifies and certifies the correct cluster assignment from an approximate solution yielded by any primal-dual algorithm. Our certification validates clustering for both unit and multiplicative weights. The test may not succeed if the approximation is inaccurate. However, we show the correct cluster assignment is guaranteed to be certified by a primal-dual path following algorithm after sufficiently many iterations, provided that the model parameter λ avoids a finite number of bad values. Numerical experiments are conducted on Gaussian mixture and half-moon data, which indicate that carefully chosen multiplicative weights increase the recovery power of sum-of-norms clustering.}, url = {https://arxiv.org/abs/2006.11355}, author = {Tao Jiang and Stephen A Vavasis} } @conference {26333, title = {Provable overlapping community detection in weighted graphs}, booktitle = {Neural Information Processing Systems (NeurIPS)}, volume = {2020}, year = {2020}, abstract = {Community detection is a widely-studied unsupervised learning problem in which the task is to group similar entities together based on observed pairwise entity interactions. This problem has applications in diverse domains such as social network analysis and computational biology. There is a significant amount of literature studying this problem under the assumption that the communities do not overlap. When the communities are allowed to overlap, often a pure nodes assumption is made, i.e. each community has a node that belongs exclusively to that community. This assumption, however, may not always be satisfied in practice. In this paper, we provide a provable method to detect overlapping communities in weighted graphs without explicitly making the pure nodes assumption. Moreover, contrary to most existing algorithms, our approach is based on convex optimization, for which many useful theoretical properties are already known. We demonstrate the success of our algorithm on artificial and real-world datasets}, url = {https://proceedings.neurips.cc/paper/2020}, author = {Jimit Majmudar and Stephen Vavasis} } @article {jiangvavasiszhai, title = {Recovery of a mixture of Gaussians by sum-of-norms clustering}, journal = {Journal of Machine Learning Research}, volume = {21}, year = {2020}, note = {Arxiv link: }, pages = {1-16}, abstract = {Sum-of-norms clustering is a method for assigning n points in Rd to K clusters, 1<=K<=n, using convex optimization. Recently, Panahi et al. 
(2017) proved that sum-of-norms clustering is guaranteed to recover a mixture of Gaussians under the restriction that the number of samples is not too large. The purpose of this note is to lift this restriction, that is, show that sum-of-norms clustering can recover a mixture of Gaussians even as the number of samples tends to infinity. Our proof relies on an interesting characterization of clusters computed by sum-of-norms clustering that was developed inside a proof of the agglomeration conjecture by Chiquet et al. (2017). Because we believe this theorem has independent interest, we restate and reprove the Chiquet et al. (2017) result herein.}, url = {https://jmlr.org/papers/volume21/19-218/19-218.pdf}, author = {Jiang, T. and Vavasis, S. and Zhai, C. W.} } @article {21100, title = {Second-order cone interior-point method for quasistatic and moderate dynamic cohesive fracture}, journal = {Comput. Meth. Appl. Mech. Engr.}, volume = {358}, year = {2020}, note = {Arxiv version: }, pages = {112633}, abstract = {


Cohesive fracture is among the few techniques able to model complex fracture nucleation and propagation with a sharp (nonsmeared) representation of the crack. Implicit time-stepping schemes are often favored in mechanics due to their ability to take larger time steps in quasistatic and moderate dynamic problems. Furthermore, initially rigid cohesive models are typically preferred when the location of the crack is not known in advance, since initially elastic models artificially lower the material stiffness. It is challenging to include an initially rigid cohesive model in an implicit scheme because the initiation of fracture corresponds to a nondifferentiability of the underlying potential. In this work, an interior-point method is proposed for implicit time stepping of initially rigid cohesive fracture. It uses techniques developed for convex second-order cone programming for the nonconvex problem at hand. The underlying cohesive model is taken from Papoulia (2017) and is based on a nondifferentiable energy function. That previous work proposed an algorithm based on successive smooth approximations to the nondifferentiable objective for solving the resulting optimization problem. It is argued herein that cone programming can capture the nondifferentiability without smoothing, and the resulting cone formulation is amenable to interior-point algorithms. A further benefit of the formulation is that other conic inequality constraints are straightforward to incorporate. Computational results are provided showing that certain contact constraints can be easily handled and that the method is practical.

}, url = {https://arxiv.org/abs/1909.10641}, author = {Vavasis, Stephen and Papoulia, Katerina and Hirmand, M. Reza} } @unpublished {24289, title = {A termination criterion for stochastic gradient descent for binary classification}, year = {2020}, abstract = {We propose a new, simple, and computationally inexpensive termination test for constant step-size stochastic gradient descent (SGD) applied to binary classification on the logistic and hinge loss with homogeneous linear predictors. Our theoretical results support the effectiveness of our stopping criterion when the data is Gaussian distributed. This presence of noise allows for the possibility of non-separable data. We show that our test terminates in a finite number of iterations and, when the noise in the data is not too large, the expected classifier at termination nearly minimizes the probability of misclassification. Finally, numerical experiments indicate for both real and synthetic data sets that our termination test exhibits a good degree of predictability on accuracy and running time.}, url = {https://arxiv.org/abs/2003.10312}, author = {Sina Baghal and Courtney Paquette and Stephen Vavasis} } @inbook {doanvavasis3, title = {Low-rank matrix recovery with Ky Fan 2-k-norm}, booktitle = {Optimization of Complex Systems: Theory, Models and Applications}, year = {2019}, note = {Arxiv link: }, pages = {310-319}, author = {Doan, X. V. and Vavasis, S.}, editor = {Le Thi et al.} } @unpublished {paquettevavasis, title = {Potential-based analyses of first-order methods for constrained and composite optimization}, year = {2019}, note = {Arxiv link: }, author = {Paquette, C. and Vavasis, S.} } @article {gillisvavasis3, title = {On the Complexity of Robust PCA and l1-Norm Low-Rank Matrix Approximation}, journal = {Mathematics of Operations Research}, volume = {43}, year = {2018}, note = {Arxiv link: }, pages = {1072-1084}, author = {Nicolas Gillis and Stephen A. Vavasis} } @unpublished {interiorfrac, title = {Second-order cone interior-point method for quasistatic and moderate dynamic cohesive fracture}, year = {2018}, note = {submitted for publication, CMAME}, author = {S. Vavasis and K. Papoulia and M. Hirmand} } @article {KarimiVavasis, title = {IMRO: A Proximal Quasi-Newton Method for Solving $\ell_1$-Regularized Least Squares Problems}, journal = {SIAM J. Optimiz.}, volume = {27}, year = {2017}, note = {Arxiv link: }, pages = {583-615}, author = {S. Karimi and S. Vavasis} } @unpublished {KarimiVavasis2017, title = {A single potential governing convergence of conjugate gradient, accelerated gradient and geometric descent}, year = {2017}, note = {Arxiv link: }, author = {Sahar Karimi and Stephen Vavasis} } @article {DoanVavasis2, title = {Finding the largest low-rank clusters with Ky Fan 2-k-norm and l1-norm}, journal = {SIAM J. Optim.}, volume = {26}, year = {2016}, note = {Arxiv link: }, pages = {274-312}, author = {X. V. Doan and S. Vavasis} } @unpublished {KarimiVavasis2016, title = {A unified convergence bound for conjugate gradient and accelerated gradient}, year = {2016}, note = {Arxiv link: }, author = {S. Karimi and S.
Vavasis} } @article {drusvavawolk, title = {Extreme point inequalities and geometry of the rank sparsity ball}, journal = {Mathematical Programming}, volume = {152}, number = {1}, year = {2015}, note = {Arxiv link: }, month = {Aug}, pages = {521{\textendash}544}, abstract = {We investigate geometric features of the unit ball corresponding to the sum of the nuclear norm of a matrix and the $l_1$ norm of its entries, a common penalty function encouraging joint low rank and high sparsity. As a byproduct of this effort, we develop a calculus (or algebra) of faces for general convex functions, yielding a simple and unified approach for deriving inequalities balancing the various features of the optimization problem at hand, at the extreme points of the solution set.}, issn = {1436-4646}, doi = {10.1007/s10107-014-0795-8}, url = {https://doi.org/10.1007/s10107-014-0795-8}, author = {Drusvyatskiy, D. and Vavasis, S. A. and Wolkowicz, H.} } @article {GillisVavasis2, title = {Semidefinite Programming Based Preconditioning for More Robust Near-Separable Nonnegative Matrix Factorization}, journal = {SIAM J. Optim.}, volume = {25}, number = {1}, year = {2015}, note = {Arxiv link: }, pages = {677-698}, author = {Nicolas Gillis and Stephen A. Vavasis} } @article {AmesVavasis2, title = {Convex optimization for the planted k-disjoint-clique problem}, journal = {Math. Progr.}, volume = {143}, year = {2014}, note = {Arxiv link: }, pages = {299-337}, author = {Brendan P.W. Ames and Stephen A. Vavasis} } @unpublished {ElkinKeiVavasis, title = {Convex relaxation for finding planted influential nodes in a social network}, year = {2013}, note = {Arxiv link: }, author = {L. Elkin and Ting Kei Pong and S. Vavasis} } @article {GillisVavasis, title = {Fast and Robust Recursive Algorithms for Separable Nonnegative Matrix Factorization}, journal = {IEEE Trans. Pattern Analysis and Machine Intelligence}, volume = {36}, year = {2013}, note = {Arxiv link: }, pages = {698-714}, author = {N. Gillis and S. A. Vavasis} } @article {DoanVavasis, title = {Finding approximately rank-one submatrices with the nuclear norm and $\ell_1$ norm}, journal = {SIAM J. Optimiz.}, volume = {23}, year = {2013}, note = {Arxiv link: }, pages = {2502-2540}, author = {X. V. Doan and S. Vavasis} } @article {DoanTohVavasis, title = {A proximal point algorithm for sequential feature extraction applications}, journal = {SIAM J. Sci. Comput.}, year = {2013}, note = {}, pages = {A517-A540}, author = {X. V. Doan and K.-C. Toh and S. Vavasis} } @unpublished {divdiff, title = {Some notes on applying computational divided differencing in optimization}, year = {2013}, note = {Arxiv link: }, author = {S. Vavasis} } @unpublished {KarimiVavasisDet, title = {Detecting and correcting loss of independence in nonlinear conjugate gradient}, year = {2012}, note = {Arxiv link: }, author = {S. Karimi and S. Vavasis} } @conference {SastryShontzVavasis, title = {A log-barrier method for mesh quality improvement}, booktitle = {Proceedings of 20th International Meshing Roundtable}, year = {2012}, note = { appeared in Proceedings of the 2011 International Meshing Roundtable. }, pages = {329-346}, publisher = {Springer}, organization = {Springer}, author = {S. Sastry and S. Shontz and S. Vavasis} } @article {gun2, title = {A condition number analysis of an algorithm for solving a system of polynomial equations with one degree of freedom}, journal = {SIAM J. Sci.
Comput.}, volume = {33}, year = {2011}, pages = {433-454}, author = {Gun Srijuntongsiri and Stephen Vavasis} } @article {AmesVavasis, title = {Nuclear norm minimization for the planted clique and biclique problems}, journal = {Mathematical Programming}, volume = {129}, number = {1}, year = {2011}, pages = {69-89}, author = {Ames, B. and Vavasis, S.} } @article {hyperelastic, title = {A robust solution procedure for hyperelastic solids with large boundary deformation}, journal = {Engineering with Computers}, volume = {28}, number = {2}, year = {2011}, pages = {135-147}, author = {S. Shontz and S. Vavasis} } @article {ereversal, title = {Analysis of and workarounds for element reversal for a finite element-based algorithm for warping triangular and tetrahedral meshes}, journal = {BIT Numerical Mathematics}, volume = {50}, number = {4}, year = {2010}, pages = {863-884}, author = {S. Shontz and S. Vavasis} } @article {nmfnphard, title = {On the complexity of nonnegative matrix factorization}, journal = {SIAM J. Optim.}, volume = {20}, number = {3}, year = {2009}, pages = {1364-1377}, author = {Stephen A. Vavasis} } @article {linesurf, title = {A Condition Number Analysis of a Line-Surface Intersection Algorithm}, journal = {SIAM J. Sci. Comput.}, volume = {30}, year = {2008}, pages = {1064-1081}, author = {Gun Srijuntongsiri and Stephen A. Vavasis} } @unpublished {secant, title = {A new secant method for unconstrained optimization}, year = {2008}, note = {Archived in arxiv.org, 0808.2316}, author = {Stephen A. Vavasis} } @conference {r1d, title = {Nonnegative matrix factorization via rank-one downdating}, booktitle = {Proceedings of the 2008 International Conference on Machine Learning}, year = {2008}, note = {Proceedings published online at \url{http://icml2008.cs.helsinki.fi/abstracts.shtml}, Preliminary version of the full paper in arxiv.org, 0808.0120}, author = {Michael Biggs and Ali Ghodsi and Stephen A. Vavasis} } @article {femsupptree, title = {Solving Elliptic Finite Element Systems in Near-Linear Time with Support Preconditioners}, journal = {SIAM J. Numer. Anal.}, volume = {46}, year = {2008}, pages = {3264-3284}, author = {E. Boman and B. Hendrickson and S. Vavasis} } @unpublished {surfsurf, title = {A Condition Number Analysis of a Surface-Surface Intersection Algorithm}, year = {2007}, note = {Archived in arxiv.org, 0711.4656}, author = {Gun Srijuntongsiri and Stephen A. Vavasis} } @unpublished {polybasis, title = {Properties of polynomial bases used in line-surface intersection algorithm}, year = {2007}, note = {Archived in arxiv.org, 0707.1515; submitted to ICCEE 2008 Conference}, author = {Gun Srijuntongsiri and Stephen A. Vavasis} } @article {pinw, title = {An algorithm for two-dimensional mesh generation based on the pinwheel tiling}, journal = {SIAM J. Scientific Computing}, volume = {28}, year = {2006}, pages = {1533-1562}, author = {P. Ganguly and S. Vavasis and K. Papoulia} } @unpublished {roots, title = {A conjecture that the roots of a univariate polynomial lie in a union of annuli}, year = {2006}, note = {Archived in arxiv.org, math.CV/0606194}, author = {S. Vavasis} } @article {spatialconv, title = {Spatial convergence of crack nucleation using a cohesive finite element model on a pinwheel-based mesh}, journal = {Internat. J. Numer. Meth. Eng.}, volume = {67}, year = {2006}, pages = {1-16}, abstract = {


We consider the use of initially rigid cohesive interface models in a two-dimensional dynamic finite-element solution of a fracture process. Our focus is on convergence of finite-element solutions to a solution of the undiscretized medium as the mesh spacing Δx (and therefore time-step Δt) tends to zero. We propose the use of pinwheel meshes, which possess the {\textquoteleft}isoperimetric property{\textquoteright} that for any curve C in the computational domain, there is an approximation to C using mesh edges that tends to C including a correct representation of its length, as the grid size tends to zero. We suggest that the isoperimetric property is a necessary condition for any possible spatial convergence proof in the general case that the crack path is not known in advance. Conversely, we establish that if the pinwheel mesh is used, the discrete interface first activated in the finite-element model will converge to the initial crack in the undiscretized medium. Finally, we carry out a mesh refinement experiment to check convergence of both nucleation and propagation. Our results indicate that the crack path computed in the pinwheel mesh is more stable as the mesh is refined compared to other types of meshes.}, author = {K. Papoulia and S. Vavasis and P. Ganguly} } @article {macaulay, title = {Accurate solution of polynomial equations using Macaulay resultant matrices}, journal = {Mathematics of Computation}, volume = {74}, year = {2005}, pages = {221-262}, author = {G. J{\'o}nsson and S. Vavasis} } @article {HowleVava2, title = {An iterative method for solving complex-symmetric systems arising in electrical power modeling}, journal = {SIAM J. Matrix Analysis App.}, volume = {26}, year = {2005}, pages = {1150-1178}, author = {V. E. Howle and S. A. Vavasis} } @article {timecont2, title = {Obtaining initially rigid cohesive finite element models that are temporally convergent}, journal = {Engineering Fracture Mechanics}, volume = {72}, year = {2005}, pages = {2247-2267}, author = {Chin-Hang Sam and K. Papoulia and S. Vavasis} } @article {Jonsson1, title = {Solving polynomials with small leading coefficients}, journal = {SIAM J. Matrix Analysis App.}, volume = {26}, year = {2005}, pages = {400-414}, author = {G. J{\'o}nsson and S. Vavasis} } @unpublished {sparsesdp, title = {A Fully Sparse Implementation of a Primal- Dual Interior-Point Potential Reduction Method for Semidefinite Programming}, year = {2004}, note = {Archived in arxiv.org, cs.NA/0412009}, author = {Gun Srijuntongsiri and S. Vavasis} } @unpublished {Vava:bezier, title = {A Bernstein-B{\'e}zier Sufficient Condition for Invertibility of Polynomial Mappings}, year = {2003}, note = {Archived by arxiv.org, cs.NA/0308021}, author = {S. Vavasis} } @article {timecont, title = {Time continuity in cohesive finite element modeling}, journal = {International J. Numer. Meth. Eng.}, volume = {58}, number = {5}, year = {2003}, pages = {679-701}, author = {K. Papoulia and S. Vavasis and C.-H. Sam} } @article {BobrVava2, title = {Accurate Solution of Weighted Least Squares by Iterative Methods}, journal = {SIAM J. Matrix Anal. App.}, volume = {22}, year = {2001}, pages = {1153-1174}, author = {E. Y. Bobrovnikova and S. A. Vavasis} } @article {BobrVava3, title = {A Norm Bound for Projections with Complex Weights}, journal = {Linear Algebra and its Applications}, volume = {307}, year = {2000}, pages = {69-75}, author = {E. Bobrovnikova and S. 
Vavasis} } @article {MitchVava:QMG, title = {Quality Mesh Generation in Higher Dimensions}, journal = {SIAM J. Computing}, volume = {29}, year = {2000}, pages = {1334-1370}, author = {S. A. Mitchell and S. A. Vavasis} } @inbook {Vavasis:convex, title = {Convex Optimization}, booktitle = {Algorithms and Theory of Computation Handbook}, year = {1999}, publisher = {CRC Press}, organization = {CRC Press}, author = {S. A. Vavasis} } @unpublished {autodiff1, title = {A note on efficient computation of the gradient in semidefinite programming}, year = {1999}, note = {Preprint}, author = {S. Vavasis} } @article {MillerTengThurVava2, title = {Geometric Separators for Finite-Element Meshes}, journal = {SIAM J. Sci. Comput.}, volume = {19}, year = {1998}, pages = {364-386}, author = {G. L. Miller and S.-H. Teng and W. Thurston and S. Vavasis} } @article {DriscollVava, title = {Numerical conformal mapping using cross-ratios and Delaunay triangulation}, journal = {SIAM J. Sci. Comput.}, volume = {19}, year = {1998}, pages = {1783-1803}, author = {T. A. Driscoll and S. A. Vavasis} } @conference {HowleVava, title = {Preconditioning complex-symmetric layered systems arising in electrical power modeling}, booktitle = {Proceedings of the Copper Mountain Conference on Iterative Methods}, year = {1998}, author = {V. E. Howle and S. Vavasis} } @article {HoughVavasis, title = {Complete orthogonal decomposition for weighted least squares}, journal = {SIAM J. Matrix Anal. Appl.}, volume = {18}, year = {1997}, author = {P. Hough and S. Vavasis} } @article {MillerTengVavaThur1, title = {Separators for sphere-packings and nearest neighbor graphs}, journal = {J. ACM}, volume = {44}, year = {1997}, pages = {1-29}, author = {G. L. Miller and S.-H. Teng and W. Thurston and S. A. Vavasis} } @conference {MitchVava:asp, title = {An aspect ratio bound for triangulating a $d$-grid cut by a hyperplane (extended abstract)}, booktitle = {Proc. 12th ACM Symposium on Computational Geometry}, year = {1996}, pages = {48-57}, author = {S. A. Mitchell and S. Vavasis} } @article {VavaYe:basis, title = {Identifying an Optimal Basis in Linear Programming}, journal = {Annals of Operations Research}, volume = {62}, year = {1996}, pages = {565{\textendash}572}, author = {S. A. Vavasis and Y. Ye} } @article {VavaYe:long, title = {A primal-dual interior point method whose running time depends only on the constraint matrix}, journal = {Mathematical Programming}, volume = {74}, year = {1996}, pages = {79{\textendash}120}, author = {S. A. Vavasis and Y. Ye} } @unpublished {qmg-url, title = {QMG: Software for finite-element mesh generation}, year = {1996}, note = {See \url{http://www.cs.cornell.edu/home/vavasis/qmg-home.html}}, author = {S. A. Vavasis} } @inbook {VavaYe:indicator, title = {On the relationship between layered least squares and affine scaling steps}, booktitle = {Lectures in Applied Mathematics, volume 32}, year = {1996}, publisher = {American Mathematical Society}, organization = {American Mathematical Society}, address = {Providence, RI}, author = {S. A. Vavasis and Y. Ye} } @article {Vava:fe, title = {Stable finite elements for problems with wild coefficients}, journal = {SIAM J. Numer. Anal.}, volume = {33}, year = {1996}, pages = {890{\textendash}916}, author = {S. A. Vavasis} } @article {VavaYe:short, title = {Condition Numbers for Polyhedra with Real Number Data}, journal = {Operations Research Letters}, volume = {17}, year = {1995}, pages = {209-214}, author = {S. A. Vavasis and Y.
Ye} } @conference {VavaYe:stoc, title = {An accelerated interior point method whose running time depends only on $A$ (extended abstract)}, booktitle = {Proceedings of the 26th Symposium on the Theory of Computing}, year = {1994}, pages = {512{\textendash}521}, publisher = {ACM Press}, organization = {ACM Press}, author = {S. A. Vavasis and Y. Ye} } @booklet {BondVava2, title = {Fast Wavelet Transforms for Matrices Arising from Boundary Element Methods}, number = {CTC94TR174}, year = {1994}, publisher = {Cornell Theory Center, Cornell University}, author = {D. M. Bond and S. A. Vavasis} } @article {Vava:stab, title = {Stable numerical algorithms for equilibrium systems}, journal = {SIAM J. Matrix Anal. Appl.}, volume = {15}, year = {1994}, pages = {1108{\textendash}1131}, author = {S. A. Vavasis} } @article {SternVava2, title = {Active set methods for problems in column block angular form}, journal = {Matem{\'a}tica Aplicada e Computacional}, volume = {12}, year = {1993}, pages = {199{\textendash}226}, author = {J. M. Stern and S. A. Vavasis} } @inbook {MillerTengThurVava, title = {Automatic Mesh Partitioning}, booktitle = {Graph Theory and Sparse Matrix Computation}, year = {1993}, publisher = {Springer Verlag}, organization = {Springer Verlag}, author = {G. Miller and S.-H. Teng and W. Thurston and S. Vavasis}, editor = {Alan George and John Gilbert and Joseph W. H. Liu} } @article {Vava:black, title = {Black-box complexity of local minimization}, journal = {SIAM Journal on Optimization}, volume = {3}, year = {1993}, pages = {60{\textendash}80}, author = {S. A. Vavasis} } @inbook {Vava:chapter, title = {Complexity issues in global optimization: A survey}, booktitle = {Handbook for Global Optimization}, year = {1993}, publisher = {Kluwer Academic Publishers}, organization = {Kluwer Academic Publishers}, author = {Vavasis, S. A.}, editor = {R. Horst and P. M. Pardalos} } @article {SternVava, title = {Nested dissection for sparse nullspace bases}, journal = {SIAM J. Matrix Anal. Appl.}, volume = {14}, year = {1993}, pages = {766{\textendash}775}, author = {J. M. Stern and S. A. Vavasis} } @inbook {Vava:poly, title = {Polynomial time weak approximation algorithms for quadratic programming}, booktitle = {Complexity in Numerical Optimization}, year = {1993}, pages = {490{\textendash}500}, publisher = {World Scientific}, organization = {World Scientific}, author = {Vavasis, S. A.}, editor = {P. M. Pardalos} } @inbook {Vava:concave, title = {On approximation algorithms for concave quadratic programming}, booktitle = {Recent Advances in Global Optimization}, year = {1992}, pages = {3{\textendash}18}, publisher = {Princeton University Press}, organization = {Princeton University Press}, address = {Princeton, N.J.}, author = {Vavasis, S. A.}, editor = {C. A. Floudas and P. M. Pardalos} } @article {Vava:indef, title = {Approximation algorithms for indefinite quadratic programming}, volume = {57}, year = {1992}, pages = {279{\textendash}311}, author = {Vavasis, S. A.} } @article {Vava:locindef, title = {Local minima for indefinite quadratic knapsack problems}, journal = {Mathematical Programming}, volume = {54}, year = {1992}, pages = {127{\textendash}153}, author = {S. A. Vavasis} } @conference {BondVava, title = {Multigrid for mixed boundary integral equations}, booktitle = {Proc. 1992 Copper Mountain Conference on Iterative Methods}, year = {1992}, author = {D. M. Bond and S. A.
Vavasis} } @article {Vava:precond, title = {Preconditioners for boundary integral equations}, volume = {13}, year = {1992}, pages = {905{\textendash}925}, author = {S. A. Vavasis} } @conference {MitchVava, title = {Quality mesh generation in three dimensions}, booktitle = {Proceedings of the ACM Computational Geometry Conference}, year = {1992}, note = {Also appeared as Cornell C.S. TR 92-1267}, pages = {212{\textendash}221}, author = {S. A. Mitchell and S. A. Vavasis} } @article {Vava:partition, title = {Automatic domain partitioning in three dimensions}, volume = {12}, year = {1991}, pages = {950{\textendash}970}, author = {S. A. Vavasis} } @booklet {HsuLeanLiuVava, title = {A boundary element method for three-dimensional free surface flow}, number = {X9100283}, year = {1991}, publisher = {Xerox}, author = {H.-W. Hsu and M. H. Lean and P. L.-F. Liu and S. A. Vavasis} } @conference {MillVava, title = {Density graphs and separators}, booktitle = {Proc. SIAM-ACM Symposium on Discrete Algorithms}, year = {1991}, author = {G. L. Miller and S. A. Vavasis} } @book {Vava:book, title = {Nonlinear Optimization: Complexity Issues}, year = {1991}, publisher = {Oxford University Press}, organization = {Oxford University Press}, address = {New York}, author = {Vavasis, S. A.} } @article {PardVava91, title = {Quadratic programming with one negative eigenvalue is NP-hard}, journal = {Journal of Global Optimization}, volume = {1}, year = {1991}, pages = {15{\textendash}22}, author = {P. M. Pardalos and S. A. Vavasis} } @article {MoreVava, title = {On the solution of concave knapsack problems}, journal = {Mathematical Programming}, volume = {49}, year = {1991}, pages = {397{\textendash}411}, author = {J. J. Mor{\'e} and S. A. Vavasis} } @conference {MillerTengVava, title = {A unified geometric approach to graph separators}, booktitle = {Proceedings of the Symposium on Foundations of Computer Science}, year = {1991}, pages = {538{\textendash}547}, author = {G. L. Miller and S.-H. Teng and S. A. Vavasis} } @booklet {Vava:wavsurf, title = {A note on wavelet bases for two-dimensional surfaces}, number = {90-1157}, year = {1990}, publisher = {Department of Computer Science, Cornell University}, address = {Ithaca, NY}, author = {S. A. Vavasis} } @booklet {VavaZip90, title = {Proving polynomial-time for sphere-constrained quadratic programming}, number = {90-1182}, year = {1990}, publisher = {Department of Computer Science, Cornell University}, address = {Ithaca, NY}, author = {Vavasis, S. A. and Zippel, R.} } @article {Vava:qpnp, title = {Quadratic programming is in NP}, journal = {Information Processing Letters}, volume = {36}, year = {1990}, pages = {73{\textendash}77}, author = {Vavasis, S. A.} } @article {HirschPapaVava, title = {Exponential lower bounds for finding Brouwer fixed points}, journal = {Journal of Complexity}, volume = {5}, year = {1989}, pages = {379{\textendash}416}, author = {M. D. Hirsch and C. H. Papadimitriou and S. A. Vavasis} } @article {Vava:gepp, title = {Gaussian elimination with pivoting is P-complete}, journal = {SIAM Journal on Discrete Mathematics}, volume = {2}, year = {1989}, pages = {412{\textendash}423}, author = {S. A. Vavasis} } @conference {HirschVava, title = {Exponential lower bounds for finding Brouwer fixed points}, booktitle = {Proceedings 28th Symposium on Foundations of Computer Science}, year = {1987}, pages = {401{\textendash}410}, publisher = {IEEE Computer Society Press}, organization = {IEEE Computer Society Press}, author = {M. D. Hirsch and S. A.
Vavasis} } @inbook {LeibVava, title = {Analysis of polynomial approximation algorithms for constraint expressions}, booktitle = {Theoretical Computer Science: 6th GI conference, Dortmund}, series = {Lecture Notes in Computer Science}, volume = {145}, year = {1982}, publisher = {Springer Verlag}, organization = {Springer Verlag}, address = {Berlin}, author = {K. J. Lieberherr and S. A. Vavasis}, editor = {A. B. Cremers and H. P. Kriegel} }