@article{abdelmalekL1SolutionOverdetermined1980,
title = {L1 {{Solution}} of {{Overdetermined Systems}} of {{Linear Equations}}},
author = {Abdelmalek, Nabih N.},
year = {1980},
month = jun,
journal = {ACM Trans. Math. Softw.},
volume = {6},
number = {2},
pages = {220--227},
issn = {0098-3500},
doi = {10.1145/355887.355894},
url = {http://doi.acm.org/10.1145/355887.355894},
urldate = {2018-08-01}
}
@article{abdelmalekLinearL1Approximation1971,
title = {Linear {{L1 Approximation}} for a {{Discrete Point Set}} and {{L1 Solutions}} of {{Overdetermined Linear Equations}}},
author = {Abdelmalek, Nabih N.},
year = {1971},
month = jan,
journal = {J. ACM},
volume = {18},
number = {1},
pages = {41--47},
issn = {0004-5411},
doi = {10.1145/321623.321628},
url = {http://doi.acm.org/10.1145/321623.321628},
urldate = {2018-08-01}
}
@book{absilOptimizationAlgorithmsMatrix2007,
title = {Optimization {{Algorithms}} on {{Matrix Manifolds}}},
author = {Absil, P.-A. and Mahony, R. and Sepulchre, Rodolphe},
year = {2007},
month = dec,
publisher = {Princeton University Press},
address = {Princeton, N.J. ; Woodstock},
url = {https://sites.uclouvain.be/absil/amsbook/},
isbn = {978-0-691-13298-3},
langid = {english}
}
@article{agrawalDifferentiableConvexOptimization2019,
title = {Differentiable {{Convex Optimization Layers}}},
author = {Agrawal, Akshay and Amos, Brandon and Barratt, Shane and Boyd, Stephen and Diamond, Steven and Kolter, Zico},
year = {2019},
month = oct,
journal = {arXiv:1910.12430 [cs, math, stat]},
eprint = {1910.12430},
primaryclass = {cs, math, stat},
url = {http://arxiv.org/abs/1910.12430},
urldate = {2021-09-17},
abstract = {Recent work has shown how to embed differentiable optimization problems (that is, problems whose solutions can be backpropagated through) as layers within deep learning architectures. This method provides a useful inductive bias for certain problems, but existing software for differentiable optimization layers is rigid and difficult to apply to new settings. In this paper, we propose an approach to differentiating through disciplined convex programs, a subclass of convex optimization problems used by domain-specific languages (DSLs) for convex optimization. We introduce disciplined parametrized programming, a subset of disciplined convex programming, and we show that every disciplined parametrized program can be represented as the composition of an affine map from parameters to problem data, a solver, and an affine map from the solver's solution to a solution of the original problem (a new form we refer to as affine-solver-affine form). We then demonstrate how to efficiently differentiate through each of these components, allowing for end-to-end analytical differentiation through the entire convex program. We implement our methodology in version 1.1 of CVXPY, a popular Python-embedded DSL for convex optimization, and additionally implement differentiable layers for disciplined convex programs in PyTorch and TensorFlow 2.0. Our implementation significantly lowers the barrier to using convex optimization problems in differentiable programs. We present applications in linear machine learning models and in stochastic control, and we show that our layer is competitive (in execution time) compared to specialized differentiable solvers from past work.},
archiveprefix = {arxiv}
}
@inproceedings{ahmadiDSOSSDSOSOptimization2014,
title = {{{DSOS}} and {{SDSOS}} Optimization: {{LP}} and {{SOCP-based}} Alternatives to Sum of Squares Optimization},
shorttitle = {{{DSOS}} and {{SDSOS}} Optimization},
booktitle = {2014 48th {{Annual Conference}} on {{Information Sciences}} and {{Systems}} ({{CISS}})},
author = {Ahmadi, A. A. and Majumdar, A.},
year = {2014},
month = mar,
pages = {1--5},
doi = {10.1109/CISS.2014.6814141},
abstract = {Sum of squares (SOS) optimization has been a powerful and influential addition to the theory of optimization in the past decade. Its reliance on relatively large-scale semidefinite programming, however, has seriously challenged its ability to scale in many practical applications. In this paper, we introduce DSOS and SDSOS optimization as more tractable alternatives to sum of squares optimization that rely instead on linear programming and second order cone programming. These are optimization problems over certain subsets of sum of squares polynomials and positive semidefinite matrices and can be of potential interest in general applications of semidefinite programming where scalability is a limitation.}
}
@techreport{ahmadiSumSquaresSOS,
type = {Lecture Notes},
title = {Sum of {{Squares}} ({{SOS}}) {{Techniques}}: {{An Introduction}}},
author = {Ahmadi, Amir Ali},
institution = {Princeton University},
url = {https://www.princeton.edu/~aaa/Public/Teaching/ORF523/S16/ORF523_S16_Lec15.pdf}
}
@article{alarieTwoDecadesBlackbox2021,
title = {Two Decades of Blackbox Optimization Applications},
author = {Alarie, St{\'e}phane and Audet, Charles and Gheribi, A{\"i}men E. and Kokkolaras, Michael and Le Digabel, S{\'e}bastien},
year = {2021},
month = jan,
journal = {EURO Journal on Computational Optimization},
volume = {9},
pages = {100011},
issn = {2192-4406},
doi = {10.1016/j.ejco.2021.100011},
url = {https://www.sciencedirect.com/science/article/pii/S2192440621001386},
urldate = {2022-06-02},
abstract = {This article reviews blackbox optimization applications of direct search optimization methods over the past twenty years. Emphasis is placed on the Mesh Adaptive Direct Search (Mads) derivative-free optimization algorithm. The main focus is on applications in three specific fields: energy, materials science, and computational engineering design. Nevertheless, other applications in science and engineering, including patents, are also considered. The breadth of applications demonstrates the versatility of Mads and highlights the evolution of its accompanying software NOMAD as a standard tool for blackbox optimization.},
langid = {english}
}
@misc{AlgorithmSCSDocumentation,
title = {Algorithm --- {{SCS}} 3.2.4 Documentation},
url = {https://www.cvxgrp.org/scs/algorithm/index.html},
urldate = {2024-01-19}
}
@phdthesis{allendeMathematicalProgramsEquilibrium2006,
title = {Mathematical Programs with Equilibrium Constraints: Solution Techniques from Parametric Optimization},
author = {Allende, Gemayqzel Bouza},
year = {2006},
address = {Enschede, NL},
url = {https://ris.utwente.nl/ws/files/6042785/thesis_Allende.pdf},
abstract = {Equilibrium constrained problems form a special class of mathematical programs where the decision variables satisfy a finite number of constraints together with an equilibrium condition. Optimization problems with a variational inequality constraint, bilevel problems and semi-infinite programs can be seen as particular cases of equilibrium constrained problems. Such models appear in many practical applications. Equilibrium constraint problems can be written in bilevel form with possibly a finite number of extra inequality constraints. This opens the way to solve these programs by applying the so-called Karush-Kuhn-Tucker approach. Here the lower level problem of the bilevel program is replaced by the Karush-Kuhn-Tucker condition, leading to a mathematical program with complementarity constraints (MPCC). Unfortunately, MPCC problems cannot be solved by classical algorithms since they do not satisfy the standard regularity conditions. To solve MPCCs one has tried to conceive appropriate modifications of standard methods. For example sequential quadratic programming, penalty algorithms, regularization and smoothing approaches. The aim of this thesis is twofold. First, as a basis, MPCC problems will be investigated from a structural and generical viewpoint. We concentrate on a special parametric smoothing approach to solve these programs. The convergence behavior of this method is studied in detail. Although the smoothing approach is widely used, our results on existence of solutions and on the rate of convergence are new. We also derive (for the first time) genericity results for the set of minimizers (generalized critical points) for one-parametric MPCC. In a second part we will consider the MPCC problem obtained by applying the KKT-approach to equilibrium constrained programs and bilevel problems. We will analyze the generic structure of the resulting MPCC programs and adapt the related smoothing method to these particular cases. All corresponding results are new.},
school = {Universiteit Twente}
}
@phdthesis{amosDifferentiableOptimizationBasedModeling2019,
title = {Differentiable {{Optimization-Based Modeling}} for {{Machine Learning}}},
author = {Amos, Brandon},
year = {2019},
url = {https://github.com/bamos/thesis},
school = {Carnegie Mellon University}
}
@misc{amosDifferentiableOptimizationBasedModeling2019a,
type = {Thesis Defense},
title = {Differentiable {{Optimization-Based Modeling}} for {{Machine Learning}}},
author = {Amos, Brandon},
year = {2019},
address = {Carnegie Mellon University}
}
@misc{anderssonCasADiDocumentation2023,
title = {{{CasADi Documentation}}},
author = {Andersson, Joel and Gillis, Joris and Horn, Greg},
year = {2023},
month = nov,
url = {https://github.com/casadi/casadi/wiki/Onboarding-Guide}
}
@article{andreaniSolutionMathematicalProgramming2001,
title = {On the Solution of Mathematical Programming Problems with Equilibrium Constraints},
author = {Andreani, Roberto and Mart{\'i}nez, Jos{\'e} Mario},
year = {2001},
month = dec,
journal = {Mathematical Methods of Operations Research},
volume = {54},
number = {3},
pages = {345--358},
issn = {1432-5217},
doi = {10.1007/s001860100158},
url = {https://doi.org/10.1007/s001860100158},
urldate = {2022-03-22},
abstract = {Mathematical programming problems with equilibrium constraints (MPEC) are nonlinear programming problems where the constraints have a form that is analogous to first-order optimality conditions of constrained optimization. We prove that, under reasonable sufficient conditions, stationary points of the sum of squares of the constraints are feasible points of the MPEC. In usual formulations of MPEC all the feasible points are nonregular in the sense that they do not satisfy the Mangasarian-Fromovitz constraint qualification of nonlinear programming. Therefore, all the feasible points satisfy the classical Fritz-John necessary optimality conditions. In principle, this can cause serious difficulties for nonlinear programming algorithms applied to MPEC. However, we show that most feasible points do not satisfy a recently introduced stronger optimality condition for nonlinear programming. This is the reason why, in general, nonlinear programming algorithms are successful when applied to MPEC.},
langid = {english}
}
@misc{anjosConicOptimizationBasics2014,
title = {Conic {{Optimization}}: {{The Basics}}, Some {{Fundamental Results}}, and {{Recent Developments}}},
author = {Anjos, Miguel F.},
year = {2014},
month = apr,
url = {http://cost-td1207.zib.de/sites/default/files/miguel_f_anjos.pdf}
}
@article{arnstromDualActiveSetSolver2022a,
title = {A {{Dual Active-Set Solver}} for {{Embedded Quadratic Programming Using Recursive LDL}}\textsuperscript{T} {{Updates}}},
author = {Arnstr{\"o}m, Daniel and Bemporad, Alberto and Axehill, Daniel},
year = {2022},
month = aug,
journal = {IEEE Transactions on Automatic Control},
volume = {67},
number = {8},
pages = {4362--4369},
issn = {1558-2523},
doi = {10.1109/TAC.2022.3176430},
abstract = {In this technical article, we present a dual active-set solver for quadratic programming that has properties suitable for use in embedded model predictive control applications. In particular, the solver is efficient, can easily be warm started, and is simple to code. Moreover, the exact worst-case computational complexity of the solver can be determined offline and, by using outer proximal-point iterations, ill-conditioned problems can be handled in a robust manner.}
}
@article{arnstromLinearProgrammingMethod2022a,
title = {A {{Linear Programming Method Based}} on {{Proximal-Point Iterations With Applications}} to {{Multi-Parametric Programming}}},
author = {Arnstr{\"o}m, Daniel and Bemporad, Alberto and Axehill, Daniel},
year = {2022},
journal = {IEEE Control Systems Letters},
volume = {6},
pages = {2066--2071},
issn = {2475-1456},
doi = {10.1109/LCSYS.2021.3138218},
abstract = {We propose a linear programming method that is based on active-set changes and proximal-point iterations. The method solves a sequence of least-distance problems using a warm-started quadratic programming solver that can reuse internal matrix factorizations from the previously solved least-distance problem. We show that the proposed method terminates in a finite number of iterations and that it outperforms state-of-the-art LP solvers in scenarios where an extensive number of small/medium scale LPs need to be solved rapidly, occurring in, for example, multi-parametric programming algorithms. In particular, we show how the proposed method can accelerate operations such as redundancy removal, computation of Chebyshev centers and solving linear feasibility problems.}
}
@phdthesis{axehillIntegerQuadraticProgramming2008,
title = {Integer {{Quadratic Programming}} for {{Control}} and {{Communication}}},
author = {Axehill, Daniel},
year = {2008},
address = {Link{\"o}ping, Sweden},
url = {http://urn.kb.se/resolve?urn=urn:nbn:se:liu:diva-10642},
urldate = {2022-12-07},
langid = {english},
school = {Link{\"o}ping University}
}
@article{baayenHiddenInvariantConvexity2022,
title = {Hidden Invariant Convexity for Global and Conic-Intersection Optimality Guarantees in Discrete-Time Optimal Control},
author = {Baayen, Jorn H. and Postek, Krzysztof},
year = {2022},
month = feb,
journal = {Journal of Global Optimization},
volume = {82},
number = {2},
pages = {263--281},
issn = {1573-2916},
doi = {10.1007/s10898-021-01072-5},
url = {https://doi.org/10.1007/s10898-021-01072-5},
urldate = {2022-03-25},
abstract = {Non-convex discrete-time optimal control problems in, e.g., water or power systems, typically involve a large number of variables related through nonlinear equality constraints. The ideal goal is to find a globally optimal solution, and numerical experience indicates that algorithms aiming for Karush--Kuhn--Tucker points often find solutions that are indistinguishable from global optima. In our paper, we provide a theoretical underpinning for this phenomenon, showing that on a broad class of problems the objective can be shown to be an invariant convex function (invex function) of the control decision variables when state variables are eliminated using implicit function theory. In this way, optimality guarantees can be obtained, the exact nature of which depends on the position of the solution within the feasible set. In a numerical example, we show how high-quality solutions are obtained with local search for a river control problem where invexity holds.},
langid = {english}
}
@book{balasDisjunctiveProgramming2018,
title = {Disjunctive {{Programming}}},
author = {Balas, Egon},
year = {2018},
month = dec,
publisher = {Springer},
address = {Cham},
url = {https://doi.org/10.1007/978-3-030-00148-3},
isbn = {978-3-030-00147-6},
langid = {english}
}
@misc{bambadePROXQPEfficientVersatile2023,
title = {{{PROXQP}}: An {{Efficient}} and {{Versatile Quadratic Programming Solver}} for {{Real-Time Robotics Applications}} and {{Beyond}}},
shorttitle = {{{PROXQP}}},
author = {Bambade, Antoine and Schramm, Fabian and Kazdadi, Sarah El and Caron, St{\'e}phane and Taylor, Adrien and Carpentier, Justin},
year = {2023},
month = sep,
url = {https://inria.hal.science/hal-04198663},
urldate = {2024-01-18},
abstract = {Convex Quadratic programming (QP) has become a core component in the modern engineering toolkit, particularly in robotics, where QP problems are legions, ranging from real-time whole-body controllers to planning and estimation algorithms. Many of those QPs need to be solved at high frequency. Meeting timing requirements requires taking advantage of as many structural properties as possible for the problem at hand. For instance, it is generally crucial to resort to warm-starting to exploit the resemblance of consecutive control iterations. While a large range of off-the-shelf QP solvers is available, only a few are suited to exploit problem structure and warm-starting capacities adequately. In this work, we propose the PROXQP algorithm, a new and efficient QP solver that exploits QP structures by leveraging primal-dual augmented Lagrangian techniques. For convex QPs, PROXQP features a global convergence guarantee to the closest feasible QP, an essential property for safe closedloop control. We illustrate its practical performance on various standard robotic and control experiments, including a real-world closed-loop model predictive control application. While originally tailored for robotics applications, we show that PROXQP also performs at the level of state of the art on generic QP problems, making PROXQP suitable for use as an off-the-shelf solver for regular applications beyond robotics.},
langid = {english}
}
@book{bankNonLinearParametricOptimization1983,
title = {Non-{{Linear Parametric Optimization}}},
author = {Bank, B. and Guddat, J. and Klatte, D. and Kummer, B. and Tammer, K.},
year = {1983},
publisher = {Birkh{\"a}user},
address = {Basel},
url = {https://doi.org/10.1007/978-3-0348-6328-5},
isbn = {978-3-0348-6330-8}
}
@book{bardPracticalBilevelOptimization1998,
title = {Practical {{Bilevel Optimization}}: {{Algorithms}} and {{Applications}}},
shorttitle = {Practical {{Bilevel Optimization}}},
author = {Bard, Jonathan F.},
year = {1998},
month = dec,
series = {Nonconvex {{Optimization}} and {{Its Applications}}},
number = {30},
publisher = {Springer},
address = {Dordrecht ; Boston},
url = {https://link.springer.com/book/10.1007/978-1-4757-2836-1},
isbn = {978-0-7923-5458-1},
langid = {english}
}
@article{barrodaleImprovedAlgorithmDiscrete1973,
title = {An {{Improved Algorithm}} for {{Discrete}} $l_1$ {{Linear Approximation}}},
author = {Barrodale, I. and Roberts, F.},
year = {1973},
month = oct,
journal = {SIAM Journal on Numerical Analysis},
volume = {10},
number = {5},
pages = {839--848},
issn = {0036-1429},
doi = {10.1137/0710069},
url = {https://epubs.siam.org/doi/abs/10.1137/0710069},
urldate = {2018-08-01},
abstract = {By modifying the simplex method of linear programming, we are able to present an algorithm for $l_1$-approximation which appears to be superior computationally to any other known algorithm for this problem.}
}
@article{bartelsMinimizationTechniquesPiecewise1978,
title = {Minimization {{Techniques}} for {{Piecewise Differentiable Functions}}: {{The}} $l_1$ {{Solution}} to an {{Overdetermined Linear System}}},
shorttitle = {Minimization {{Techniques}} for {{Piecewise Differentiable Functions}}},
author = {Bartels, R. and Conn, A. and Sinclair, J.},
year = {1978},
month = apr,
journal = {SIAM Journal on Numerical Analysis},
volume = {15},
number = {2},
pages = {224--241},
issn = {0036-1429},
doi = {10.1137/0715015},
url = {https://epubs.siam.org/doi/10.1137/0715015},
urldate = {2018-08-01},
abstract = {A new algorithm is presented for computing a vector x which satisfies a given m by $n$ ($m > n \geq 2$) linear system in the sense that the $l_1$ norm is minimized. That is, if A is a matrix having m columns $a_1, \ldots, a_m$ each of length n, and b is a vector with components $\beta_1, \ldots, \beta_m$, then x is selected so that $\phi(x) = \|A^T x - b\|_1 = \sum_{i=1}^{m} |a_i^T x - \beta_i|$ is as small as possible. Such solutions are of interest for the ``robust'' fitting of a linear model to data. The function $\phi$ is directly minimized in a finite number of steps using techniques borrowed from Conn's approach toward minimizing piecewise differentiable functions. In these techniques, if x is any point and $A_{\mathcal{Z}}$ stands for the submatrix consisting of those columns $a_j$ from A for which the corresponding residuals $a_j^T x - \beta_j$ are zero, then the discontinuities in the gradient of $\phi$ at x are handled by making use of the projector onto the null space of $A_{\mathcal{Z}}^T$. Attention has been paid both to numerical stability and efficiency in maintaining and updating a factorization of $A_{\mathcal{Z}}$ from which the necessary projector is obtainable. The algorithm compares favorably with the best so far reported for the linear $l_1$ problem, and it can easily be extended to handle linear constraints.}
}
@book{beckFirstOrderMethodsOptimization2017,
title = {First-{{Order Methods}} in {{Optimization}}},
author = {Beck, Amir},
year = {2017},
month = oct,
series = {{{MOS-SIAM Series}} on {{Optimization}}},
publisher = {{Society for Industrial and Applied Mathematics}},
doi = {10.1137/1.9781611974997},
url = {https://sites.google.com/site/amirbeck314/books},
urldate = {2021-03-29},
abstract = {This book, as the title suggests, is about first-order methods, namely, methods that exploit information on values and gradients/subgradients (but not Hessians) of the functions comprising the model under consideration. First-order methods go back to 1847 with the work of Cauchy on the steepest descent method. With the increase in the amount of applications that can be modeled as large- or even huge-scale optimization problems, there has been a revived interest in using simple methods that require low iteration cost as well as low memory storage.},
isbn = {978-1-61197-498-0}
}
@book{beckIntroductionNonlinearOptimization2014,
title = {Introduction to {{Nonlinear Optimization}}},
author = {Beck, Amir},
year = {2014},
month = oct,
series = {{{MOS-SIAM Series}} on {{Optimization}}},
publisher = {{Society for Industrial and Applied Mathematics}},
doi = {10.1137/1.9781611973655},
url = {https://sites.google.com/site/amirbeck314/books},
urldate = {2022-01-26},
abstract = {This book emerged from the idea that an optimization training should include three basic components: a strong theoretical and algorithmic foundation, familiarity with various applications, and the ability to apply the theory and algorithms on actual ``real-life'' problems. The book is intended to be the basis of such an extensive training. The mathematical development of the main concepts in nonlinear optimization is done rigorously, where a special effort was made to keep the proofs as simple as possible. The results are presented gradually and accompanied with many illustrative examples. Since the aim is not to give an encyclopedic overview, the focus is on the most useful and important concepts. The theory is complemented by numerous discussions on applications from various scientific fields such as signal processing, economics and localization. Some basic algorithms are also presented and studied to provide some flavor of this important aspect of optimization. Many topics are demonstrated by MATLAB programs, and ideally, the interested reader will find satisfaction in the ability of actually solving problems on his or her own. The book contains several topics that, compared to other classical textbooks, are treated differently. The following are some examples of the less common issues.},
isbn = {978-1-61197-364-8}
}
@article{behrendtTechnicalReportTotally2021,
title = {Technical {{Report}}: {{A Totally Asynchronous Algorithm}} for {{Tracking Solutions}} to {{Time-Varying Convex Optimization Problems}}},
shorttitle = {Technical {{Report}}},
author = {Behrendt, Gabriel and Hale, Matthew},
year = {2021},
month = oct,
journal = {arXiv:2110.06705 [math]},
eprint = {2110.06705},
primaryclass = {math},
url = {http://arxiv.org/abs/2110.06705},
urldate = {2022-05-10},
abstract = {This paper presents a decentralized algorithm for a team of agents to track time-varying fixed points that are the solutions to time-varying convex optimization problems. The algorithm is first-order, and it allows for total asynchrony in the communications and computations of all agents, i.e., all such operations can occur with arbitrary timing and arbitrary (finite) delays. Convergence rates are computed in terms of the communications and computations that agents execute, without specifying when they must occur. These rates apply to convergence to the minimum of each individual objective function, as well as agents' long-run behavior as their objective functions change. Then, to improve the usage of limited communication and computation resources, we optimize the timing of agents' operations relative to changes in their objective functions to minimize total fixed point tracking error over time. Simulation results are presented to illustrate these developments in practice and empirically assess their robustness to uncertainties in agents' update laws.},
archiveprefix = {arxiv}
}
@book{ben-talLecturesModernConvex2001,
title = {Lectures on {{Modern Convex Optimization}}: {{Analysis}}, {{Algorithms}}, and {{Engineering Applications}}},
shorttitle = {Lectures on {{Modern Convex Optimization}}},
author = {{Ben-Tal}, Aharon and Nemirovski, Arkadi},
year = {2001},
month = aug,
publisher = {{Society for Industrial and Applied Mathematics}},
address = {Philadelphia, PA},
abstract = {Here is a book devoted to well-structured and thus efficiently solvable convex optimization problems, with emphasis on conic quadratic and semidefinite programming. The authors present the basic theory underlying these problems as well as their numerous applications in engineering, including synthesis of filters, Lyapunov stability analysis, and structural design. The authors also discuss the complexity issues and provide an overview of the basic theory of state-of-the-art polynomial time interior point methods for linear, conic quadratic, and semidefinite programming. The book's focus on well-structured convex problems in conic form allows for unified theoretical and algorithmical treatment of a wide spectrum of important optimization problems arising in applications.},
isbn = {978-0-89871-491-3},
langid = {english}
}
@unpublished{ben-talLecturesModernConvex2023,
type = {Lecture Notes},
title = {Lectures on {{Modern Convex Optimization}} - 2020/2021/2022/2023 {{Analysis}}, {{Algorithms}}, {{Engineering Applications}}},
shorttitle = {Lectures on {{Modern Convex Optimization}}},
author = {{Ben-Tal}, Aharon and Nemirovski, Arkadi},
year = {2023},
address = {Technion \& Georgia Institute of Technology},
url = {https://www2.isye.gatech.edu/~nemirovs/LMCOLN2023Spring.pdf}
}
@book{ben-talRobustOptimization2009,
title = {Robust {{Optimization}}},
author = {{Ben-Tal}, Aharon and Ghaoui, Laurent El and Nemirovski, Arkadi},
year = {2009},
month = aug,
publisher = {Princeton University Press},
address = {Princeton},
abstract = {Robust optimization is still a relatively new approach to optimization problems affected by uncertainty, but it has already proved so useful in real applications that it is difficult to tackle such problems today without considering this powerful methodology. Written by the principal developers of robust optimization, and describing the main achievements of a decade of research, this is the first book to provide a comprehensive and up-to-date account of the subject. Robust optimization is designed to meet some major challenges associated with uncertainty-affected optimization problems: to operate under lack of full information on the nature of uncertainty; to model the problem in a form that can be solved efficiently; and to provide guarantees about the performance of the solution. The book starts with a relatively simple treatment of uncertain linear programming, proceeding with a deep analysis of the interconnections between the construction of appropriate uncertainty sets and the classical chance constraints (probabilistic) approach. It then develops the robust optimization theory for uncertain conic quadratic and semidefinite optimization problems and dynamic (multistage) problems. The theory is supported by numerous examples and computational illustrations. An essential book for anyone working on optimization and decision making under uncertainty, Robust Optimization also makes an ideal graduate textbook on the subject.},
isbn = {978-0-691-14368-2},
langid = {english}
}
@article{bergmannManoptJlOptimization2022,
title = {{{Manopt.jl}}: {{Optimization}} on {{Manifolds}} in {{Julia}}},
shorttitle = {{{Manopt.jl}}},
author = {Bergmann, Ronny},
year = {2022},
month = feb,
journal = {Journal of Open Source Software},
volume = {7},
number = {70},
pages = {3866},
issn = {2475-9066},
doi = {10.21105/joss.03866},
url = {https://joss.theoj.org/papers/10.21105/joss.03866},
urldate = {2022-02-11},
langid = {english}
}
@article{bernsteinOnlinePrimalDualMethods2019,
title = {Online {{Primal-Dual Methods With Measurement Feedback}} for {{Time-Varying Convex Optimization}}},
author = {Bernstein, Andrey and Dall'Anese, Emiliano and Simonetto, Andrea},
year = {2019},
month = apr,
journal = {IEEE Transactions on Signal Processing},
volume = {67},
number = {8},
pages = {1978--1991},
issn = {1941-0476},
doi = {10.1109/TSP.2019.2896112},
abstract = {This paper addresses the design and analysis of feedback-based online algorithms to control systems or networked systems based on performance objectives and engineering constraints that may evolve over time. The emerging time-varying convex optimization formalism is leveraged to model optimal operational trajectories of the systems, as well as explicit local and network-level operational constraints. Departing from existing batch and feed-forward optimization approaches, the design of the algorithms capitalizes on an online implementation of primal-dual projected-gradient methods; the gradient steps are, however, suitably modified to accommodate feedback from the system in the form of measurements, hence, the term ``online optimization with feedback.'' By virtue of this approach, the resultant algorithms can cope with model mismatches in the algebraic representation of the system states and outputs, they avoid pervasive measurements of exogenous inputs, and they naturally lend themselves to a distributed implementation. Under suitable assumptions, analytical convergence claims are established in terms of dynamic regret. Furthermore, when the synthesis of the feedback-based online algorithms is based on a regularized Lagrangian function, Q-linear convergence to solutions of the time-varying optimization problem is shown.}
}
@book{bertsekasConstrainedOptimizationLagrange1996,
title = {Constrained {{Optimization}} and {{Lagrange Multiplier Methods}}},
author = {Bertsekas, Dimitri P.},
year = {1996},
month = jan,
publisher = {Athena Scientific},
address = {Belmont, Mass},
url = {http://www.athenasc.com/lmultbook.html},
isbn = {978-1-886529-04-5},
langid = {english}
}
@misc{bertsekasConvexAnalysisOptimization2014,
type = {Lecture Slides},
title = {Convex Analysis and Optimization Based on 6.253 Class Lectures at the {{Mass}}. {{Institute}} of {{Technology}}, {{Cambridge}}, {{Mass}}, {{Spring}} 2014},
author = {Bertsekas, Dimitri P.},
year = {2014},
url = {http://web.mit.edu/dimitrib/www/home.html}
}
@book{bertsekasConvexOptimizationAlgorithms2015,
title = {Convex {{Optimization Algorithms}}},
author = {Bertsekas, Dimitri P.},
year = {2015},
month = feb,
publisher = {Athena Scientific},
address = {Nashua},
url = {http://www.athenasc.com/convexalgorithms.html},
isbn = {978-1-886529-28-1},
langid = {english}
}
@book{bertsekasConvexOptimizationTheory2009,
title = {Convex {{Optimization Theory}}},
author = {Bertsekas, Dimitri P.},
year = {2009},
month = jun,
publisher = {Athena Scientific},
address = {Belmont, Mass},
url = {http://web.mit.edu/dimitrib/www/Convex_Theory_Entire_Book.pdf},
isbn = {978-1-886529-31-1},
langid = {english}
}
@incollection{bertsekasIncrementalGradientSubgradient2012,
title = {Incremental Gradient, Subgradient, and Proximal Methods for Convex Optimization: A Survey},
booktitle = {Optimization for {{Machine Learning}}},
author = {Bertsekas, Dimitri P.},
year = {2012},
publisher = {MIT Press},
abstract = {The interplay between optimization and machine learning is one of the most important developments in modern computational science. Optimization formulations and methods are proving to be vital in designing algorithms to extract essential knowledge from huge volumes of data. Machine learning, however, is not simply a consumer of optimization technology but a rapidly evolving field that is itself generating new optimization ideas. This book captures the state of the art of the interaction between optimization and machine learning in a way that is accessible to researchers in both fields.Optimization approaches have enjoyed prominence in machine learning because of their wide applicability and attractive theoretical properties. The increasing complexity, size, and variety of today's machine learning models call for the reassessment of existing assumptions. This book starts the process of reassessment. It describes the resurgence in novel contexts of established frameworks such as first-order methods, stochastic approximations, convex relaxations, interior-point methods, and proximal methods. It also devotes attention to newer themes such as regularized optimization, robust optimization, gradient and subgradient methods, splitting techniques, and second-order methods. Many of these techniques draw inspiration from other fields, including operations research, theoretical computer science, and subfields of optimization. The book will enrich the ongoing cross-fertilization between the machine learning community and these other fields, and within the broader optimization community.},
isbn = {978-0-262-01646-9},
langid = {english}
}
@book{bertsekasNonlinearProgramming2016,
title = {Nonlinear {{Programming}}},
shorttitle = {Nonlinear {{Programming}}},
author = {Bertsekas, Dimitri},
year = {2016},
month = jun,
edition = {3rd},
publisher = {Athena Scientific},
address = {Belmont, Mass},
url = {http://www.athenasc.com/nonlinbook.html},
isbn = {978-1-886529-05-2},
langid = {english}
}
@book{bertsekasParallelDistributedComputation1997,
title = {Parallel and {{Distributed Computation}}: {{Numerical Methods}}},
shorttitle = {Parallel and {{Distributed Computation}}},
author = {Bertsekas, Dimitri P. and Tsitsiklis, John},
year = {1997},
month = jan,
publisher = {Athena Scientific},
address = {Belmont, Mass},
url = {http://www.athenasc.com/pdcbook.html},
abstract = {This highly acclaimed work, first published by Prentice Hall in 1989, is a comprehensive and theoretically sound treatment of parallel and distributed numerical methods. It focuses on algorithms that are naturally suited for massive parallelization, and it explores the fundamental convergence, rate of convergence, communication, and synchronization issues associated with such algorithms. This is an extensive book, which aside from its focus on parallel and distributed algorithms, contains a wealth of material on a broad variety of computation and optimization topics. Among its special features, the book: 1) Quantifies the performance of parallel algorithms, including the limitations imposed by the communication and synchronization penalties. 2) Describes communication algorithms for a variety of system architectures including tree, mesh, and hypercube. 3) Provides a comprehensive convergence analysis of asynchronous methods and a comparison with their asynchronous counterparts. 4) Cove},
isbn = {978-1-886529-01-4},
langid = {english}
}
@article{bertsekasProjectedNewtonMethods1982,
title = {Projected {{Newton Methods}} for {{Optimization Problems}} with {{Simple Constraints}}},
author = {Bertsekas, D.},
year = {1982},
month = mar,
journal = {SIAM Journal on Control and Optimization},
volume = {20},
number = {2},
pages = {221--246},
issn = {0363-0129},
doi = {10.1137/0320018},
url = {https://epubs.siam.org/doi/abs/10.1137/0320018},
urldate = {2019-04-11},
abstract = {We consider the problem $\min\{f(x) \mid x \geq 0\}$, and propose algorithms of the form $x_{k+1} = [x_k - \alpha_k D_k \nabla f(x_k)]^+$, where $[\cdot]^+$ denotes projection on the positive orthant, $\alpha_k$ is a stepsize chosen by an Armijo-like rule, and $D_k$ is a positive definite symmetric matrix which is partly diagonal. We show that $D_k$ can be calculated simply on the basis of second derivatives of f so that the resulting Newton-like algorithm has a typically superlinear rate of convergence. With other choices of $D_k$ convergence at a typically linear rate is obtained. The algorithms are almost as simple as their unconstrained counterparts. They are well suited for problems of large dimension such as those arising in optimal control while being competitive with existing methods for low-dimensional problems. The effectiveness of the Newton-like algorithm is demonstrated via computational examples involving as many as 10,000 variables. Extensions to general linearly constrained problems are also provided. These extensions utilize a notion of an active generalized rectangle patterned after the notion of an active manifold used in manifold suboptimization methods. By contrast with these methods, many constraints can be added or subtracted from the binding set at each iteration without the need to solve a quadratic programming problem.}
}
@article{bertsekasPseudonormalityLagrangeMultiplier2002,
title = {Pseudonormality and a {{Lagrange Multiplier Theory}} for {{Constrained Optimization}}},
author = {Bertsekas, D.P. and Ozdaglar, A.E.},
year = {2002},
month = aug,
journal = {Journal of Optimization Theory and Applications},
volume = {114},
number = {2},
pages = {287--343},
issn = {1573-2878},
doi = {10.1023/A:1016083601322},
url = {https://doi.org/10.1023/A:1016083601322},
urldate = {2022-05-17},
abstract = {We consider optimization problems with equality, inequality, and abstract set constraints, and we explore various characteristics of the constraint set that imply the existence of Lagrange multipliers. We prove a generalized version of the Fritz--John theorem, and we introduce new and general conditions that extend and unify the major constraint qualifications. Among these conditions, two new properties, pseudonormality and quasinormality, emerge as central within the taxonomy of interesting constraint characteristics. In the case where there is no abstract set constraint, these properties provide the connecting link between the classical constraint qualifications and two distinct pathways to the existence of Lagrange multipliers: one involving the notion of quasiregularity and the Farkas lemma, and the other involving the use of exact penalty functions. The second pathway also applies in the general case where there is an abstract set constraint.},
langid = {english}
}
@book{bertsimasIntroductionLinearOptimization1997,
title = {Introduction to {{Linear Optimization}}},
author = {Bertsimas, Dimitris and Tsitsiklis, John N.},
year = {1997},
month = feb,
publisher = {Athena Scientific},
address = {Belmont, Mass},
url = {http://athenasc.com/linoptbook.html},
abstract = {This book provides a unified, insightful, and modern treatment of linear optimization, that is, linear programming, network flow problems, and discrete optimization. It includes classical topics as well as the state of the art, in both theory and practice.},
isbn = {978-1-886529-19-9},
langid = {english}
}
@incollection{bertsimasMomentProblemsSemidefinite2000,
title = {Moment {{Problems}} and {{Semidefinite Optimization}}},
booktitle = {Handbook of {{Semidefinite Programming}}},
author = {Bertsimas, Dimitris and Sethuraman, Jay},
editor = {Wolkowicz, Henry and Saigal, Romesh and Vandenberghe, Lieven},
year = {2000},
month = jan,
series = {International {{Series}} in {{Operations Research}} \& {{Management Science}}},
number = {27},
pages = {469--509},
publisher = {Springer US},
url = {http://link.springer.com/chapter/10.1007/978-1-4615-4381-7_16},
urldate = {2014-07-01},
abstract = {Problems involving moments of random variables arise naturally in many areas of mathematics, economics, and operations research. Let us give some examples that motivate the present paper.},
copyright = {{\copyright}2000 Kluwer Academic Publishers},
isbn = {978-1-4613-6970-7 978-1-4615-4381-7},
langid = {english}
}
@article{bertsimasOnlineMixedIntegerOptimization2022,
title = {Online {{Mixed-Integer Optimization}} in {{Milliseconds}}},
author = {Bertsimas, Dimitris and Stellato, Bartolomeo},
year = {2022},
month = jul,
journal = {INFORMS Journal on Computing},
volume = {34},
number = {4},
pages = {2229--2248},
publisher = {INFORMS},
issn = {1091-9856},
doi = {10.1287/ijoc.2022.1181},
url = {https://pubsonline.informs.org/doi/abs/10.1287/ijoc.2022.1181},
urldate = {2023-09-20},
abstract = {We propose a method to approximate the solution of online mixed-integer optimization (MIO) problems at very high speed using machine learning. By exploiting the repetitive nature of online optimization, we can greatly speed up the solution time. Our approach encodes the optimal solution into a small amount of information denoted as strategy using the voice of optimization framework. In this way, the core part of the optimization routine becomes a multiclass classification problem that can be solved very quickly. In this work, we extend that framework to real-time and high-speed applications focusing on parametric mixed-integer quadratic optimization. We propose an extremely fast online optimization method consisting of a feedforward neural network evaluation and a linear system solution where the matrix has already been factorized. Therefore, this online approach does not require any solver or iterative algorithm. We show the speed of the proposed method both in terms of total computations required and measured execution time. We estimate the number of floating point operations required to completely recover the optimal solution as a function of the problem dimensions. Compared with state-of-the-art MIO routines, the online running time of our method is very predictable and can be lower than a single matrix factorization time. We benchmark our method against the state-of-the-art solver Gurobi obtaining up to two to three orders of magnitude speedups on examples from fuel cell energy management, sparse portfolio optimization, and motion planning with obstacle avoidance. Summary of Contribution: We propose a technique to approximate the solution of online optimization problems at high speed using machine learning. By exploiting the repetitive nature of online optimization, we learn the mapping between the key problem parameters and an encoding of the optimal solution to greatly speed up the solution time. This allows us to significantly improve the computation time and resources needed to solve online mixed-integer optimization problems. We obtain a simple method with a very low computing time variance, which is crucial in online settings.}
}
@book{bertsimasOptimizationIntegers2005,
title = {Optimization {{Over Integers}}},
author = {Bertsimas, Dimitris and Weismantel, Robert},
year = {2005},
month = jun,
publisher = {Dynamic Ideas},
address = {Belmont},
url = {https://www.dynamic-ideas.com/books/x0g7bsm2nvnl6j7ebqodcrhsvlgbm7},
abstract = {The book provides a unified, insightful, and modern treatment of the theory of integer optimization. The book is used in the doctoral level course, "Integer and Combinatorial Optimization" at the Massachusetts Institute of Technology. For solutions to exercises and other instructor resources, please contact Dimitris Bertsimas ([email protected]). The chapters of the book are logically organized in four parts: Part I: Formulations and relaxations includes Chapters 1-5 and discusses how to formulate integer optimization problems, how to enhance the formulations to improve the quality of relaxations, how to obtain ideal formulations, the duality of integer optimization and how to solve the resulting relaxations both practically and theoretically. Part II: Algebra and geometry of integer optimization includes Chapters 6-8 and develops the theory of lattices, oulines ideas from algebraic geometry that have had an impact on integer optimization, and most importantly discusses the geometry of integer optimization, a key feature of the book. These chapters provide the building blocks for developing algorithms. Part III: Algorithms for integer optimization includes Chapters 9-12 and develops cutting plane methods, integral basis methods, enumerative and heuristic methods and approximation algorithms. The key characteristic of our treatment is that our development of the algorithms is naturally based on the algebraic and geometric developments of Part II. Part IV: Extensions of integer optimization includes Chapters 13 and 14, and treats mixed integer optimization and robust discrete optimization. Both areas are practically significant as real world problems have very often both continuous and discrete variables and have elements of uncertainty that need to be addressed in a tractable manner. Distinguishing Characteristics Of This Book:* Develops the theory of integer optimization from a new geometric perspective via integral generating sets; * Emphasizes strong formulations, ways to improve them, integral polyhedra, duality, and relaxations; * Discusses applications of lattices and algebraic geometry to integer optimization, including Grobner bases, optimization over polynomials and counting integer points in polyhedra; * Contains a unified geometric treatment of cutting plane and integral basis methods; * Covers enumerative and heuristic methods, including local search over exponential neighborhoods and simulated annealing; * Presents the major methods to construct approximation algorithms: primal-dual, randomized rounding, semidefinite and enumerative methods; * Provides a unified treatment of mixed integer and robust discrete optimization; * Includes a large number of examples and exercises developed through extensive classroom use.},
isbn = {978-0-9759146-2-5},
langid = {english}
}
@book{bertsimasRobustAdaptiveOptimization2022,
title = {Robust and {{Adaptive Optimization}}},
author = {Bertsimas, Dimitris and Hertog, Dick Den},
year = {2022},
month = jan,
publisher = {Dynamic Ideas},
address = {Belmont, Massachusetts},
url = {https://www.dynamic-ideas.com/books/robust-and-adaptive-optimization},
abstract = {This book provides an original treatment of robust (RO) and adaptive robust optimization (ARO) based on over twenty years of research by each of the authors. Structure of the Book Part I describes linear RO and the underlying uncertainty sets. Part II treats modeling, exact and approximate algorithms for ARO. Part III introduces nonlinear RO for concave uncertainty. Part IV outlines nonlinear RO for convex uncertainty. Part V discusses the theory of distributional RO and ARO. Part VI contains a variety of RO and ARO applications including queueing theory, auction design, option pricing and energy unit commitment. ORIGINAL CHARACTERISTICS OF THE BOOK: Emphasis on modeling in RO and ARO. Integrated treatment of RO and ARO and of nonlinear RO for concave and convex uncertainty, as opposed to robust conic optimization. Interplay of probability theory to select parameters in RO and of optimization for tractability.},
isbn = {978-1-73378-852-6}
}
@article{bertsimasTheoryApplicationsRobust2011,
title = {Theory and {{Applications}} of {{Robust Optimization}}},
author = {Bertsimas, Dimitris and Brown, David B. and Caramanis, Constantine},
year = {2011},
month = jan,
journal = {SIAM Review},
volume = {53},
number = {3},
pages = {464--501},
publisher = {{Society for Industrial and Applied Mathematics}},
issn = {0036-1445},
doi = {10.1137/080734510},
url = {https://epubs.siam.org/doi/abs/10.1137/080734510},
urldate = {2023-05-28},
abstract = {In this paper we consider semidefinite programs (SDPs) whose data depend on some unknown but bounded perturbation parameters. We seek "robust" solutions to such programs, that is, solutions which minimize the (worst-case) objective while satisfying the constraints for every possible value of parameters within the given bounds. Assuming the data matrices are rational functions of the perturbation parameters, we show how to formulate sufficient conditions for a robust solution to exist as SDPs. When the perturbation is "full," our conditions are necessary and sufficient. In this case, we provide sufficient conditions which guarantee that the robust solution is unique and continuous (H{\"o}lder-stable) with respect to the unperturbed problem's data. The approach can thus be used to regularize ill-conditioned SDPs. We illustrate our results with examples taken from linear programming, maximum norm minimization, polynomial interpolation, and integer programming.}
}
@book{bieglerNonlinearProgramming2010,
title = {Nonlinear {{Programming}}},
author = {Biegler, Lorenz T.},
year = {2010},
month = jan,
series = {{{MOS-SIAM Series}} on {{Optimization}}},
publisher = {{Society for Industrial and Applied Mathematics}},
doi = {10.1137/1.9780898719383},
url = {https://my.siam.org/Store/Product/viewproduct/?ProductId=1488},
urldate = {2022-01-26},
abstract = {Chemical engineering applications have been a source of challenging optimization problems for over 50 years. For many chemical process systems, detailed steady state and dynamic behavior can now be described by a rich set of detailed nonlinear models, and relatively small changes in process design and operation can lead to significant improvements in efficiency, product quality, environmental impact, and profitability.With these characteristics, it is not surprising that systematic optimization strategies have played an important role in chemical engineering practice. In particular, over the past 35 years, nonlinear programming (NLP) has become an indispensable tool for the optimization of chemical processes. These tools are now applied at research and process development stages, in the design stage, and in the online operation of these processes. More recently, the scope of these applications is being extended to cover more challenging, large-scale tasks including process control based on the optimization of nonlinear dynamic models, as well as the incorporation of nonlinear models into strategic planning functions. Moreover, the ability to solve large-scale process optimization models cheaply, even online, is aided by recent breakthroughs in nonlinear programming, including the development of modern barrier methods, deeper understanding of line search and trust region strategies to aid global convergence, efficient exploitation of second derivatives in algorithmic development, and the availability of recently developed and widely used NLP codes, including those for barrier methods [81, 391, 404], sequential quadratic programming (SQP) [161, 159], and reduced gradient methods [119, 245, 285]. Finally, the availability of optimization modeling environments, such as AIMMS, AMPL, and GAMS, as well as the NEOS server, has made the formulation and solution of optimization accessible to a much wider user base. All of these advances have a huge impact in addressing and solving process engineering problems previously thought intractable. In addition to developments in mathematical programming, research in process systems engineering has led to optimization modeling formulations that leverage these algorithmic advances, with specific model structure and characteristics that lead to more efficient solutions. This text attempts to make these recent optimization advances accessible to engineers and practitioners. Optimization texts for engineers usually fall into two categories. First, excellent mathematical programming texts (e.g., [134, 162, 294, 100, 227]) emphasize fundamental properties and numerical analysis, but have few specific examples with relevance to real-world applications, and are less accessible to practitioners. On the other hand, equally good engineering texts (e.g., [122, 305, 332, 53]) emphasize applications with well-known methods and codes, but often without their underlying fundamental properties. While their approach is accessible and quite useful for engineers, these texts do not aid in a deeper understanding of the methods or provide extensions to tackle large-scale problems efficiently.},
isbn = {978-0-89871-702-0}
}
@article{bieglerRetrospectiveOptimization2004,
title = {Retrospective on Optimization},
author = {Biegler, Lorenz T. and Grossmann, Ignacio E.},
year = {2004},
month = jul,
journal = {Computers \& Chemical Engineering},
volume = {28},
number = {8},
pages = {1169--1192},
issn = {0098-1354},
doi = {10.1016/j.compchemeng.2003.11.003},
url = {https://www.sciencedirect.com/science/article/pii/S0098135403003089},
urldate = {2023-11-04},
abstract = {In this paper, we provide a general classification of mathematical optimization problems, followed by a matrix of applications that shows the areas in which these problems have been typically applied in process systems engineering. We then provide a review of solution methods of the major types of optimization problems for continuous and discrete variable optimization, particularly nonlinear and mixed-integer nonlinear programming (MINLP). We also review their extensions to dynamic optimization and optimization under uncertainty. While these areas are still subject to significant research efforts, the emphasis in this paper is on major developments that have taken place over the last 25 years.}
}
@book{bierlaireOptimizationPrinciplesAlgorithms2018,
title = {Optimization: {{Principles}} and {{Algorithms}}},
shorttitle = {Optimization},
author = {Bierlaire, Michel},
year = {2018},
edition = {2},
publisher = {EPFL Press},
address = {Lausanne},
url = {https://transp-or.epfl.ch/books/optimization/html/OptimizationPrinciplesAlgorithms2018.pdf},
abstract = {Optimization algorithms are critical tools for engineers, but difficult to use since none of them are universal in application. This introductory text builds up the knowledge set, from the basics, so that engineering students can understand the processes that govern optimization processes. Reinforcing theory with practice, the book includes discussions on how to match applications to appropriate methods and why certain approaches are not adapted to specific requirements. Following the success and adoption of the French version of this book with undergraduate and graduate students, the author has translated and substantially updated and reworked this new English edition. Featuring more worked problems and supported by a companion website, readers can learn the practical elements of building algorithms to solve real-life problems.},
isbn = {978-2-88915-279-7},
langid = {english}
}
@misc{BilevelProgramming2016,
title = {Bilevel Programming},
year = {2016},
month = sep,
journal = {YALMIP},
url = {https://yalmip.github.io/tutorial/bilevelprogramming/},
urldate = {2022-03-24},
abstract = {Bilevel programming using the built-in bilevel solver},
langid = {english}
}
@book{birgeIntroductionStochasticProgramming2011,
title = {Introduction to {{Stochastic Programming}}},
author = {Birge, John R. and Louveaux, Fran{\c c}ois},
year = {2011},
month = jun,
series = {Springer {{Series}} in {{Operations Research}} and {{Financial Engineering}}},
edition = {2},
publisher = {Springer},
address = {New York, NY},
url = {https://doi.org/10.1007/978-1-4614-0237-4},
abstract = {The aim of stochastic programming is to find optimal decisions in problems which involve uncertain data. This field is currently developing rapidly with contributions from many disciplines including operations research, mathematics, and probability. At the same time, it is now being applied in a wide variety of subjects ranging from agriculture to financial planning and from industrial engineering to computer networks. This textbook provides a first course in stochastic programming suitable for students with a basic knowledge of linear programming, elementary analysis, and probability. The authors aim to present a broad overview of the main themes and methods of the subject. Its prime goal is to help students develop an intuition on how to model uncertainty into mathematical problems, what uncertainty changes bring to the decision process, and what techniques help to manage uncertainty in solving the problems. In this extensively updated new edition there is more material on methods and examples including several new approaches for discrete variables, new results on risk measures in modeling and Monte Carlo sampling methods, a new chapter on relationships to other methods including approximate dynamic programming, robust optimization and online methods. The book is highly illustrated with chapter summaries and many examples and exercises. Students, researchers and practitioners in operations research and the optimization area will find it particularly of interest. Review of First Edition: "The discussion on modeling issues, the large number of examples used to illustrate the material, and the breadth of the coverage make 'Introduction to Stochastic Programming' an ideal textbook for the area." (Interfaces, 1998)},
isbn = {978-1-4614-0236-7}
}
@book{birginPracticalAugmentedLagrangian2014,
title = {Practical {{Augmented Lagrangian Methods}} for {{Constrained Optimization}}},
author = {Birgin, E. G. and Mart{\'i}nez, J. M.},
year = {2014},
month = may,
series = {Fundamentals of {{Algorithms}}},
publisher = {{Society for Industrial and Applied Mathematics}},
doi = {10.1137/1.9781611973365},
url = {https://epubs.siam.org/doi/book/10.1137/1.9781611973365},
urldate = {2022-01-26},
abstract = {This book is about the Augmented Lagrangian method, a popular technique for solving constrained optimization problems. It is mainly dedicated to engineers, chemists, physicists, economists, and general users of constrained optimization for solving real-life problems. Nevertheless, it describes in rigorous mathematical terms the convergence theory that applies to the algorithms analyzed. Users often need to understand with precision the properties of the solutions that a practical algorithm finds and the way in which these properties are reflected in practice. Many theorems concerning the behavior of practical algorithms will be found in this book. The geometrical and computational meaning of each theoretical result will be highlighted to make the relevant theory accessible to practitioners. Often, the assumptions under which we prove that algorithms work will not be the most general ones but will be those whose interpretation helps one to understand the computational behavior in real-life problems. Moreover, the plausibility of most assumptions will be discussed, presenting simple sufficient conditions under which assumptions hold. This helps one foresee what can be expected from a practical algorithm and which properties are not expected at all.},
isbn = {978-1-61197-335-8}
}
@misc{blekhermanIntroductionSumsSquares2019,
type = {{{AMS Short Course}} on {{Sums}} of {{Squares}}},
title = {Introduction to {{Sums}} of {{Squares}}},
author = {Blekherman, Greg},
year = {2019},
url = {https://www.ams.org/meetings/shortcourse/BlekhermanSlides-Updated.pdf}
}
@book{blekhermanSemidefiniteOptimizationConvex2013,
title = {Semidefinite {{Optimization}} and {{Convex Algebraic Geometry}}},
editor = {Blekherman, Grigoriy and Parrilo, Pablo A. and Thomas, Rekha},
year = {2013},
month = mar,
publisher = {{Society for Industrial and Applied Mathematics}},
address = {Philadelphia},
url = {http://www.mit.edu/~parrilo/sdocag/},
isbn = {978-1-61197-228-3},
langid = {english}
}
@article{bloomfieldLeastAbsoluteDeviations1980,
title = {Least {{Absolute Deviations Curve-Fitting}}},
author = {Bloomfield, P. and Steiger, W.},
year = {1980},
month = jun,
journal = {SIAM Journal on Scientific and Statistical Computing},
volume = {1},
number = {2},
pages = {290--301},
issn = {0196-5204},
doi = {10.1137/0901019},
url = {https://epubs.siam.org/doi/10.1137/0901019},
urldate = {2018-08-01},
abstract = {A method is proposed for least absolute deviations curve fitting. It may be used to obtain least absolute deviations fits of general linear regressions. As a special case it includes a minor variant of a method for fitting straight lines by least absolute deviations that was previously thought to possess no generalization. The method has been tested on a computer and was found on a range of problems to execute in as little as 1/3 the CPU time required by a published algorithm based on linear programming. More important, this advantage appears to increase indefinitely with the number of data points.}
}
@book{bloomfieldLeastAbsoluteDeviations1983,
title = {Least {{Absolute Deviations}}: {{Theory}}, {{Applications}} and {{Algorithms}}},
shorttitle = {Least {{Absolute Deviations}}},
author = {Bloomfield, Peter and Steiger, William},
year = {1983},
series = {Progress in {{Probability}}},
publisher = {Birkh{\"a}user Basel},
url = {https://www.springer.com/us/book/9781468485769},
urldate = {2018-08-01},
abstract = {Least squares is probably the best known method for fitting linear models and by far the most widely used. Surprisingly, the discrete L1 analogue, least absolute deviations (LAD), seems to have been considered first. Possibly the LAD criterion was forced into the background because of the computational difficulties associated with it. Recently there has been a resurgence of interest in LAD. It was spurred on by work that has resulted in efficient algorithms for obtaining LAD fits. Another stimulus came from robust statistics. LAD estimates resist undue effects from a few large errors. Therefore, in addition to being robust, they also make good starting points for other iterative, robust procedures. The LAD criterion has great utility. LAD fits are optimal for linear regressions where the errors are double exponential. However they also have excellent properties well outside this narrow context. In addition they are useful in other linear situations such as time series and multivariate data analysis. Finally, LAD fitting embodies a set of ideas that is important in linear optimization theory and numerical analysis. In this monograph we will present a unified treatment of the role of LAD techniques in several domains. Some of the material has appeared in recent journal papers and some of it is new. This presentation is organized in the following way. There are three parts, one for Theory, one for Applications and one for Algorithms.},
isbn = {978-1-4684-8576-9},
langid = {english}
}
@phdthesis{bookhovenPiecewiselinearModellingAnalysis1981,
title = {Piecewise-Linear Modelling and Analysis},
author = {van Bokhoven, W. M. G.},
year = {1981},
address = {Eindhoven, NL},
url = {https://doi.org/10.6100/IR118197},
school = {Technische Hogeschool Eindhoven}
}
@book{borweinConvexAnalysisNonlinear2006,
title = {Convex {{Analysis}} and {{Nonlinear Optimization}}: {{Theory}} and {{Examples}}},
shorttitle = {Convex {{Analysis}} and {{Nonlinear Optimization}}},
author = {Borwein, Jonathan and Lewis, Adrian S.},
year = {2006},
series = {{{CMS Books}} in {{Mathematics}}},
edition = {2},
publisher = {Springer},
address = {New York},
url = {https://link.springer.com/book/10.1007/978-0-387-31256-9},
isbn = {978-0-387-29570-1},
langid = {english}
}
@book{borziComputationalOptimizationSystems2012,
title = {Computational {{Optimization}} of {{Systems Governed}} by {{Partial Differential Equations}}},
author = {Borz{\`i}, Alfio and Schulz, Volker},
year = {2012},
month = jan,
series = {Computational {{Science}} and {{Engineering}}},
number = {8},
publisher = {{Society for Industrial and Applied Mathematics}},
address = {Philadelphia},
url = {https://epubs.siam.org/doi/book/10.1137/1.9781611972054},
isbn = {978-1-61197-204-7},
langid = {english}
}
@book{boumalIntroductionOptimizationSmooth2023,
title = {An Introduction to Optimization on Smooth Manifolds},
author = {Boumal, Nicolas},
year = {2023},
publisher = {Cambridge University Press},
url = {https://www.nicolasboumal.net/book/},
urldate = {2022-02-11},
abstract = {This is a book about optimization on smooth manifolds for readers who are comfortable with linear algebra and multivariable calculus. There are no prerequisites in geometry or optimization. Chapters 3 and 5 in particular can serve as a standalone introduction to differential and Riemannian geometry. They focus on embedded submanifolds of linear spaces, with full proofs. A distinguishing feature is that these early chapters do not involve charts. Chapter 8 provides the general theory so that we can build quotient manifolds in Chapter 9. The optimization algorithms in Chapters 4 and 6 apply to the general case, but can already be understood after reading Chapters 3 and 5. Chapter 7 details examples of submanifolds that come up in practice. Chapter 10 covers more advanced Riemannian tools, and Chapter 11 introduces geodesic convexity. In a one-semester graduate course of the mathematics department at Princeton University in 2019 and 2020 (24 lectures of 80 minutes each, two projects, no exercises), I covered much of Chapters 1--6 and select parts of Chapter 7 before the midterm break, then much of Chapters 8--9 and select parts of Chapters 10--11 after the break. Those chapters were shorter at the time, but it still made for a sustained pace. At EPFL in 2021, I discussed mostly Chapters 1--8 in 13 lectures of 90 minutes, plus as many exercise sessions and two projects. The course is popular with applied mathematicians and mathematically inclined engineering students, at the graduate and advanced undergraduate level. You may also be interested in the Manopt toolboxes (Matlab, Python, Julia), and in the book Optimization Algorithms on Matrix Manifolds by Absil, Mahony and Sepulchre (Princeton University Press, 2008), all freely available online. These slides hold a summary of the basic geometric tools and algorithms from Chapters 3 and 5. Here are a one-hour video and a two-hour video introducing the basics of differential geometry and Riemannian geometry for optimization on smooth manifolds, using a variation of the slides linked here. These two videos have mostly the same contents. This version forms the basis for a forthcoming publication with Cambridge University Press.},
isbn = {978-1-00-916617-1}
}
@book{boydConvexOptimization2004,
title = {Convex {{Optimization}}},
author = {Boyd, Stephen and Vandenberghe, Lieven},
year = {2004},
month = mar,
edition = {Seventh printing with corrections 2009},
publisher = {Cambridge University Press},
address = {Cambridge, UK},
url = {https://web.stanford.edu/~boyd/cvxbook/},
abstract = {Convex optimization problems arise frequently in many different fields. A comprehensive introduction to the subject, this book shows in detail how such problems can be solved numerically with great efficiency. The focus is on recognizing convex optimization problems and then finding the most appropriate technique for solving them. The text contains many worked examples and homework exercises and will appeal to students, researchers and practitioners in fields such as engineering, computer science, mathematics, statistics, finance, and economics.},
isbn = {978-0-521-83378-3},
langid = {english}
}
@misc{boydConvexOptimizationApplications,
title = {Convex {{Optimization Applications}}},
author = {Boyd, Stephen and Diamond, Steven and Zhang, Junzi and Agrawal, Akshay},
url = {https://web.stanford.edu/~boyd/papers/pdf/cvx_applications.pdf},
urldate = {2021-07-30},
langid = {english}
}
@article{boydDistributedOptimizationStatistical2011,
title = {Distributed {{Optimization}} and {{Statistical Learning}} via the {{Alternating Direction Method}} of {{Multipliers}}},
author = {Boyd, Stephen and Parikh, Neal and Chu, Eric and Peleato, Borja and Eckstein, Jonathan},
year = {2011},
month = jan,
journal = {Foundations and Trends in Machine Learning},
volume = {3},
number = {1},
pages = {1--122},
issn = {1935-8237},
doi = {10.1561/2200000016},
url = {http://dx.doi.org/10.1561/2200000016},
urldate = {2017-05-16},
abstract = {Many problems of recent interest in statistics and machine learning can be posed in the framework of convex optimization. Due to the explosion in size and complexity of modern datasets, it is increasingly important to be able to solve problems with a very large number of features or training examples. As a result, both the decentralized collection or storage of these datasets as well as accompanying distributed solution methods are either necessary or at least highly desirable. In this review, we argue that the alternating direction method of multipliers is well suited to distributed convex optimization, and in particular to large-scale problems arising in statistics, machine learning, and related areas. The method was developed in the 1970s, with roots in the 1950s, and is equivalent or closely related to many other algorithms, such as dual decomposition, the method of multipliers, Douglas--Rachford splitting, Spingarn's method of partial inverses, Dykstra's alternating projections, Bregman iterative algorithms for {$\ell$}1 problems, proximal methods, and others. After briefly surveying the theory and history of the algorithm, we discuss applications to a wide variety of statistical and machine learning problems of recent interest, including the lasso, sparse logistic regression, basis pursuit, covariance selection, support vector machines, and many others. We also discuss general distributed optimization, extensions to the nonconvex setting, and efficient implementation, including some details on distributed MPI and Hadoop MapReduce implementations.},
}
@book{boydIntroductionAppliedLinear2018,
title = {Introduction to {{Applied Linear Algebra}}: {{Vectors}}, {{Matrices}}, and {{Least Squares}}},
shorttitle = {Introduction to {{Applied Linear Algebra}}},
author = {Boyd, Stephen and Vandenberghe, Lieven},
year = {2018},
month = aug,
edition = {1},
publisher = {Cambridge University Press},
address = {Cambridge, UK; New York, NY},
isbn = {978-1-316-51896-0},
langid = {english}
}
@unpublished{boydIntroductionAppliedLinear2021,
type = {Draft},
title = {Introduction to {{Applied Linear Algebra Vectors}}, {{Matrices}}, and {{Least Squares}} - {{Julia Language Companion}}},
author = {Boyd, Stephen and Vandenberghe, Lieven},
year = {2021},
month = mar,
url = {https://web.stanford.edu/~boyd/vmls/vmls-julia-companion.pdf}
}
@book{boydLinearMatrixInequalities1994,
title = {Linear {{Matrix Inequalities}} in {{System}} and {{Control Theory}}},
author = {Boyd, Stephen and El Ghaoui, Laurent and Feron, Eric and Balakrishnan, Venkataramanan},
year = {1994},
month = jan,
series = {Studies in {{Applied}} and {{Numerical Mathematics}}},
publisher = {{Society for Industrial and Applied Mathematics}},
doi = {10.1137/1.9781611970777},
url = {https://epubs.siam.org/doi/book/10.1137/1.9781611970777},
urldate = {2021-04-16},
abstract = {The basic topic of this book is solving problems from system and control theory using convex optimization. We show that a wide variety of problems arising in system and control theory can be reduced to a handful of standard convex and quasiconvex optimization problems that involve matrix inequalities. For a few special cases there are ``analytic solutions'' to these problems, but our main point is that they can be solved numerically in all cases. These standard problems can be solved in polynomial-time (by, e.g., the ellipsoid algorithm of Shor, Nemirovskii, and Yudin), and so are tractable, at least in a theoretical sense. Recently developed interior-point methods for these standard problems have been found to be extremely efficient in practice. Therefore, we consider the original problems from system and control theory as solved. This book is primarily intended for the researcher in system and control theory, but can also serve as a source of application problems for researchers in convex optimization. Although we believe that the methods described in this book have great practical value, we should warn the reader whose primary interest is applied control engineering. This is a research monograph: We present no specific examples or numerical results, and we make only brief comments about the implications of the results for practical control engineering. To put it in a more positive light, we hope that this book will later be considered as the first book on the topic, not the most readable or accessible. The background required of the reader is knowledge of basic system and control theory and an exposure to optimization. Sontag's book Mathematical Control Theory [Son90] is an excellent survey. Further background material is covered in the texts Linear Systems [Kai80] by Kailath, Nonlinear Systems Analysis [Vid92] by Vidyasagar, Optimal Control: Linear Quadratic Methods [AM90] by Anderson and Moore, and Convex Analysis and Minimization Algorithms I [HUL93] by Hiriart-Urruty and Lemar{\'e}chal. We also highly recommend the book Interior-point Polynomial Algorithms in Convex Programming [NN94] by Nesterov and Nemirovskii as a companion to this book. The reader will soon see that their ideas and methods play a critical role in the basic idea presented in this book.},
isbn = {978-0-89871-485-2}
}
@article{boydMethodCentersMinimizing1993,
title = {Method of Centers for Minimizing Generalized Eigenvalues},
author = {Boyd, Stephen and El Ghaoui, Laurent},
year = {1993},
month = jul,
journal = {Linear Algebra and its Applications},
volume = {188--189},
pages = {63--111},
issn = {0024-3795},
doi = {10.1016/0024-3795(93)90465-Z},
url = {https://www.sciencedirect.com/science/article/pii/002437959390465Z},
urldate = {2021-07-04},
abstract = {We consider the problem of minimizing the largest generalized eigenvalue of a pair of symmetric matrices, each of which depends affinely on the decision variables. Although this problem may appear specialized, it is in fact quite general, and includes for example all linear, quadratic, and linear fractional programs. Many problems arising in control theory can be cast in this form. The problem is nondifferentiable but quasiconvex, so methods such as Kelley's cutting-plane algorithm or the ellipsoid algorithm of Shor, Nemirovsky, and Yudin are guaranteed to minimize it. In this paper we describe relevant background material and a simple interior-point method that solves such problems more efficiently. The algorithm is a variation on Huard's method of centers, using a self-concordant barrier for matrix inequalities developed by Nesterov and Nemirovsky. (Nesterov and Nemirovsky have also extended their potential reduction methods to handle the same problem.) Since the problem is quasiconvex but not convex, devising a nonheuristic stopping criterion (i.e., one that guarantees a given accuracy) is more difficult than in the convex case. We describe several nonheuristic stopping criteria that are based on the dual of a related convex problem and a new ellipsoidal approximation that is slightly sharper, in some cases, than a more general result due to Nesterov and Nemirovsky. The algorithm is demonstrated on an example: determining the quadratic Lyapunov function that optimizes a decay-rate estimate for a differential inclusion.},
langid = {english}
}
@article{boydTutorialGeometricProgramming2007,
title = {A Tutorial on Geometric Programming},
author = {Boyd, Stephen and Kim, Seung-Jean and Vandenberghe, Lieven and Hassibi, Arash},
year = {2007},
month = apr,
journal = {Optimization and Engineering},
volume = {8},
number = {1},
pages = {67},
issn = {1573-2924},
doi = {10.1007/s11081-007-9001-7},
url = {https://web.stanford.edu/~boyd/papers/gp_tutorial.html},
urldate = {2022-02-14},
abstract = {A geometric program (GP) is a type of mathematical optimization problem characterized by objective and constraint functions that have a special form. Recently developed solution methods can solve even large-scale GPs extremely efficiently and reliably; at the same time a number of practical problems, particularly in circuit design, have been found to be equivalent to (or well approximated by) GPs. Putting these two together, we get effective solutions for the practical problems. The basic approach in GP modeling is to attempt to express a practical problem, such as an engineering analysis or design problem, in GP format. In the best case, this formulation is exact; when this is not possible, we settle for an approximate formulation. This tutorial paper collects together in one place the basic background material needed to do GP modeling. We start with the basic definitions and facts, and some methods used to transform problems into GP format. We show how to recognize functions and problems compatible with GP, and how to approximate functions or data in a form compatible with GP (when this is possible). We give some simple and representative examples, and also describe some common extensions of GP, along with methods for solving (or approximately solving) them.},
langid = {english}
}
@book{brinkhuisConvexAnalysisOptimization2020,
title = {Convex {{Analysis}} for {{Optimization}}},
author = {Brinkhuis, Jan},
year = {2020},
month = may,
series = {Graduate {{Texts}} in {{Operations Research}}},
publisher = {Springer},
address = {Cham},
url = {https://link.springer.com/book/10.1007/978-3-030-41804-5},
abstract = {Presents a unified novel three-step method for all constructions, formulas and proofs of the important classic notions of convexity. Includes numerous exercises and illustrations to stimulate learning by doing and seeing. Written in a narrative style with short sections and concise proofs.},
isbn = {978-3-030-41803-8},
langid = {english}
}
@book{brinkhuisOptimizationInsightsApplications2005,
title = {Optimization: {{Insights}} and {{Applications}}},
shorttitle = {Optimization},
author = {Brinkhuis, Jan and Tikhomirov, Vladimir},
year = {2005},
month = sep,
publisher = {Princeton University Press},
address = {Princeton, N.J},
url = {https://press.princeton.edu/books/hardcover/9780691102870/optimization},
isbn = {978-0-691-10287-0},
langid = {english}
}
@book{bucurVariationalMethodsShape2005,
title = {Variational {{Methods}} in {{Shape Optimization Problems}}},
author = {Bucur, Dorin and Buttazzo, Giuseppe},
year = {2005},
month = jul,
series = {Progress in {{Nonlinear Differential Equations}} and {{Their Applications Book}}},
number = {65},
publisher = {Birkh{\"a}user},
address = {Boston},
isbn = {978-0-8176-4359-1},
langid = {english}
}
@incollection{byrnesConvexOptimizationApproach2003,
title = {A {{Convex Optimization Approach}} to {{Generalized Moment Problems}}},
booktitle = {Control and {{Modeling}} of {{Complex Systems}}},
author = {Byrnes, Christopher I. and Lindquist, Anders},
editor = {Hashimoto, Koichi and Oishi, Yasuaki and Yamamoto, Yutaka},
year = {2003},
month = jan,
series = {Trends in {{Mathematics}}},
pages = {3--21},
publisher = {Birkh{\"a}user Boston},
url = {http://link.springer.com/chapter/10.1007/978-1-4612-0023-9_1},
urldate = {2014-07-01},
abstract = {In this paper we present a universal solution to the generalized moment problem, with a nonclassical complexity constraint. We show that this solution can be obtained by minimizing a strictly convex nonlinear functional. This optimization problem is derived in two different ways. We first derive this intrinsically, in a geometric way, by path integration of a one-form which defines the generalized moment problem. It is observed that this one-form is closed and defined on a convex set, and thus exact with, perhaps surprisingly, a strictly convex primitive function. We also derive this convex functional as the dual problem of a problem to maximize a cross entropy functional. In particular, these approaches give a constructive parameterization of all solutions to the Nevanlinna-Pick interpolation problem, with possible higher-order interpolation at certain points in the complex plane, with a degree constraint as well as all solutions to the rational covariance extension problem --- two areas which have been advanced by the work of Hidenori Kimura. Illustrations of these results in system identification and probability are also mentioned.},
copyright = {{\copyright}2003 Birkh{\"a}user Boston},
isbn = {978-1-4612-6577-1 978-1-4612-0023-9},
langid = {english}
}
@article{cadzowMinimumL1L22002,
title = {Minimum {$\ell_1$}, {$\ell_2$}, and {$\ell_\infty$} {{Norm Approximate Solutions}} to an {{Overdetermined System}} of {{Linear Equations}}},
author = {Cadzow, James A.},
year = {2002},
month = oct,
journal = {Digital Signal Processing},
volume = {12},
number = {4},
pages = {524--560},
issn = {1051-2004},
doi = {10.1006/dspr.2001.0409},
url = {http://www.sciencedirect.com/science/article/pii/S1051200401904099},
urldate = {2018-08-01},
abstract = {Many practical problems encountered in digital signal processing and other quantitatively oriented disciplines entail finding a best approximate solution to an overdetermined system of linear equations. Invariably, the least squares error approximate solution (i.e., minimum {$\ell_2$} norm) is chosen for this task due primarily to the existence of a convenient closed expression for its determination. It should be noted, however, that in many applications a minimum {$\ell_1$} or {$\ell_\infty$} norm approximate solution is preferable. For example, in cases where the data being analyzed contain a few data outliers a minimum {$\ell_1$} approximate solution is preferable since it tends to ignore bad data points. In other applications one may wish to determine an approximate solution whose largest error magnitude is the smallest possible (i.e., a minimum {$\ell_\infty$} norm approximate solution). Unfortunately, there do not exist convenient closed form expressions for either the minimum {$\ell_1$} or the minimum {$\ell_\infty$} norm approximate solution and one must resort to nonlinear programming methods for their determination. Effective algorithms for determining these two solutions are herein presented (see Cadzow, J. A., Data Analysis and Signal Processing: Theory and Applications).}
}
@book{calafioreOptimizationModels2014,
title = {Optimization {{Models}}},
author = {Calafiore, Giuseppe C. and El Ghaoui, Laurent},
year = {2014},
month = oct,
publisher = {Cambridge University Press},
address = {Cambridge, UK},
url = {https://people.eecs.berkeley.edu/~elghaoui/optmodbook.html},
abstract = {Emphasizing practical understanding over the technicalities of specific algorithms, this elegant textbook is an accessible introduction to the field of optimization, focusing on powerful and reliable convex optimization techniques. Students and practitioners will learn how to recognize, simplify, model and solve optimization problems - and apply these principles to their own projects. A clear and self-contained introduction to linear algebra demonstrates core mathematical concepts in a way that is easy to follow, and helps students to understand their practical relevance. Requiring only a basic understanding of geometry, calculus, probability and statistics, and striking a careful balance between accessibility and rigor, it enables students to quickly understand the material, without being overwhelmed by complex mathematics. Accompanied by numerous end-of-chapter problems, an online solutions manual for instructors, and relevant examples from diverse fields including engineering, data science, economics, finance, and management, this is the perfect introduction to optimization for undergraduate and graduate students.},
isbn = {978-1-107-05087-7},
langid = {english}
}
@misc{caronOptimalityConditionsNumerical2022,
title = {Optimality Conditions and Numerical Tolerances in {{QP}} Solvers},
author = {Caron, St{\'e}phane},
year = {2022},
month = nov,
url = {https://scaron.info/blog/optimality-conditions-and-numerical-tolerances-in-qp-solvers.html}
}
@article{cavalierModelingIntegerProgramming1990,
title = {Modeling and Integer Programming Techniques Applied to Propositional Calculus},
author = {Cavalier, Tom M. and Pardalos, Panos M. and Soyster, Allen L.},
year = {1990},
month = jan,
journal = {Computers \& Operations Research},
volume = {17},