@comment{ref_optimal_control.bib --- bibliography on optimal control theory.
  (GitHub page chrome and rendered line-number gutter removed; they were
  web-scrape artifacts, not part of the bibliography.)}
@book{andersonOptimalControlLinear2007,
  author     = {Anderson, Brian D. O. and Moore, John B.},
  title      = {Optimal {{Control}}: {{Linear Quadratic Methods}}},
  shorttitle = {Optimal {{Control}}},
  year       = {2007},
  month      = feb,
  edition    = {Reprint of the 1989 edition},
  publisher  = {Dover Publications},
  url        = {http://users.cecs.anu.edu.au/~john/papers/BOOK/B03.PDF},
  isbn       = {0-486-45766-4},
}
@article{astromControlPerspective2014,
  author     = {{\AA}str{\"o}m, Karl J. and Kumar, P. R.},
  title      = {Control: {{A}} Perspective},
  shorttitle = {Control},
  year       = {2014},
  month      = jan,
  journal    = {Automatica},
  volume     = {50},
  number     = {1},
  pages      = {3--43},
  issn       = {0005-1098},
  doi        = {10.1016/j.automatica.2013.10.012},
  url        = {http://www.sciencedirect.com/science/article/pii/S0005109813005037},
  urldate    = {2015-11-03},
  abstract   = {Feedback is an ancient idea, but feedback control is a young field. Nature long ago discovered feedback since it is essential for homeostasis and life. It was the key for harnessing power in the industrial revolution and is today found everywhere around us. Its development as a field involved contributions from engineers, mathematicians, economists and physicists. It is the first systems discipline; it represented a paradigm shift because it cut across the traditional engineering disciplines of aeronautical, chemical, civil, electrical and mechanical engineering, as well as economics and operations research. The scope of control makes it the quintessential multidisciplinary field. Its complex story of evolution is fascinating, and a perspective on its growth is presented in this paper. The interplay of industry, applications, technology, theory and research is discussed.},
}
@book{athansOptimalControlIntroduction2006,
  author     = {Athans, Michael and Falb, Peter L.},
  title      = {Optimal {{Control}}: {{An Introduction}} to the {{Theory}} and {{Its Applications}}},
  shorttitle = {Optimal {{Control}}},
  year       = {2006},
  month      = dec,
  edition    = {Reprint of the 1966 edition},
  publisher  = {Dover Publications},
  isbn       = {0-486-45328-6},
}
@article{beardLinearOperatorEquations2002,
  author   = {Beard, R. W.},
  title    = {Linear Operator Equations with Applications in Control and Signal Processing},
  year     = {2002},
  journal  = {Control Systems, IEEE},
  volume   = {22},
  number   = {2},
  pages    = {69--79},
  issn     = {1066-033X},
  doi      = {10.1109/37.993316},
  abstract = {The author gives several definitions, including the definition of linear vector spaces, inner products, and Hilbert spaces. He defines linear operators and the Hilbert adjoint operator, and gives several illustrative examples. He presents a diagram which he says is key to understanding linear operator equations. It is a pedagogically important tool for understanding linear operators. Its details are discussed. When attention is restricted to linear matrix equations, the singular-value decomposition completely characterizes the fundamental subspaces of the operator, as is also discussed. The author presents several applications of the theory, including least squares, minimum-norm solutions, controllability and observability of linear systems, optimal control, optimal estimation, and modeling mechanical systems. The examples were chosen to illustrate the wide variety of problems that can be solved using the theory presented in the previous sections.},
}
@book{bellSingularOptimalControl1975a,
  author    = {Bell, D. J. and Jacobson, David H.},
  title     = {Singular {{Optimal Control Problems}}},
  year      = {1975},
  month     = feb,
  series    = {Mathematics in {{Science}} and {{Engineering}}},
  number    = {117},
  publisher = {Elsevier Science},
  address   = {London ; New York etc.},
  isbn      = {978-0-12-085060-0},
  langid    = {english},
}
@article{bettsSurveyNumericalMethods1998,
  author  = {Betts, John T.},
  title   = {Survey of {{Numerical Methods}} for {{Trajectory Optimization}}},
  year    = {1998},
  journal = {Journal of Guidance, Control, and Dynamics},
  volume  = {21},
  number  = {2},
  pages   = {193--207},
  issn    = {0731-5090},
  doi     = {10.2514/2.4231},
  url     = {http://dx.doi.org/10.2514/2.4231},
  urldate = {2016-04-13},
}
@book{bongiornoDesignLinearMultivariable2020,
  author     = {Bongiorno, Joseph J. Jr. and Park, Kiheon},
  title      = {Design of {{Linear Multivariable Feedback Control Systems}}: {{The Wiener}}--{{Hopf Approach}} Using {{Transforms}} and {{Spectral Factorization}}},
  shorttitle = {Design of {{Linear Multivariable Feedback Control Systems}}},
  year       = {2020},
  month      = jul,
  publisher  = {Springer},
  address    = {Cham},
  isbn       = {978-3-030-44355-9},
  langid     = {english},
}
@book{bonnardSingularTrajectoriesTheir2003,
  author    = {Bonnard, Bernard and Chyba, Monique},
  title     = {Singular {{Trajectories}} and Their {{Role}} in {{Control Theory}}},
  year      = {2003},
  month     = jul,
  edition   = {2003 edition},
  publisher = {Springer},
  address   = {Paris ; New York},
  abstract  = {The role of singular trajectories in control theory is analysed in this volume that contains about 60 exercises and problems. A section is devoted to the applications of singular trajectories to the optimisation of batch reactors. The theoretical part based on the Martinet case concerns the singularity analysis of singular trajectories in sub-Riemannian geometry. An algorithm is given to evaluate conjugate points and a final chapter discusses open problems. The volume will interest mathematicians and engineers.},
  isbn      = {978-3-540-00838-5},
  langid    = {english},
}
@book{bressanIntroductionMathematicalTheory2007,
  author    = {Bressan, Alberto and Piccoli, Benedetto},
  title     = {Introduction to the {{Mathematical Theory}} of {{Control}}},
  year      = {2007},
  month     = aug,
  series    = {{{AIMS Series}} on {{Applied Mathematics}}},
  volume    = {2},
  publisher = {American Institute of Mathematical Sciences},
  address   = {Springfield, MO},
  url       = {https://www.aimsciences.org/book/AM/volume/27},
  abstract  = {This book provides an introduction to the mathematical theory of nonlinear control systems. It contains many topics that are usually scattered among different texts. The book also presents some topics of current research, which were never before included in a textbook. This volume will serve as an ideal textbook for graduate students. It is self-contained, with several appendices covering a wide mathematical background. Students will be aided by its lucid exposition. More than 100 figures and 100 exercises have been inserted, helping the readers to understand the key geometric ideas and build their intuition. For science or engineering students, this book provides a richly illustrated overview of the basic techniques and results in the theory of linear and nonlinear control. More mathematically oriented students can use this text as a useful introduction, before tackling more advanced, research oriented monographs.},
  isbn      = {978-1-60133-002-4},
  langid    = {english},
}
@incollection{brockettNotesControlLiouville2012,
  author    = {Brockett, Roger},
  title     = {Notes on the {{Control}} of the {{Liouville Equation}}},
  booktitle = {Control of {{Partial Differential Equations}}},
  year      = {2012},
  month     = jan,
  series    = {Lecture {{Notes}} in {{Mathematics}}},
  pages     = {101--129},
  publisher = {Springer Berlin Heidelberg},
  url       = {http://link.springer.com/chapter/10.1007/978-3-642-27893-8_2},
  urldate   = {2014-04-25},
  abstract  = {In these notes we motivate the study of Liouville equations having control terms using examples from problem areas as diverse as atomic physics (NMR), biological motion control and minimum attention control. On one hand, the Liouville model is interpreted as applying to multiple trials involving a single system and on the other, as applying to the control of many identical copies of a single system; e.g., control of a flock. We illustrate the important role the Liouville formulation has in distinguishing between open loop and feedback control. Mathematical results involving controllability and optimization are discussed along with a theorem establishing the controllability of multiple moments associated with linear models. The methods used succeed by relating the behavior of the solutions of the Liouville equation to the behavior of the underlying ordinary differential equation, the related stochastic differential equation, and the consideration of the related moment equations.},
  copyright = {{\copyright}2012 Springer-Verlag Berlin Heidelberg},
  isbn      = {978-3-642-27892-1 978-3-642-27893-8},
  langid    = {english},
}
@book{brysonAppliedOptimalControl1975,
  author     = {Bryson, Jr., Arthur E. and Ho, Yu-Chi},
  title      = {Applied {{Optimal Control}}: {{Optimization}}, {{Estimation}} and {{Control}}},
  shorttitle = {Applied {{Optimal Control}}},
  year       = {1975},
  month      = jan,
  edition    = {Revised edition},
  publisher  = {CRC Press},
  isbn       = {978-0-89116-228-5},
  langid     = {english},
}
@book{carlsonInfiniteHorizonOptimal2011,
  author     = {Carlson, Dean A. and Haurie, Alain B. and Leizarowitz, Arie},
  title      = {Infinite {{Horizon Optimal Control}}: {{Deterministic}} and {{Stochastic Systems}}},
  shorttitle = {Infinite {{Horizon Optimal Control}}},
  year       = {2011},
  month      = dec,
  edition    = {2nd ed; softcover reprint of the original 2nd ed. 1991 Edition},
  publisher  = {Springer},
  address    = {Berlin, Heidelberg},
  url        = {https://doi.org/10.1007/978-3-642-76755-5},
  abstract   = {This monograph deals with various classes of deterministic and stochastic continuous time optimal control problems that are defined over unbounded time intervals. For these problems the performance criterion is described by an improper integral and it is possible that, when evaluated at a given admissible element, this criterion is unbounded. To cope with this divergence new optimality concepts, referred to here as overtaking optimality, weakly overtaking optimality, agreeable plans, etc. , have been proposed. The motivation for studying these problems arises primarily from the economic and biological sciences where models of this type arise naturally. Indeed, any bound placed on the time horizon is artificial when one considers the evolution of the state of an economy or species. The responsibility for the introduction of this interesting class of problems rests with the economists who first studied them in the modeling of capital accumulation processes. Perhaps the earliest of these was F. Ramsey [152] who, in his seminal work on the theory of saving in 1928, considered a dynamic optimization model defined on an infinite time horizon. Briefly, this problem can be described as a Lagrange problem with unbounded time interval. The advent of modern control theory, particularly the formulation of the famous Maximum Principle of Pontryagin, has had a considerable impact on the treatment of these models as well as optimization theory in general.},
  isbn       = {978-3-642-76757-9},
  langid     = {english},
}
@book{clarkeFunctionalAnalysisCalculus2013,
  author    = {Clarke, Francis},
  title     = {Functional {{Analysis}}, {{Calculus}} of {{Variations}} and {{Optimal Control}}},
  year      = {2013},
  month     = feb,
  series    = {Graduate {{Texts}} in {{Mathematics}}},
  number    = {264},
  publisher = {Springer},
  address   = {London ; New York},
  url       = {https://link.springer.com/book/10.1007/978-1-4471-4820-3},
  isbn      = {978-1-4471-4819-7},
  langid    = {english},
}
@article{desoerFeedbackSystemDesign1980,
  author     = {Desoer, C. A. and Liu, Ruey-Wen and Murray, J. and Saeks, R.},
  title      = {Feedback System Design: {{The}} Fractional Representation Approach to Analysis and Synthesis},
  shorttitle = {Feedback System Design},
  year       = {1980},
  month      = jun,
  journal    = {IEEE Transactions on Automatic Control},
  volume     = {25},
  number     = {3},
  pages      = {399--412},
  issn       = {0018-9286},
  doi        = {10.1109/TAC.1980.1102374},
  abstract   = {The problem of designing a feedback system with prescribed properties is attacked via a fractional representation approach to feedback system analysis and synthesis. To this end we let H denote a ring of operators with the prescribed properties and model a given plant as the ratio of two operators in H. This, in turn, leads to a simplified test to determine whether or not a feedback system in which that plant is embedded has the prescribed properties and a complete characterization of those compensators which will "place" the feedback system in H. The theory is formulated axiomatically to permit its application in a wide variety of system design problems and is extremely elementary in nature requiring no more than addition, multiplication, subtraction, and inversion for its derivation even in the most general settings.},
}
@article{doyleGuaranteedMarginsLQG1978,
  author   = {Doyle, J.},
  title    = {Guaranteed Margins for {{LQG}} Regulators},
  year     = {1978},
  month    = aug,
  journal  = {IEEE Transactions on Automatic Control},
  volume   = {23},
  number   = {4},
  pages    = {756--757},
  issn     = {1558-2523},
  doi      = {10.1109/TAC.1978.1101812},
  abstract = {There are none.},
}
@book{egerstedtControlTheoreticSplines2009,
  author     = {Egerstedt, Magnus and Martin, Clyde},
  title      = {Control {{Theoretic Splines}}: {{Optimal Control}}, {{Statistics}}, and {{Path Planning}}},
  shorttitle = {Control {{Theoretic Splines}}},
  year       = {2009},
  month      = dec,
  publisher  = {Princeton University Press},
  isbn       = {0-691-13296-8},
}
@article{elbertImplementationDynamicProgramming2013,
  author   = {Elbert, Philipp and Ebbesen, Soren and Guzzella, Lino},
  title    = {Implementation of {{Dynamic Programming}} for {$n$}-{{Dimensional Optimal Control Problems With Final State Constraints}}},
  year     = {2013},
  month    = may,
  journal  = {IEEE Transactions on Control Systems Technology},
  volume   = {21},
  number   = {3},
  pages    = {924--931},
  issn     = {2374-0159},
  doi      = {10.1109/TCST.2012.2190935},
  abstract = {Many optimal control problems include a continuous nonlinear dynamic system, state, and control constraints, and final state constraints. When using dynamic programming to solve such a problem, the solution space typically needs to be discretized and interpolation is used to evaluate the cost-to-go function between the grid points. When implementing such an algorithm, it is important to treat numerical issues appropriately. Otherwise, the accuracy of the found solution will deteriorate and global optimality can be restored only by increasing the level of discretization. Unfortunately, this will also increase the computational effort needed to calculate the solution. A known problem is the treatment of states in the time-state space from which the final state constraint cannot be met within the given final time. In this brief, a novel method to handle this problem is presented. The new method guarantees global optimality of the found solution, while it is not restricted to a specific class of problems. Opposed to that, previously proposed methods either sacrifice global optimality or are applicable to a specific class of problems only. Compared to the basic implementation, the proposed method allows the use of a substantially lower level of discretization while achieving the same accuracy. As an example, an academic optimal control problem is analyzed. With the new method, the evaluation time was reduced by a factor of about 300, while the accuracy of the solution was maintained.},
}
@techreport{evansIntroductionMathematicalOptimal,
  author      = {Evans, Lawrence C.},
  title       = {An {{Introduction}} to {{Mathematical Optimal Control Theory}}},
  institution = {Department of Mathematics, University of California, Berkeley},
  url         = {https://math.berkeley.edu/~evans/control.course.pdf},
}
@article{faulwasserTurnpikeDissipativityProperties2017,
  author   = {Faulwasser, Timm and Korda, Milan and Jones, Colin N. and Bonvin, Dominique},
  title    = {On Turnpike and Dissipativity Properties of Continuous-Time Optimal Control Problems},
  year     = {2017},
  month    = jul,
  journal  = {Automatica},
  volume   = {81},
  pages    = {297--304},
  issn     = {0005-1098},
  doi      = {10.1016/j.automatica.2017.03.012},
  url      = {https://www.sciencedirect.com/science/article/pii/S0005109817301358},
  urldate  = {2023-12-31},
  abstract = {This paper investigates the relations between three different properties, which are of importance in optimal control problems: dissipativity of the underlying dynamics with respect to a specific supply rate, optimal operation at steady state, and the turnpike property. We show in a continuous-time setting that if along optimal trajectories a strict dissipation inequality is satisfied, then this implies optimal operation at this steady state and the existence of a turnpike at the same steady state. Finally, we establish novel converse turnpike results, i.e.,~we show that the existence of a turnpike at a steady state implies optimal operation at this steady state and dissipativity with respect to this steady state. We draw upon a numerical example to illustrate our findings.},
}
@article{francisOptimalLinearquadraticTimeinvariant1979,
  author   = {Francis, B.},
  title    = {The Optimal Linear-Quadratic Time-Invariant Regulator with Cheap Control},
  year     = {1979},
  month    = aug,
  journal  = {IEEE Transactions on Automatic Control},
  volume   = {24},
  number   = {4},
  pages    = {616--621},
  issn     = {0018-9286},
  doi      = {10.1109/TAC.1979.1102097},
  abstract = {The infinite-time linear-quadratic regulator is considered as the weighting on the control energy tends to zero (cheap control). First, a study is made of the qualitative behavior of the limiting optimal state and control trajectories. In particular, the orders of initial singularity are found and related to the excess of poles over zeros in the plant. Secondly, it is found for which initial conditions the limiting minimum cost is zero (perfect regulation). This generalizes an earlier result of Kwakernaak and Sivan. Finally, a simple extension is made to the steady-state LQG problem with cheap control and accurate observations.},
}
@book{geeringOptimalControlEngineering2007,
  author    = {Geering, Hans Peter},
  title     = {Optimal {{Control}} with {{Engineering Applications}}},
  year      = {2007},
  month     = apr,
  edition   = {2007 edition},
  publisher = {Springer},
  address   = {Berlin ; New York},
  abstract  = {This book introduces a variety of problem statements in classical optimal control, in optimal estimation and filtering, and in optimal control problems with non-scalar-valued performance criteria. Many example problems are solved completely in the body of the text. All chapter-end exercises are sketched in the appendix. The theoretical part of the book is based on the calculus of variations, so the exposition is very transparent and requires little mathematical rigor.},
  isbn      = {978-3-540-69437-3},
  langid    = {english},
}
@article{gohControlParametrizationUnified1988,
  author     = {Goh, C. J. and Teo, K. L.},
  title      = {Control Parametrization: {{A}} Unified Approach to Optimal Control Problems with General Constraints},
  shorttitle = {Control Parametrization},
  year       = {1988},
  month      = jan,
  journal    = {Automatica},
  volume     = {24},
  number     = {1},
  pages      = {3--18},
  issn       = {0005-1098},
  doi        = {10.1016/0005-1098(88)90003-9},
  url        = {http://www.sciencedirect.com/science/article/pii/0005109888900039},
  urldate    = {2012-11-06},
  abstract   = {In this paper, we consider a class of optimal control problems with general constraints. The aim is to provide a unified approach to the numerical solution of this general class of optimal control problems by using the control parametrization technique, where different types of constraints are shown to be equivalent to essentially the same functional form as the cost functional. Some convergence properties of this proposed method are also investigated.},
}
@article{hargravesDirectTrajectoryOptimization1987,
  author  = {Hargraves, C. R. and Paris, S. W.},
  title   = {Direct Trajectory Optimization Using Nonlinear Programming and Collocation},
  year    = {1987},
  journal = {Journal of Guidance, Control, and Dynamics},
  volume  = {10},
  number  = {4},
  pages   = {338--342},
  issn    = {0731-5090},
  doi     = {10.2514/3.20223},
  url     = {http://dx.doi.org/10.2514/3.20223},
  urldate = {2016-04-13},
}
@article{hartlSurveyMaximumPrinciples1995,
  author   = {Hartl, R. and Sethi, S. and Vickson, R.},
  title    = {A {{Survey}} of the {{Maximum Principles}} for {{Optimal Control Problems}} with {{State Constraints}}},
  year     = {1995},
  month    = jun,
  journal  = {SIAM Review},
  volume   = {37},
  number   = {2},
  pages    = {181--218},
  issn     = {0036-1445},
  doi      = {10.1137/1037043},
  url      = {https://epubs.siam.org/doi/abs/10.1137/1037043},
  urldate  = {2019-01-10},
  abstract = {This paper gives a survey of the various forms of Pontryagin's maximum principle for optimal control problems with state variable inequality constraints. The relations between the different sets of optimality conditions arising in these forms are shown. Furthermore, the application of these maximum principle conditions is demonstrated by solving some illustrative examples.},
}
@book{helmkeOptimizationDynamicalSystems1996,
  author    = {Helmke, Uwe and Moore, John B.},
  title     = {Optimization and {{Dynamical Systems}}},
  year      = {1996},
  month     = jul,
  publisher = {Springer},
  address   = {London ; New York},
  abstract  = {This study addresses both classical and previously unsolved optimization tasks in linear algebra, linear systems theory, control theory and signal processing. Exploiting developments in Computation such as Parallel Computing and Neural Networks, it gives a dynamical systems approach for tackling a wide class of constrained optimization tasks.Optimization and Dynamical Systems will be of interest to engineers and mathematicians. Engineers will learn the mathematics and the technical approach necessary to solve a wide class of constrained optimization tasks. Mathematicians will see how techniques from global analysis and differential geometry can be developed to achieve useful construction procedures for optimization on manifolds.},
  isbn      = {978-3-540-19857-4},
  langid    = {english},
}
@phdthesis{henrionMeasuresLinearMatrix2012,
  author  = {Henrion, Didier},
  title   = {Measures and Linear Matrix Inequalities in Polynomial Optimal Control},
  type    = {Czech Professorship Inaugural Lecture Manuscript},
  year    = {2012},
  address = {Prague},
  school  = {Czech Technical University in Prague},
  url     = {http://homepages.laas.fr/henrion/Papers/henrionprof.pdf},
  urldate = {2014-04-18},
}
@article{kelleyGradientTheoryOptimal1960,
  author  = {Kelley, Henry J.},
  title   = {Gradient {{Theory}} of {{Optimal Flight Paths}}},
  year    = {1960},
  journal = {ARS Journal},
  volume  = {30},
  number  = {10},
  pages   = {947--954},
  doi     = {10.2514/8.5282},
  url     = {https://doi.org/10.2514/8.5282},
  urldate = {2019-04-11},
}
@book{kirkOptimalControlTheory2004,
  author     = {Kirk, Donald E.},
  title      = {Optimal {{Control Theory}}: {{An Introduction}}},
  shorttitle = {Optimal {{Control Theory}}},
  year       = {2004},
  month      = apr,
  edition    = {Reprint of the 1970 edition},
  publisher  = {Dover Publications},
  isbn       = {0-486-43484-2},
}
@article{kordaStochasticMPCFramework2014,
  author   = {Korda, Milan and Gondhalekar, Ravi and Oldewurtel, Frauke and Jones, Colin N.},
  title    = {Stochastic {{MPC Framework}} for {{Controlling}} the {{Average Constraint Violation}}},
  year     = {2014},
  month    = jul,
  journal  = {IEEE Transactions on Automatic Control},
  volume   = {59},
  number   = {7},
  pages    = {1706--1721},
  issn     = {1558-2523},
  doi      = {10.1109/TAC.2014.2310066},
  url      = {https://ieeexplore.ieee.org/abstract/document/6756951},
  urldate  = {2023-12-31},
  abstract = {This technical note considers linear discrete-time systems with additive, bounded, disturbances subject to hard control input bounds and a stochastic constraint on the amount of state-constraint violation averaged over time. The amount of violations is quantified by a loss function and the averaging can be weighted, corresponding to exponential forgetting of past violations. The freedom in the choice of the loss function makes this formulation highly flexible-for instance, probabilistic constraints, or integrated chance constraints, can be enforced by an appropriate choice of the loss function. For the type of constraint considered, we develop a recursively feasible receding horizon control scheme exploiting the averaged-over-time nature by explicitly taking into account the amount of past constraint violations when determining the current control input. This leads to a significant reduction in conservatism. As a simple extension of the proposed approach we show how time-varying state-constraints can be handled within our framework. The computational complexity (online as well as offline) is comparable to existing model predictive control schemes. The effectiveness of the proposed methodology is demonstrated by means of a numerical example from building climate control.},
}
@book{krasovskiiGameTheoreticalControlProblems1988,
  author    = {Krasovskii, N. N. and Subbotin, A. I.},
  title     = {Game-{{Theoretical Control Problems}}},
  year      = {1988},
  series    = {Springer {{Series}} in {{Soviet Mathematics}}},
  publisher = {Springer-Verlag},
  address   = {New York, NY},
  abstract  = {This book is devoted to an investigation of control problems which can be described by ordinary differential equations and be expressed in terms of game theoretical notions. In these terms, a strategy is a control based on the feedback principle which will assure a definite equality for the controlled process which is subject to uncertain factors such as a move or a controlling action of the opponent. Game Theoretical Control Problems contains definitions and formalizations of differential games, existence for equilibrium and extensive discussions of optimal strategies. Formal definitions and statements are accompanied by suitable motivations and discussions of computational algorithms. The book is addressed to mathematicians, engineers, economists and other users of control theoretical and game theoretical notions.},
  isbn      = {978-0-387-96389-1},
  langid    = {english},
}
@article{kuceraBridgeStatespaceTransferfunction1999,
  author   = {Ku{\v c}era, V.},
  title    = {A Bridge between State-Space and Transfer-Function Methods},
  year     = {1999},
  journal  = {Annual Reviews in Control},
  volume   = {23},
  pages    = {177--184},
  issn     = {1367-5788},
  doi      = {10.1016/S1367-5788(99)90085-8},
  url      = {http://www.sciencedirect.com/science/article/pii/S1367578899900858},
  urldate  = {2014-04-28},
  abstract = {A synthesis of the two major approaches to linear control system design, namely the state-space and the transfer-function ones, can be achieved by use of dynamics assignment, a convenient reference design problem. Students, teachers, and researchers can all benefit from a deeper understanding of the relationships between these two approaches.},
}
@article{kuceraDesignSteadystateMinimum1979,
  author   = {Ku{\v c}era, Vladim{\'i}r},
  title    = {Design of Steady-State Minimum Variance Controllers},
  year     = {1979},
  month    = jul,
  journal  = {Automatica},
  volume   = {15},
  number   = {4},
  pages    = {411--418},
  issn     = {0005-1098},
  doi      = {10.1016/0005-1098(79)90015-3},
  url      = {http://www.sciencedirect.com/science/article/pii/0005109879900153},
  urldate  = {2014-04-28},
  abstract = {A new technique to design optimal controllers is presented for plants described by rational transfer functions and additive disturbances with rational spectral densities. The objective is to minimize a weighted sum of the plant input and output steady-state variances subject to asymptotic stability of the closed-loop system. The technique is based on polynomial algebra. In fact, the design procedure is reduced to solving two linear polynomial equations whose unique solution directly yields the optimal controller transfer function as well as the minimized cost. This approach is simple, computationally attractive, and can handle unstable and/or nonminimum-phase plants with improper transfer functions. An integral part of the paper are effective computational algorithms, which include the spectral factorization, the solution of polynomial equations, and the evaluation of minimum cost.},
}
@article{kuceraDiophantineEquationsControl1993,
  author   = {Ku{\v c}era, Vladim{\'i}r},
  title    = {Diophantine Equations in Control---{{A}} Survey},
  year     = {1993},
  month    = nov,
  journal  = {Automatica},
  volume   = {29},
  number   = {6},
  pages    = {1361--1375},
  issn     = {0005-1098},
  doi      = {10.1016/0005-1098(93)90003-C},
  url      = {http://www.sciencedirect.com/science/article/pii/000510989390003C},
  urldate  = {2014-02-06},
  abstract = {This survey is also a tutorial whose aim is to explain the role of diophantine equations in the synthesis of feedback control systems. These are linear equations in a ring and result from a fractional representation of the systems involved. The cornerstone of the exposition is a simple parametrization of all stabilizing controllers for a given plant. One can then choose, in principle, the best controllers for various applications. These ideas evolved from early attempts to use polynomial equations in the design of discrete-time linear systems. By now they have been extended to continuous-time, infinite-dimensional, time-varying and non-linear systems.},
}
@book{kuceraDiscreteLinearControl1979,
title = {Discrete Linear Control: The Polynomial Equation Approach},
shorttitle = {Discrete Linear Control},
author = {Ku{\v c}era, Vladim{\'i}r},
year = {1979},
publisher = {Wiley},
  address = {Chichester},
  isbn = {978-0-471-99726-9},
langid = {english}
}
@article{kuceraLinearQuadraticControl1983,
title = {Linear Quadratic Control. {{State}} Space vs. Polynomial Equations},
author = {Ku{\v c}era, Vladim{\'i}r},
year = {1983},
journal = {Kybernetika},
volume = {19},
number = {3},
pages = {185--195},
url = {http://dml.cz/dmlcz/124912}
}
@inproceedings{kuceraMethodTeachParameterization2011,
title = {A {{Method}} to {{Teach}} the {{Parameterization}} of {{All Stabilizing Controllers}}},
booktitle = {Proceedings of the 18th {{IFAC World Congress}}},
author = {Ku{\v c}era, Vladim{\'i}r},
editor = {Bittanti, Sergio},
year = {2011},
month = aug,
volume = {18},
pages = {6355--6360},
publisher = {IFAC},
address = {Milan, Italy},
doi = {10.3182/20110828-6-IT-1002.01148},
url = {http://www.ifac-papersonline.net/Detailed/49573.html},
urldate = {2014-02-06}
}
@article{kuceraNewResultsState1981,
title = {New Results in State Estimation and Regulation},
author = {Ku{\v c}era, Vladim{\'i}r},
year = {1981},
month = sep,
journal = {Automatica},
volume = {17},
number = {5},
pages = {745--748},
issn = {0005-1098},
doi = {10.1016/0005-1098(81)90021-2},
url = {http://www.sciencedirect.com/science/article/pii/0005109881900212},
urldate = {2014-04-28},
abstract = {An alternative technique to design linear state estimators and regulators is presented. This technique is based on transfer matrix considerations. The optimal regulator or estimator gain is obtained via spectral factorization and the solution of a simple equation in polynomial matrices. This approach provides further insight, displays the duality of estimation and control nicely, and bridges the state-space and frequency-domain techniques. The resulting design procedure is computationally attractive and particularly simple for system matrices in the observer or controller canonical form.}
}
@article{kuceraStochasticMultivariableControl1980,
title = {Stochastic Multivariable Control: {{A}} Polynomial Equation Approach},
shorttitle = {Stochastic Multivariable Control},
  author = {Ku{\v c}era, Vladim{\'i}r},
year = {1980},
month = oct,
journal = {IEEE Transactions on Automatic Control},
volume = {25},
number = {5},
pages = {913--919},
issn = {0018-9286},
doi = {10.1109/TAC.1980.1102493},
abstract = {A new technique to design optimal multivariable controllers is presented for stochastic plants described by rational transfer matrices. The objective is to minimize a weighted sum of the steady-state variances at the plant input and output subject to the asymptotic stability of the closed-loop system. The technique is based on polynomial matrices. Essentially, the design procedure is reduced to solving a linear matrix polynomial equation whose coefficient matrices are obtained by spectral factorization. The solution of this equation then directly yields the optimal controller transfer matrix as well as the associated minimum cost. The reported approach is relatively simple, computationally attractive, and lays bare the necessary and sufficient conditions for the optimal controller to exist. It is general enough to handle problems that cannot always be addressed by standard time-domain LQG techniques, such as problems involving plants with improper transfer matrices and/or singular noise intensities and weighting matrices.}
}
@article{lanzonDistanceMeasuresUncertain2009,
title = {Distance {{Measures}} for {{Uncertain Linear Systems}}: {{A General Theory}}},
shorttitle = {Distance {{Measures}} for {{Uncertain Linear Systems}}},
  author = {Lanzon, Alexander and Papageorgiou, George},
year = {2009},
month = jul,
journal = {IEEE Transactions on Automatic Control},
volume = {54},
number = {7},
pages = {1532--1547},
issn = {0018-9286},
doi = {10.1109/TAC.2009.2022098},
abstract = {In this paper, we propose a generic notion of distance between systems that can be used to measure discrepancy between open-loop systems in a feedback sense under several uncertainty structures. When the uncertainty structure is chosen to be four-block (or equivalently, normalized coprime factor) uncertainty, then this generic distance measure reduces to the well-known nu-gap metric. Associated with this generic distance notion, we also define a generic stability margin notion that allows us to give the distance measure a feedback interpretation by deriving generic robust stability and robust performance results. The proposed distance notion and the corresponding results exploit a powerful generalization of the small-gain theorem which handles perturbations in RfrL infin, rather than only in RfrH infin. When the uncertainty structure is fixed to one of the standard structures (e.g., additive, multiplicative, inverse multiplicative, coprime factor, four-block or any mixtures of the above), we give a step-by-step procedure (based on model validation ideas) that shows how the generic notion of distance and the correspondingly generic winding number conditions can be reduced to simple formulae. This work provides a unified framework that captures and embeds previous results in this area and also completes the picture by showing how other results of a similar nature can be obtained from the same framework. The techniques used involve only basic linear algebra, so they also provide a simplification of previous advanced proofs. Furthermore, the various distance measures so created can be used for non-conservative model embedding into the smallest uncertain family. An illustrative example is also given that demonstrates the superior qualities, above the nu-gap metric, of a particular distance measure obtained from this work in situations where the plant is lightly-damped. All systems considered in this paper are linear time-invariant.}
}
@article{leeOptimalTrajectoryGuidance1965,
title = {Optimal Trajectory, Guidance, and Conjugate Points},
author = {Lee, Imsong},
year = {1965},
month = dec,
journal = {Information and Control},
volume = {8},
number = {6},
pages = {589--606},
issn = {0019-9958},
doi = {10.1016/S0019-9958(65)90417-1},
url = {https://www.sciencedirect.com/science/article/pii/S0019995865904171},
urldate = {2023-04-03},
abstract = {Two interesting and important properties of the conjugate points have been discussed and illustrated by means of examples. These properties are shown to have significant implications for trajectory and guidance optimization problems. The conjugate point nearest to the initial time determines the maximum duration of an optimal control for a linear process with a quadratic cost functional and for a certain nonlinear process. If there is a conjugate point to the terminal time, then it is not possible to construct the linear, optimal guidance system in a neighborhood of the nominal trajectory.},
langid = {english}
}
@unpublished{lewisMaximumPrinciplePontryagin2006,
  type = {Lecture Notes},
  note = {Lecture notes, Queen's University},
title = {The {{Maximum Principle}} of {{Pontryagin}} in Control and in Optimal Control},
author = {Lewis, Andrew D.},
year = {2006},
month = may,
url = {https://mast.queensu.ca/~andrew/teaching/pdf/maximum-principle.pdf}
}
@book{lewisOptimalControl2012,
title = {Optimal {{Control}}},
  author = {Lewis, Frank L. and Vrabie, Draguna and Syrmos, Vassilis L.},
year = {2012},
month = feb,
edition = {3},
publisher = {John Wiley \& Sons},
url = {https://lewisgroup.uta.edu/FL%20books/Lewis%20optimal%20control%203rd%20edition%202012.pdf},
urldate = {2022-03-09},
abstract = {As a superb introductory text and an indispensable reference, this new edition of Optimal Control will serve the needs of both the professional engineer and the advanced student in mechanical, electrical, and aerospace engineering. Its coverage encompasses all the fundamental topics as well as the major changes that have occurred in recent years. An abundance of computer simulations using MATLAB and relevant Toolboxes is included to give the reader the actual experience of applying the theory to real-world situations. Major topics covered include: Static Optimization Optimal Control of Discrete-Time Systems Optimal Control of Continuous-Time Systems The Tracking Problem and Other LQR Extensions Final-Time-Free and Constrained Input Control Dynamic Programming Optimal Control for Polynomial Systems Output Feedback and Structured Control Robustness and Multivariable Frequency-Domain Techniques Differential Games Reinforcement Learning and Optimal Adaptive Control},
isbn = {978-0-470-63349-6},
langid = {british}
}
@book{liberzonCalculusVariationsOptimal2011,
title = {Calculus of {{Variations}} and {{Optimal Control Theory}}: {{A Concise Introduction}}},
shorttitle = {Calculus of {{Variations}} and {{Optimal Control Theory}}},
author = {Liberzon, Daniel},
year = {2011},
month = dec,
publisher = {Princeton University Press},
url = {http://liberzon.csl.illinois.edu/teaching/cvoc/cvoc.html},
isbn = {0-691-15187-3}
}
@book{limebeerDynamicsOptimalControl2018,
title = {Dynamics and {{Optimal Control}} of {{Road Vehicles}}},
author = {Limebeer, D. J. N. and Massaro, Matteo},
year = {2018},
publisher = {Oxford University Press},
address = {Oxford},
doi = {10.1093/oso/9780198825715.001.0001},
url = {https://oxford.universitypressscholarship.com/10.1093/oso/9780198825715.001.0001/oso-9780198825715},
urldate = {2022-03-30},
abstract = {The broad aim of this book is to provide a comprehensive coverage of the modelling and optimal control of both two- and four-wheeled road vehicles. The first focus of this book is a review of classical mechanics and its use in building vehicle and tyre dynamic models. The second is nonlinear optimal control, which is used to solve a range of minimum-time, minimum-fuel, and track curvature reconstruction problems. As is known classically, all thismaterial is bound together by the calculus of variations and stationary principles. The treatment of this material is supplemented with a large number of examples that highlight obscurities and subtleties in the theory. A particular strength of the book is its unified treatment of tyre, car, and motorcycle dynamics and the application of nonlinear optimal control to vehicle-related problems within a single text. These topics are usually treated independently, and can only be found in disparate texts and journal articles. It is our contention that presentday vehicle dynamicists should be familiar with all of these topic areas. The aim in writing this book is to provide a comprehensive and yet accessible text that emphasizes particularly the theoretical aspects of vehicular modelling and control.},
isbn = {978-0-19-882571-5},
langid = {english}
}
@article{limebeerFasterHigherGreener2015,
title = {Faster, {{Higher}}, and {{Greener}}: {{Vehicular Optimal Control}}},
shorttitle = {Faster, {{Higher}}, and {{Greener}}},
  author = {Limebeer, David J. N. and Rao, Anil V.},
year = {2015},
month = apr,
journal = {IEEE Control Systems Magazine},
volume = {35},
number = {2},
pages = {36--56},
issn = {1941-000X},
doi = {10.1109/MCS.2014.2384951},
abstract = {Vehicular optimal control problems have been studied extensively since the early part of the 20th century. Progress in solving these problems has been driven primarily by applications in space and atmospheric flight, including launch vehicles, Earth-based and interplanetary space orbital transfer, and high-performance supersonic aircraft. In all of these applications, the ability to solve increasingly complex optimal control problems has been made possible by advances in high-speed computing. The mathematical and computing techniques being developed are now so diverse, and the range of applications of optimal control so broad, that a comprehensive review of the entire scope of vehicular optimal control is an impossible task. Instead, we will attempt only to provide a flavor of the scope and variety of these problems.}
}
@book{maccluerCalculusVariationsMechanics2012,
title = {Calculus of {{Variations}}: {{Mechanics}}, {{Control}} and {{Other Applications}}},
shorttitle = {Calculus of {{Variations}}},
author = {MacCluer, Charles R.},
year = {2012},
month = nov,
  edition = {Reprint},
publisher = {Dover Publications},
address = {Mineola, N.Y.},
abstract = {The first truly up-to-date treatment of the calculus of variations, this text is also the first to offer a simple introduction to such key concepts as optimal control and linear-quadratic control design. Suitable for junior/senior--level students of math, science, and engineering, this volume also serves as a useful reference for engineers, chemists, and forest/environmental managers. Its broad perspective features numerous exercises, hints, outlines, and comments, plus several appendixes, including a practical discussion of MATLAB.Students will appreciate the text's reader-friendly style, which features gradual advancements in difficulty and starts by developing technique rather than focusing on technical details. The examples and exercises offer many citations of engineering-based applications, and the exercises range from elementary to graduate-level projects, including longer projects and those related to classic papers.},
isbn = {978-0-486-49837-9},
langid = {english}
}
@article{megretskiSystemAnalysisIntegral1997,
title = {System Analysis via Integral Quadratic Constraints},
  author = {Megretski, Alexandre and Rantzer, Anders},
year = {1997},
month = jun,
journal = {IEEE Transactions on Automatic Control},
volume = {42},
number = {6},
pages = {819--830},
issn = {0018-9286},
doi = {10.1109/9.587335},
abstract = {This paper introduces a unified approach to robustness analysis with respect to nonlinearities, time variations, and uncertain parameters. From an original idea by Yakubovich (1967), the approach has been developed under a combination of influences from the Western and Russian traditions of control theory. It is shown how a complex system can be described, using integral quadratic constraints (IQC) for its elementary components. A stability theorem for systems described by IQCs is presented that covers classical passivity/dissipativity arguments but simplifies the use of multipliers and the treatment of causality. A systematic computational approach is described, and relations to other methods of stability analysis are discussed. Last, but not least, the paper contains a summarizing list of IQCs for important types of system components}
}
@book{pedregalVariationalApproachOptimal2022,
title = {A {{Variational Approach}} to {{Optimal Control}} of {{ODEs}}},
author = {Pedregal, Pablo},
year = {2022},
series = {Advances in {{Design}} and {{Control}}},
publisher = {SIAM},
url = {https://my.siam.org/Store/Product/viewproduct/?ProductId=42453611},
urldate = {2023-01-15},
isbn = {978-1-61197-710-3}
}
@article{peschPracticalGuideSolution1996,
  title = {A Practical Guide to the Solution of Real-Life Optimal Control Problems},
author = {Pesch, Hans Josef},
year = {1996},
month = jul,
journal = {Control and Cybernetics},
volume = {23},
abstract = {The present paper is an introductory and survey paper of the treatment of realistically modelled optimal control problems from applications in the aerospace field. Especially those problems are considered which include different types of constraints. In the tutorial part of the paper, recipes are given for the treatment of optimal control problems for which, among other constraints, control and/or state variable inequality constraints are to be taken into account. Optimal control problems having singular subarcs and/or discontinuities are also investigated. The discussion of the necessary conditions aims to the subsequent application of the multiple shooting method, which is known to be a very precise and efficient method for the solution of those multipoint boundary-value problems that arise from these necessary conditions. Homotopy techniques as well as the fusion of direct collocation and multiple shooting techniques are described. Both approaches facilitate the construction of an...}
}
@article{prajnaConvexProgramsTemporal2007,
title = {Convex {{Programs}} for {{Temporal Verification}} of {{Nonlinear Dynamical Systems}}},
  author = {Prajna, Stephen and Rantzer, Anders},
year = {2007},
month = jan,
journal = {SIAM Journal on Control and Optimization},
volume = {46},
number = {3},
pages = {999--1021},
issn = {0363-0129},
doi = {10.1137/050645178},
url = {http://epubs.siam.org/doi/abs/10.1137/050645178},
urldate = {2014-07-03},
  abstract = {A methodology for safety verification of continuous and hybrid systems using barrier certificates has been proposed recently. Conditions that must be satisfied by a barrier certificate can be formulated as a convex program, and the feasibility of the program implies system safety in the sense that there is no trajectory starting from a given set of initial states that reaches a given unsafe region. The dual of this problem, i.e., the reachability problem, concerns proving the existence of a trajectory starting from the initial set that reaches another given set. Using insights from the linear programming duality appearing in the discrete shortest path problem, we show in this paper that reachability of continuous systems can also be verified through convex programming. Several convex programs for verifying safety and reachability, as well as other temporal properties such as eventuality, avoidance, and their combinations, are formulated. Some examples are provided to illustrate the application of the proposed methods. Finally, we exploit the convexity of our methods to derive a converse theorem for safety verification using barrier certificates.}
}
@article{rantzerDualLyapunovStability2001,
title = {A Dual to {{Lyapunov}}'s Stability Theorem},
author = {Rantzer, Anders},
year = {2001},
month = mar,
journal = {Systems \& Control Letters},
volume = {42},
number = {3},
pages = {161--168},
issn = {0167-6911},
doi = {10.1016/S0167-6911(00)00087-6},
url = {http://www.sciencedirect.com/science/article/pii/S0167691100000876},
urldate = {2014-07-03},
abstract = {Lyapunov's second theorem is a standard tool for stability analysis of ordinary differential equations. Here we introduce a theorem which can be viewed as a dual to Lyapunov's result. From existence of a scalar function satisfying certain inequalities it follows that ``almost all trajectories'' of the system tend to zero. The scalar function has a physical interpretation as the stationary density of a substance that is generated in all points of the state space and flows along the system trajectories. If the stationary density is bounded everywhere except at a singularity in the origin, then almost all trajectories tend towards the origin. The weaker notion of stability allows for applications also in situations where Lyapunov's theorem cannot be used. Moreover, the new criterion has a striking convexity property related to control synthesis.}
}
@article{rossReviewPseudospectralOptimal2012,
title = {A Review of Pseudospectral Optimal Control: {{From}} Theory to Flight},
shorttitle = {A Review of Pseudospectral Optimal Control},
author = {Ross, I. Michael and Karpenko, Mark},
year = {2012},
month = dec,
journal = {Annual Reviews in Control},
volume = {36},
number = {2},
pages = {182--197},
issn = {1367-5788},
doi = {10.1016/j.arcontrol.2012.09.002},
url = {http://www.sciencedirect.com/science/article/pii/S1367578812000375},
urldate = {2016-04-13},
abstract = {The home space for optimal control is a Sobolev space. The home space for pseudospectral theory is also a Sobolev space. It thus seems natural to combine pseudospectral theory with optimal control theory and construct ``pseudospectral optimal control theory'', a term coined by Ross. In this paper, we review key theoretical results in pseudospectral optimal control that have proven to be critical for a successful flight. Implementation details of flight demonstrations onboard NASA spacecraft are discussed along with emerging trends and techniques in both theory and practice. The 2011 launch of pseudospectral optimal control in embedded platforms is changing the way in which we see solutions to challenging control problems in aerospace and autonomous systems.}
}
@book{sageOptimumSystemsControl1977,
title = {Optimum {{Systems Control}}},
author = {Sage, Andrew P. and White, III, Chelsea C.},
year = {1977},
edition = {2},
publisher = {Prentice-Hall},
googlebooks = {5vZQAAAAMAAJ},
isbn = {978-0-13-638296-6},
langid = {english}
}
@article{seilerIntroductionDiskMargins2020,
title = {An {{Introduction}} to {{Disk Margins}} [{{Lecture Notes}}]},
author = {Seiler, Peter and Packard, Andrew and Gahinet, Pascal},
year = {2020},
journal = {IEEE Control Systems Magazine},
volume = {40},
number = {5},
pages = {78--95},
issn = {1941-000X},
doi = {10.1109/MCS.2020.3005277},
abstract = {This article provides a tutorial introduction to disk margins. These are robust stability measures that account for simuitaneous gain and phase perturbations in a feedback system. The article first reviews the classical (gain only and phase -only) margins and their limitations. This motivates the use of disk margins, which are defined using a set of perturbations that has simuitaneous gain and phase variations. A necessary and sufficient condition is provided to compute the disk margin for a single -input, single -output feedback system. Frequency -dependent disk margins can also be computed, yielding additional insight. The article concludes with a discussion of stability margins for multiple -input, multiple output (MIMO) feedback systems. A typical approach is to assess robust stability "loop -at -a -time," with a perturbation introduced into a single channel and all other channels held at their nominal values. MIMO disk margins provide a useful extension to consider simultaneous variations in multiple channels. This multiloop analysis can provide a more accurate robustness assessment as compared to the one -loop -at -a -time approach.}
}
@article{shakedGeneralTransferFunction1976,
title = {A General Transfer Function Approach to Linear Stationary Filtering and Steady-State Optimal Control Problems},
  author = {Shaked, Uri},
year = {1976},
journal = {International Journal of Control},
volume = {24},
number = {6},
pages = {741--770},
issn = {0020-7179},
doi = {10.1080/00207177608932860},
url = {http://www.tandfonline.com/doi/abs/10.1080/00207177608932860},
urldate = {2014-04-23},
abstract = {The transfer-function form of the stationary algebraic Riccati equation is investigated. A generalized spectral factorization technique that copes with unstable systems is introduced. This factorization is used to provide an efficient way of solving the Riccati equation and to establish the exact equivalence between the time domain and the transfer-function approaches to the linear stationary filtering and the deterministic optimal control problems. The proposed method is easily extended to cope, in the filtering problem, with coloured signals and the superiority of its computational method for systems having a small number of inputs and outputs is demonstrated. Finally, an application of the transfer-function approach in determining the class of all systems that share the same optimal solution is introduced.}
}
@book{schattlerGeometricOptimalControl2012,
title = {Geometric {{Optimal Control}}: {{Theory}}, {{Methods}} and {{Examples}}},
shorttitle = {Geometric {{Optimal Control}}},
author = {Sch{\"a}ttler, Heinz and Ledzewicz, Urszula},
year = {2012},
month = jun,
  edition = {1},
publisher = {Springer},
address = {New York},
isbn = {978-1-4614-3833-5},
langid = {english}
}
@book{simaAlgorithmsLinearQuadraticOptimization1996,
title = {Algorithms for {{Linear-Quadratic Optimization}}},
author = {Sima, Vasile},
year = {1996},
month = mar,
  edition = {1},
publisher = {{Chapman and Hall/CRC}},
address = {New York},
abstract = {This textbook offers theoretical, algorithmic and computational guidelines for solving the most frequently encountered linear-quadratic optimization problems. It provides an overview of recent advances in control and systems theory, numerical line algebra, numerical optimization, scientific computations and software engineering.},
isbn = {978-0-8247-9612-9},
langid = {english}
}
@article{singhExtendedLinearQuadratic2017,
title = {An Extended Linear Quadratic Regulator for {{LTI}} Systems with Exogenous Inputs},
author = {Singh, Abhinav Kumar and Pal, Bikash C.},
year = {2017},
month = feb,
journal = {Automatica},
  volume = {76},
  pages = {10--16},
issn = {0005-1098},
doi = {10.1016/j.automatica.2016.10.014},
url = {http://www.sciencedirect.com/science/article/pii/S000510981630406X},
urldate = {2017-09-14},
  abstract = {This paper proposes a cost effective control law for a linear time invariant (LTI) system having an extra set of exogenous inputs (or external disturbances) besides the traditional set of control inputs. No assumption is made with regard to a priori knowledge of the modeling equations for the exogenous inputs. The problem of optimal control for such a system is defined in the standard framework of linear quadratic control and an extended linear quadratic regulator (ELQR) is proposed as the solution to the problem. The ELQR approach is demonstrated through an example and is shown to be significantly more cost effective than currently available approaches for linear quadratic control.}
}
@book{skeltonUnifiedAlgebraicApproach1998,
title = {A {{Unified Algebraic Approach To Control Design}}},
author = {Skelton, Robert E. and Iwasaki, T. and Grigoriadis, Karolos M.},
year = {1998},
edition = {2017 e-book},
publisher = {CRC Press},
address = {London ; Bristol, PA},
url = {https://doi.org/10.1201/9781315136523},
isbn = {978-0-7484-0592-3},
langid = {english}
}
@book{skogestadMultivariableFeedbackControl2005,
title = {Multivariable {{Feedback Control}}: {{Analysis}} and {{Design}}},
shorttitle = {Multivariable {{Feedback Control}}},
author = {Skogestad, Sigurd and Postlethwaite, Ian},
year = {2005},
month = nov,
edition = {2},
publisher = {Wiley},
url = {https://folk.ntnu.no/skoge/book/},
isbn = {0-470-01167-X}
}
@book{speyerPrimerOptimalControl2010,
title = {Primer on {{Optimal Control Theory}}},
author = {Speyer, Jason L. and Jacobson, David H.},
year = {2010},
month = may,
series = {Advances in {{Design}} and {{Control}}},
number = {20},
publisher = {{Society for Industrial and Applied Mathematics}},
address = {Philadelphia},
isbn = {978-0-89871-694-8},
langid = {english}
}
@article{sussmann300YearsOptimal1997,
title = {300 Years of Optimal Control: From the Brachystochrone to the Maximum Principle},
shorttitle = {300 Years of Optimal Control},
  author = {Sussmann, H. J. and Willems, J. C.},
year = {1997},
month = jun,
journal = {IEEE Control Systems},
volume = {17},
number = {3},
pages = {32--44},
issn = {1066-033X},
doi = {10.1109/37.588098},
abstract = {An historical review of the development of optimal control from the publication of the brachystochrone problem by Johann Bernoulli in 1696. Ideas on curve minimization already known at the time are briefly outlined. The brachystochrone problem is stated and Bernoulli's solution is given. Bernoulli's personality and his family are discussed. The article then traces the development of the necessary conditions for a minimum, from the Euler-Lagrange equations to the work of Legendre and Weierstrass and, eventually, the maximum principle of optimal control theory}
}
@book{troutmanVariationalCalculusOptimal1995,
title = {Variational {{Calculus}} and {{Optimal Control}}: {{Optimization}} with {{Elementary Convexity}}},
shorttitle = {Variational {{Calculus}} and {{Optimal Control}}},
author = {Troutman, John L.},
year = {1995},
month = dec,
  edition = {2},
publisher = {Springer},
address = {New York},
abstract = {An introduction to the variational methods used to formulate and solve mathematical and physical problems, allowing the reader an insight into the systematic use of elementary (partial) convexity of differentiable functions in Euclidian space. By helping students directly characterize the solutions for many minimization problems, the text serves as a prelude to the field theory for sufficiency, laying as it does the groundwork for further explorations in mathematics, physics, mechanical and electrical engineering, as well as computer science.},
isbn = {978-0-387-94511-8},
langid = {english}
}
@book{vidyasagarControlSystemSynthesis2011,
title = {Control {{System Synthesis}}: {{A Factorization Approach}}},
shorttitle = {Control {{System Synthesis}}},
  author = {Vidyasagar, Mathukumalli},
year = {2011},
series = {Synthesis {{Lectures}} on {{Control}} and {{Mechatronics}}},
publisher = {Morgan \& Claypool Publishers}
}
@book{vinterOptimalControl2010,
title = {Optimal {{Control}}},
author = {Vinter, Richard},
year = {2010},
month = jul,
publisher = {Birkh{\"a}user},
address = {Boston},
url = {https://link.springer.com/book/10.1007/978-0-8176-8086-2},
isbn = {978-0-8176-4990-6},
langid = {english}
}
@article{youlaModernWienerHopf1976,
title = {Modern {{Wiener}}--{{Hopf}} Design of Optimal Controllers {{Part I}}: {{The}} Single-Input-Output Case},
shorttitle = {Modern {{Wiener}}--{{Hopf}} Design of Optimal Controllers {{Part I}}},
  author = {Youla, D. C. and Bongiorno, Joseph J. and Jabr, H.},
year = {1976},
month = feb,
journal = {IEEE Transactions on Automatic Control},
volume = {21},
number = {1},
pages = {3--13},
issn = {0018-9286},
doi = {10.1109/TAC.1976.1101139},
abstract = {An analytical feedback design technique is presented here for single-input-output processes which are characterized by their rational transfer functions. The design procedure accounts for the topological structure of the feedback system ensuring asymptotic stability for the closed-loop configuration. The plant or process being controlled can be unstable and/or nonminimum phase. The treatment of feedback sensor noise, disturbance inputs, and process saturation is another major contribution of this work. The cornerstone in the development is the selection of a performance index based on sound engineering considerations. It is these considerations, in fact, which ensure the existence of an optimal compensator for the system and make the performance index a natural one for the problem at hand.}
}
@article{youlaModernWienerHopfDesign1976,
title = {Modern {{Wiener-Hopf}} Design of Optimal Controllers--{{Part II}}: {{The}} Multivariable Case},
shorttitle = {Modern {{Wiener-Hopf}} Design of Optimal Controllers--{{Part II}}},
author = {Youla, D. C. and Jabr, H. A. and Bongiorno, Joseph J.},
year = {1976},
month = jun,
journal = {IEEE Transactions on Automatic Control},
volume = {21},
number = {3},
pages = {319--338},
issn = {0018-9286},
doi = {10.1109/TAC.1976.1101223},
abstract = {In many modern-day control problems encountered in the fluid, petroleum, power, gas and paper industries, cross coupling (interaction) between controlled and manipulated variables can be so severe that any attempt to employ single-loop controllers results in unacceptable performance. In all these situations, any workable control strategy must take into account the true multivariable nature of the plant and address itself directly to the design of a compatible multivariable controller. Any practical design technique must be able to cope with load disturbance, plant saturation, measurement noise, process lag, sensitivity and also incorporate suitable criteria delimiting transient behavior and steady-state performance. These difficulties, when compounded by the fact that many plants (such as chemical reactors) are inherently open-loop unstable have hindered the development of an inclusive frequency-domain analytic design methodology. However, a solution based on a least-square Wiener-Hopf minimization of an appropriately chosen cost functional is now available. The optimal controller obtained by this method guarantees an asymptotically stable and dynamical closed-loop configuration irrespective of whether or not the plant is proper, stable, or minimum-phase and also permits the stability margin of the optimal design to be ascertained in advance. The main purpose of this paper is to lay bare the physical assumptions underlying the choice of model and to present an explicit formula for the optimal controller.}
}
@article{zemanekTimeoptimalControlBilinear2017,
title = {Time-Optimal {{Control}} for {{Bilinear Nonnegative-in-control Systems}}: {{Application}} to {{Magnetic Manipulation}}},
shorttitle = {Time-Optimal {{Control}} for {{Bilinear Nonnegative-in-control Systems}}},
author = {Zem{\'a}nek, Ji{\v r}{\'i} and {\v C}elikovsk{\'y}, Sergej and Hur{\'a}k, Zden{\v e}k},
year = {2017},
month = jul,
journal = {IFAC-PapersOnLine},
series = {20th {{IFAC World Congress}}},
volume = {50},
number = {1},
pages = {16032--16039},
issn = {2405-8963},
doi = {10.1016/j.ifacol.2017.08.1916},
url = {https://www.sciencedirect.com/science/article/pii/S2405896317325430},
urldate = {2018-12-19},
abstract = {The paper describes a simple time-optimal control strategy for a class of second-order bilinear systems with nonnegative inputs. The structure of the model is motivated by the task of noncontact manipulation of an object in a planar force field generated by a single source; such setup constitutes a basic building block for a planar manipulation by an array of force field sources. The nonnegative-in-control property means that an object (particle) placed freely in the field can only feel an attractive force towards the source. In this paper we further restrict the control inputs to a binary signal---the field can be switched on and off. The control objective is to bring the object to the origin (where the source of the force field is located) as fast as possible. The optimal switching strategy is proposed using geometric arguments and verified using numerical simulations and experiments with a laboratory platform for noncontact magnetic manipulation.}
}