Actual source code: almm.c
#include <../src/tao/constrained/impls/almm/almm.h>
#include <petsctao.h>
#include <petsc/private/petscimpl.h>
#include <petsc/private/vecimpl.h>

static PetscErrorCode TaoALMMCombinePrimal_Private(Tao, Vec, Vec, Vec);
static PetscErrorCode TaoALMMCombineDual_Private(Tao, Vec, Vec, Vec);
static PetscErrorCode TaoALMMSplitPrimal_Private(Tao, Vec, Vec, Vec);
static PetscErrorCode TaoALMMComputeOptimalityNorms_Private(Tao);
static PetscErrorCode TaoALMMComputeAugLagAndGradient_Private(Tao);
static PetscErrorCode TaoALMMComputePHRLagAndGradient_Private(Tao);

static PetscErrorCode TaoSolve_ALMM(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;
  TaoConvergedReason reason;
  PetscReal updated;

  /* reset initial multiplier/slack guess */
  if (!tao->recycle) {
    if (tao->ineq_constrained) {
      VecZeroEntries(auglag->Ps);
      TaoALMMCombinePrimal_Private(tao, auglag->Px, auglag->Ps, auglag->P);
      VecZeroEntries(auglag->Yi);
    }
    if (tao->eq_constrained) VecZeroEntries(auglag->Ye);
  }

  /* compute initial nonlinear Lagrangian and its derivatives */
  (*auglag->sub_obj)(tao);
  TaoALMMComputeOptimalityNorms_Private(tao);
  /* print initial step and check convergence */
  PetscInfo(tao, "Solving with %s formulation\n", TaoALMMTypes[auglag->type]);
  TaoLogConvergenceHistory(tao, auglag->Lval, auglag->gnorm, auglag->cnorm, tao->ksp_its);
  TaoMonitor(tao, tao->niter, auglag->fval, auglag->gnorm, auglag->cnorm, 0.0);
  PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
  /* set initial penalty factor and inner solver tolerance */
  switch (auglag->type) {
  case TAO_ALMM_CLASSIC:
    auglag->mu = auglag->mu0;
    break;
  case TAO_ALMM_PHR:
    auglag->cenorm = 0.0;
    if (tao->eq_constrained) VecDot(auglag->Ce, auglag->Ce, &auglag->cenorm);
    auglag->cinorm = 0.0;
    if (tao->ineq_constrained) {
      VecCopy(auglag->Ci, auglag->Ciwork);
      VecScale(auglag->Ciwork, -1.0);
      VecPointwiseMax(auglag->Ciwork, auglag->Cizero, auglag->Ciwork);
      VecDot(auglag->Ciwork, auglag->Ciwork, &auglag->cinorm);
    }
    /* determine initial penalty factor based on the balance of constraint violation and objective function value */
    auglag->mu = PetscMax(1.e-6, PetscMin(10.0, 2.0 * PetscAbsReal(auglag->fval) / (auglag->cenorm + auglag->cinorm)));
    break;
  default:
    break;
  }
  auglag->gtol = auglag->gtol0;
  PetscInfo(tao, "Initial penalty: %.2g\n", (double)auglag->mu);

  /* start aug-lag outer loop */
  while (tao->reason == TAO_CONTINUE_ITERATING) {
    ++tao->niter;
    /* update subsolver tolerance */
    PetscInfo(tao, "Subsolver tolerance: ||G|| <= %e\n", (double)auglag->gtol);
    TaoSetTolerances(auglag->subsolver, auglag->gtol, 0.0, 0.0);
    /* solve the bound-constrained or unconstrained subproblem */
    TaoSolve(auglag->subsolver);
    TaoGetConvergedReason(auglag->subsolver, &reason);
    tao->ksp_its += auglag->subsolver->ksp_its;
    if (reason != TAO_CONVERGED_GATOL) PetscInfo(tao, "Subsolver failed to converge, reason: %s\n", TaoConvergedReasons[reason]);
    /* evaluate solution and test convergence */
    (*auglag->sub_obj)(tao);
    TaoALMMComputeOptimalityNorms_Private(tao);
    /* decide whether to update multipliers or not */
    updated = 0.0;
    if (auglag->cnorm <= auglag->ytol) {
      PetscInfo(tao, "Multipliers updated: ||C|| <= %e\n", (double)auglag->ytol);
      /* constraints are good, update multipliers and convergence tolerances */
      if (tao->eq_constrained) {
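        /* first-order multiplier update: Ye <- Ye + mu*Ce, then clamped to [ye_min, ye_max] below */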
        VecAXPY(auglag->Ye, auglag->mu, auglag->Ce);
        VecSet(auglag->Cework, auglag->ye_max);
        VecPointwiseMin(auglag->Ye, auglag->Cework, auglag->Ye);
        VecSet(auglag->Cework, auglag->ye_min);
        VecPointwiseMax(auglag->Ye, auglag->Cework, auglag->Ye);
      }
      if (tao->ineq_constrained) {
        VecAXPY(auglag->Yi, auglag->mu, auglag->Ci);
        VecSet(auglag->Ciwork, auglag->yi_max);
        VecPointwiseMin(auglag->Yi, auglag->Ciwork, auglag->Yi);
        VecSet(auglag->Ciwork, auglag->yi_min);
        VecPointwiseMax(auglag->Yi, auglag->Ciwork, auglag->Yi);
      }
      /* tolerances are updated only for non-PHR methods */
      if (auglag->type != TAO_ALMM_PHR) {
        auglag->ytol = PetscMax(tao->catol, auglag->ytol / PetscPowReal(auglag->mu, auglag->mu_pow_good));
        auglag->gtol = PetscMax(tao->gatol, auglag->gtol / auglag->mu);
      }
      updated = 1.0;
    } else {
      /* constraints are bad, update penalty factor */
      auglag->mu = PetscMin(auglag->mu_max, auglag->mu_fac * auglag->mu);
      /* tolerances are reset only for non-PHR methods */
      if (auglag->type != TAO_ALMM_PHR) {
        auglag->ytol = PetscMax(tao->catol, 0.1 / PetscPowReal(auglag->mu, auglag->mu_pow_bad));
        auglag->gtol = PetscMax(tao->gatol, 1.0 / auglag->mu);
      }
      PetscInfo(tao, "Penalty increased: mu = %.2g\n", (double)auglag->mu);
    }
    TaoLogConvergenceHistory(tao, auglag->fval, auglag->gnorm, auglag->cnorm, tao->ksp_its);
    TaoMonitor(tao, tao->niter, auglag->fval, auglag->gnorm, auglag->cnorm, updated);
    PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
  }

  return 0;
}

static PetscErrorCode TaoView_ALMM(Tao tao, PetscViewer viewer)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;
  PetscBool isascii;

  PetscViewerASCIIPushTab(viewer);
  TaoView(auglag->subsolver, viewer);
  PetscViewerASCIIPopTab(viewer);
  PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
  if (isascii) {
    PetscViewerASCIIPushTab(viewer);
    PetscViewerASCIIPrintf(viewer, "ALMM Formulation Type: %s\n", TaoALMMTypes[auglag->type]);
    PetscViewerASCIIPopTab(viewer);
  }
  return 0;
}

static PetscErrorCode TaoSetUp_ALMM(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;
  VecType vec_type;
  Vec SL, SU;
  PetscBool is_cg = PETSC_FALSE, is_lmvm = PETSC_FALSE;

  TaoComputeVariableBounds(tao);
  /* alias base vectors and create extras */
  VecGetType(tao->solution, &vec_type);
  auglag->Px = tao->solution;
  if (!tao->gradient) { /* base gradient */
    VecDuplicate(tao->solution, &tao->gradient);
  }
  auglag->LgradX = tao->gradient;
  if (!auglag->Xwork) { /* opt var work vector */
    VecDuplicate(tao->solution, &auglag->Xwork);
  }
  if (tao->eq_constrained) {
    auglag->Ce = tao->constraints_equality;
    auglag->Ae = tao->jacobian_equality;
    if (!auglag->Ye) { /* equality multipliers */
      VecDuplicate(auglag->Ce, &auglag->Ye);
    }
    if (!auglag->Cework) VecDuplicate(auglag->Ce, &auglag->Cework);
  }
  if (tao->ineq_constrained) {
    auglag->Ci = tao->constraints_inequality;
    auglag->Ai = tao->jacobian_inequality;
    if (!auglag->Yi) { /* inequality multipliers */
      VecDuplicate(auglag->Ci, &auglag->Yi);
    }
    if (!auglag->Ciwork) VecDuplicate(auglag->Ci, &auglag->Ciwork);
    if (!auglag->Cizero) {
      VecDuplicate(auglag->Ci, &auglag->Cizero);
      VecZeroEntries(auglag->Cizero);
    }
    if (!auglag->Ps) { /* slack vars */
      VecDuplicate(auglag->Ci, &auglag->Ps);
    }
    if (!auglag->LgradS) { /* slack component of Lagrangian gradient */
      VecDuplicate(auglag->Ci, &auglag->LgradS);
    }
    /* create vector for combined primal space and the associated communication objects */
    if (!auglag->P) {
      PetscMalloc1(2, &auglag->Parr);
      auglag->Parr[0] = auglag->Px;
      auglag->Parr[1] = auglag->Ps;
      VecConcatenate(2, auglag->Parr, &auglag->P, &auglag->Pis);
      PetscMalloc1(2, &auglag->Pscatter);
      VecScatterCreate(auglag->P, auglag->Pis[0], auglag->Px, NULL, &auglag->Pscatter[0]);
      VecScatterCreate(auglag->P, auglag->Pis[1], auglag->Ps, NULL, &auglag->Pscatter[1]);
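      /* the scatters map the combined vector P onto its X and S components with SCATTER_FORWARD, and gather X and S back into P with SCATTER_REVERSE */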
    }
    if (tao->eq_constrained) {
      /* create vector for combined dual space and the associated communication objects */
      if (!auglag->Y) {
        PetscMalloc1(2, &auglag->Yarr);
        auglag->Yarr[0] = auglag->Ye;
        auglag->Yarr[1] = auglag->Yi;
        VecConcatenate(2, auglag->Yarr, &auglag->Y, &auglag->Yis);
        PetscMalloc1(2, &auglag->Yscatter);
        VecScatterCreate(auglag->Y, auglag->Yis[0], auglag->Ye, NULL, &auglag->Yscatter[0]);
        VecScatterCreate(auglag->Y, auglag->Yis[1], auglag->Yi, NULL, &auglag->Yscatter[1]);
      }
      if (!auglag->C) VecDuplicate(auglag->Y, &auglag->C);
    } else {
      if (!auglag->C) auglag->C = auglag->Ci;
      if (!auglag->Y) auglag->Y = auglag->Yi;
    }
  } else {
    if (!auglag->P) auglag->P = auglag->Px;
    if (!auglag->G) auglag->G = auglag->LgradX;
    if (!auglag->C) auglag->C = auglag->Ce;
    if (!auglag->Y) auglag->Y = auglag->Ye;
  }
  /* initialize parameters */
  if (auglag->type == TAO_ALMM_PHR) {
    auglag->mu_fac = 10.0;
    auglag->yi_min = 0.0;
    auglag->ytol0 = 0.5;
    auglag->gtol0 = tao->gatol;
    if (tao->gatol_changed && tao->catol_changed) {
      PetscInfo(tao, "TAOALMM with PHR: different gradient and constraint tolerances are not supported, setting catol = gatol\n");
      tao->catol = tao->gatol;
    }
  }
  /* set the Lagrangian formulation type for the subsolver */
  switch (auglag->type) {
  case TAO_ALMM_CLASSIC:
    auglag->sub_obj = TaoALMMComputeAugLagAndGradient_Private;
    break;
  case TAO_ALMM_PHR:
    auglag->sub_obj = TaoALMMComputePHRLagAndGradient_Private;
    break;
  default:
    break;
  }
  /* set up the subsolver */
  TaoSetSolution(auglag->subsolver, auglag->P);
  TaoSetObjective(auglag->subsolver, TaoALMMSubsolverObjective_Private, (void *)auglag);
  TaoSetObjectiveAndGradient(auglag->subsolver, NULL, TaoALMMSubsolverObjectiveAndGradient_Private, (void *)auglag);
  if (tao->bounded) {
    /* make sure that the subsolver is a bound-constrained method */
    PetscObjectTypeCompare((PetscObject)auglag->subsolver, TAOCG, &is_cg);
    PetscObjectTypeCompare((PetscObject)auglag->subsolver, TAOLMVM, &is_lmvm);
    if (is_cg) {
      TaoSetType(auglag->subsolver, TAOBNCG);
244: PetscInfo(tao, "TAOCG detected for bound-constrained problem, switching to TAOBNCG instead.");
    }
    if (is_lmvm) {
      TaoSetType(auglag->subsolver, TAOBQNLS);
248: PetscInfo(tao, "TAOLMVM detected for bound-constrained problem, switching to TAOBQNLS instead.");
    }
    /* create lower and upper bound clone vectors for subsolver */
    if (!auglag->PL) VecDuplicate(auglag->P, &auglag->PL);
    if (!auglag->PU) VecDuplicate(auglag->P, &auglag->PU);
    if (tao->ineq_constrained) {
      /* create lower and upper bounds for slack, set lower to 0 */
      VecDuplicate(auglag->Ci, &SL);
      VecSet(SL, 0.0);
      VecDuplicate(auglag->Ci, &SU);
      VecSet(SU, PETSC_INFINITY);
      /* combine opt var bounds with slack bounds */
      TaoALMMCombinePrimal_Private(tao, tao->XL, SL, auglag->PL);
      TaoALMMCombinePrimal_Private(tao, tao->XU, SU, auglag->PU);
      /* destroy work vectors */
      VecDestroy(&SL);
      VecDestroy(&SU);
    } else {
      /* no inequality constraints, just copy bounds into the subsolver */
      VecCopy(tao->XL, auglag->PL);
      VecCopy(tao->XU, auglag->PU);
    }
    TaoSetVariableBounds(auglag->subsolver, auglag->PL, auglag->PU);
  }
  TaoSetUp(auglag->subsolver);
  auglag->G = auglag->subsolver->gradient;

  return 0;
}

static PetscErrorCode TaoDestroy_ALMM(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;

  TaoDestroy(&auglag->subsolver);
  if (tao->setupcalled) {
    VecDestroy(&auglag->Xwork); /* opt work */
    if (tao->eq_constrained) {
      VecDestroy(&auglag->Ye); /* equality multipliers */
      VecDestroy(&auglag->Cework); /* equality work vector */
    }
    if (tao->ineq_constrained) {
      VecDestroy(&auglag->Ps); /* slack vars */
      auglag->Parr[0] = NULL; /* clear pointer to tao->solution, will be destroyed by TaoDestroy() shell */
      PetscFree(auglag->Parr); /* array of primal vectors */
      VecDestroy(&auglag->LgradS); /* slack grad */
      VecDestroy(&auglag->Cizero); /* zero vector for pointwise max */
      VecDestroy(&auglag->Yi); /* inequality multipliers */
      VecDestroy(&auglag->Ciwork); /* inequality work vector */
      VecDestroy(&auglag->P); /* combo primal */
      ISDestroy(&auglag->Pis[0]); /* index set for X inside P */
      ISDestroy(&auglag->Pis[1]); /* index set for S inside P */
      PetscFree(auglag->Pis); /* array of P index sets */
      VecScatterDestroy(&auglag->Pscatter[0]);
      VecScatterDestroy(&auglag->Pscatter[1]);
      PetscFree(auglag->Pscatter);
      if (tao->eq_constrained) {
        VecDestroy(&auglag->Y); /* combo multipliers */
        PetscFree(auglag->Yarr); /* array of dual vectors */
        VecDestroy(&auglag->C); /* combo constraints */
        ISDestroy(&auglag->Yis[0]); /* index set for Ye inside Y */
        ISDestroy(&auglag->Yis[1]); /* index set for Yi inside Y */
        PetscFree(auglag->Yis);
        VecScatterDestroy(&auglag->Yscatter[0]);
        VecScatterDestroy(&auglag->Yscatter[1]);
        PetscFree(auglag->Yscatter);
      }
    }
    if (tao->bounded) {
      VecDestroy(&auglag->PL); /* lower bounds for subsolver */
      VecDestroy(&auglag->PU); /* upper bounds for subsolver */
    }
  }
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetType_C", NULL);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetType_C", NULL);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetSubsolver_C", NULL);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetSubsolver_C", NULL);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetMultipliers_C", NULL);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetMultipliers_C", NULL);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetPrimalIS_C", NULL);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetDualIS_C", NULL);
  PetscFree(tao->data);
  return 0;
}

static PetscErrorCode TaoSetFromOptions_ALMM(Tao tao, PetscOptionItems *PetscOptionsObject)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;
  PetscInt i;

  PetscOptionsHeadBegin(PetscOptionsObject, "Augmented Lagrangian multiplier method solves problems with general constraints by converting them into a sequence of unconstrained problems.");
  PetscOptionsReal("-tao_almm_mu_init", "initial penalty parameter", "", auglag->mu0, &auglag->mu0, NULL);
  PetscOptionsReal("-tao_almm_mu_factor", "increase factor for the penalty parameter", "", auglag->mu_fac, &auglag->mu_fac, NULL);
341: PetscOptionsReal("-tao_almm_mu_power_good", "exponential for penalty parameter when multiplier update is accepted", "", auglag->mu_pow_good, &auglag->mu_pow_good, NULL);
342: PetscOptionsReal("-tao_almm_mu_power_bad", "exponential for penalty parameter when multiplier update is rejected", "", auglag->mu_pow_bad, &auglag->mu_pow_bad, NULL);
343: PetscOptionsReal("-tao_almm_mu_max", "maximum safeguard for penalty parameter updates", "", auglag->mu_max, &auglag->mu_max, NULL);
344: PetscOptionsReal("-tao_almm_ye_min", "minimum safeguard for equality multiplier updates", "", auglag->ye_min, &auglag->ye_min, NULL);
345: PetscOptionsReal("-tao_almm_ye_max", "maximum safeguard for equality multipliers updates", "", auglag->ye_max, &auglag->ye_max, NULL);
346: PetscOptionsReal("-tao_almm_yi_min", "minimum safeguard for inequality multipliers updates", "", auglag->yi_min, &auglag->yi_min, NULL);
347: PetscOptionsReal("-tao_almm_yi_max", "maximum safeguard for inequality multipliers updates", "", auglag->yi_max, &auglag->yi_max, NULL);
  PetscOptionsEnum("-tao_almm_type", "augmented Lagrangian formulation type for the subproblem", "TaoALMMType", TaoALMMTypes, (PetscEnum)auglag->type, (PetscEnum *)&auglag->type, NULL);
  PetscOptionsHeadEnd();
  TaoSetOptionsPrefix(auglag->subsolver, ((PetscObject)tao)->prefix);
  TaoAppendOptionsPrefix(auglag->subsolver, "tao_almm_subsolver_");
  TaoSetFromOptions(auglag->subsolver);
  for (i = 0; i < tao->numbermonitors; i++) {
    PetscObjectReference((PetscObject)tao->monitorcontext[i]);
    TaoSetMonitor(auglag->subsolver, tao->monitor[i], tao->monitorcontext[i], tao->monitordestroy[i]);
    if (tao->monitor[i] == TaoMonitorDefault || tao->monitor[i] == TaoDefaultCMonitor || tao->monitor[i] == TaoDefaultGMonitor || tao->monitor[i] == TaoDefaultSMonitor) auglag->info = PETSC_TRUE;
  }
  return 0;
}

/* -------------------------------------------------------- */

/*MC
  TAOALMM - Augmented Lagrangian multiplier method for solving nonlinear optimization problems with general constraints.

  Options Database Keys:
+ -tao_almm_mu_init <real> - initial penalty parameter (default: 10.)
. -tao_almm_mu_factor <real> - increase factor for the penalty parameter (default: 10.)
. -tao_almm_mu_max <real> - maximum safeguard for penalty parameter updates (default: 1.e20)
. -tao_almm_mu_power_good <real> - exponent applied to the penalty parameter in the tolerance update when a multiplier update is accepted (default: 0.9)
. -tao_almm_mu_power_bad <real> - exponent applied to the penalty parameter in the tolerance update when a multiplier update is rejected (default: 0.1)
. -tao_almm_ye_min <real> - minimum safeguard for equality multiplier updates (default: -1.e20)
. -tao_almm_ye_max <real> - maximum safeguard for equality multiplier updates (default: 1.e20)
. -tao_almm_yi_min <real> - minimum safeguard for inequality multiplier updates (default: -1.e20)
. -tao_almm_yi_max <real> - maximum safeguard for inequality multiplier updates (default: 1.e20)
- -tao_almm_type <classic,phr> - change formulation of the augmented Lagrangian merit function for the subproblem (default: classic)

  Level: beginner

  Notes:
  This method converts a constrained problem into a sequence of unconstrained problems via the augmented
  Lagrangian merit function. Bound constraints are pushed down to the subproblem without any modifications.

  Two formulations are offered for the subproblem: the canonical Hestenes-Powell augmented Lagrangian with slack
  variables for inequality constraints, and a slack-less Powell-Hestenes-Rockafellar (PHR) formulation that applies a
  pointwise max() penalty to the inequality constraints. The canonical formulation typically converges faster,
  but PHR may be preferable for problems with a large number of inequality constraints because it avoids
  enlarging the subproblem with slack variables.

  The subproblem is solved using a nested first-order TAO solver. The user can retrieve a pointer to
  the subsolver via `TaoALMMGetSubsolver()` or pass command line arguments to it using the
  "-tao_almm_subsolver_" prefix. Currently, `TAOALMM` does not support second-order methods for the
  subproblem. It is also highly recommended that the chosen subsolver use a trust-region strategy
  for globalization (default: `TAOBQNKTR`), especially if the outer problem features bound constraints.
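
  Example Usage:
  A minimal driver sketch; the callbacks (`FormObjGrad`, `FormEqCons`, `FormEqJac`), the vector/matrix objects
  (`x`, `ce`, `Ae`), and the context `user` are illustrative placeholders, not part of this implementation.
.vb
  Tao tao;

  TaoCreate(PETSC_COMM_WORLD, &tao);
  TaoSetType(tao, TAOALMM);
  TaoSetSolution(tao, x);
  TaoSetObjectiveAndGradient(tao, NULL, FormObjGrad, &user);
  TaoSetEqualityConstraintsRoutine(tao, ce, FormEqCons, &user);
  TaoSetJacobianEqualityRoutine(tao, Ae, Ae, FormEqJac, &user);
  TaoSetFromOptions(tao);
  TaoSolve(tao);
  TaoDestroy(&tao);
.ve

  The outer loop follows the scheme below: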
.vb
  while unconverged
    solve argmin_x L(x) s.t. l <= x <= u
    if ||c|| <= y_tol
      if ||c|| <= c_tol && ||Lgrad|| <= g_tol:
        problem converged, return solution
      else
        constraints sufficiently improved
        update multipliers and tighten tolerances
      endif
    else
      constraints did not improve
      update penalty and loosen tolerances
    endif
  endwhile
.ve
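
  After `TaoSolve()`, the combined multiplier vector can be examined with `TaoALMMGetMultipliers()`; a minimal
  sketch (the returned `Vec` is owned by the solver and only borrowed here):
.vb
  Vec y;

  TaoALMMGetMultipliers(tao, &y);
  VecView(y, PETSC_VIEWER_STDOUT_WORLD);
.ve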

.seealso: `TAOALMM`, `Tao`, `TaoALMMGetType()`, `TaoALMMSetType()`, `TaoALMMSetSubsolver()`, `TaoALMMGetSubsolver()`,
          `TaoALMMGetMultipliers()`, `TaoALMMSetMultipliers()`, `TaoALMMGetPrimalIS()`, `TaoALMMGetDualIS()`
M*/
PETSC_EXTERN PetscErrorCode TaoCreate_ALMM(Tao tao)
{
  TAO_ALMM *auglag;

  PetscNew(&auglag);

  tao->ops->destroy = TaoDestroy_ALMM;
  tao->ops->setup = TaoSetUp_ALMM;
  tao->ops->setfromoptions = TaoSetFromOptions_ALMM;
  tao->ops->view = TaoView_ALMM;
  tao->ops->solve = TaoSolve_ALMM;

  tao->gatol = 1.e-5;
  tao->grtol = 0.0;
  tao->gttol = 0.0;
  tao->catol = 1.e-5;
  tao->crtol = 0.0;

  tao->data = (void *)auglag;
  auglag->parent = tao;
  auglag->mu0 = 10.0;
  auglag->mu = auglag->mu0;
  auglag->mu_fac = 10.0;
  auglag->mu_max = PETSC_INFINITY;
  auglag->mu_pow_good = 0.9;
  auglag->mu_pow_bad = 0.1;
  auglag->ye_min = PETSC_NINFINITY;
  auglag->ye_max = PETSC_INFINITY;
  auglag->yi_min = PETSC_NINFINITY;
  auglag->yi_max = PETSC_INFINITY;
  auglag->ytol0 = 0.1 / PetscPowReal(auglag->mu0, auglag->mu_pow_bad);
  auglag->ytol = auglag->ytol0;
  auglag->gtol0 = 1.0 / auglag->mu0;
  auglag->gtol = auglag->gtol0;

  auglag->sub_obj = TaoALMMComputeAugLagAndGradient_Private;
  auglag->type = TAO_ALMM_CLASSIC;
  auglag->info = PETSC_FALSE;

  TaoCreate(PetscObjectComm((PetscObject)tao), &auglag->subsolver);
  TaoSetType(auglag->subsolver, TAOBQNKTR);
  TaoSetTolerances(auglag->subsolver, auglag->gtol, 0.0, 0.0);
  TaoSetMaximumIterations(auglag->subsolver, 1000);
  TaoSetMaximumFunctionEvaluations(auglag->subsolver, 10000);
  TaoSetFunctionLowerBound(auglag->subsolver, PETSC_NINFINITY);
  PetscObjectIncrementTabLevel((PetscObject)auglag->subsolver, (PetscObject)tao, 1);

  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetType_C", TaoALMMGetType_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetType_C", TaoALMMSetType_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetSubsolver_C", TaoALMMGetSubsolver_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetSubsolver_C", TaoALMMSetSubsolver_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetMultipliers_C", TaoALMMGetMultipliers_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMSetMultipliers_C", TaoALMMSetMultipliers_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetPrimalIS_C", TaoALMMGetPrimalIS_Private);
  PetscObjectComposeFunction((PetscObject)tao, "TaoALMMGetDualIS_C", TaoALMMGetDualIS_Private);
  return 0;
}

static PetscErrorCode TaoALMMCombinePrimal_Private(Tao tao, Vec X, Vec S, Vec P)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;

  if (tao->ineq_constrained) {
    VecScatterBegin(auglag->Pscatter[0], X, P, INSERT_VALUES, SCATTER_REVERSE);
    VecScatterEnd(auglag->Pscatter[0], X, P, INSERT_VALUES, SCATTER_REVERSE);
    VecScatterBegin(auglag->Pscatter[1], S, P, INSERT_VALUES, SCATTER_REVERSE);
    VecScatterEnd(auglag->Pscatter[1], S, P, INSERT_VALUES, SCATTER_REVERSE);
  } else {
    VecCopy(X, P);
  }
  return 0;
}

static PetscErrorCode TaoALMMCombineDual_Private(Tao tao, Vec EQ, Vec IN, Vec Y)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;

  if (tao->eq_constrained) {
    if (tao->ineq_constrained) {
      VecScatterBegin(auglag->Yscatter[0], EQ, Y, INSERT_VALUES, SCATTER_REVERSE);
      VecScatterEnd(auglag->Yscatter[0], EQ, Y, INSERT_VALUES, SCATTER_REVERSE);
      VecScatterBegin(auglag->Yscatter[1], IN, Y, INSERT_VALUES, SCATTER_REVERSE);
      VecScatterEnd(auglag->Yscatter[1], IN, Y, INSERT_VALUES, SCATTER_REVERSE);
    } else {
      VecCopy(EQ, Y);
    }
  } else {
    VecCopy(IN, Y);
  }
  return 0;
}

static PetscErrorCode TaoALMMSplitPrimal_Private(Tao tao, Vec P, Vec X, Vec S)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;

  if (tao->ineq_constrained) {
    VecScatterBegin(auglag->Pscatter[0], P, X, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterEnd(auglag->Pscatter[0], P, X, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterBegin(auglag->Pscatter[1], P, S, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterEnd(auglag->Pscatter[1], P, S, INSERT_VALUES, SCATTER_FORWARD);
  } else {
    VecCopy(P, X);
  }
  return 0;
}

/* this assumes that the latest constraints are stored in Ce and Ci, and also combined in C */
static PetscErrorCode TaoALMMComputeOptimalityNorms_Private(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;

  /* if bounded, project the gradient */
  if (tao->bounded) VecBoundGradientProjection(auglag->LgradX, auglag->Px, tao->XL, tao->XU, auglag->LgradX);
  if (auglag->type == TAO_ALMM_PHR) {
    VecNorm(auglag->LgradX, NORM_INFINITY, &auglag->gnorm);
    auglag->cenorm = 0.0;
    if (tao->eq_constrained) VecNorm(auglag->Ce, NORM_INFINITY, &auglag->cenorm);
    auglag->cinorm = 0.0;
    if (tao->ineq_constrained) {
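      /* PHR inequality infeasibility is measured elementwise as max(Ci, -Yi/mu), which also accounts for complementarity with the current multiplier estimate */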
      VecCopy(auglag->Yi, auglag->Ciwork);
      VecScale(auglag->Ciwork, -1.0 / auglag->mu);
      VecPointwiseMax(auglag->Ciwork, auglag->Ci, auglag->Ciwork);
      VecNorm(auglag->Ciwork, NORM_INFINITY, &auglag->cinorm);
    }
    auglag->cnorm_old = auglag->cnorm;
    auglag->cnorm = PetscMax(auglag->cenorm, auglag->cinorm);
    auglag->ytol = auglag->ytol0 * auglag->cnorm_old;
  } else {
    VecNorm(auglag->LgradX, NORM_2, &auglag->gnorm);
    VecNorm(auglag->C, NORM_2, &auglag->cnorm);
  }
  return 0;
}

static PetscErrorCode TaoALMMEvaluateIterate_Private(Tao tao, Vec P)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;

  /* split solution into primal and slack components */
  TaoALMMSplitPrimal_Private(tao, auglag->P, auglag->Px, auglag->Ps);

  /* compute f, df/dx and the constraints */
  TaoComputeObjectiveAndGradient(tao, auglag->Px, &auglag->fval, auglag->LgradX);
  if (tao->eq_constrained) {
    TaoComputeEqualityConstraints(tao, auglag->Px, auglag->Ce);
    TaoComputeJacobianEquality(tao, auglag->Px, auglag->Ae, auglag->Ae);
  }
  if (tao->ineq_constrained) {
    TaoComputeInequalityConstraints(tao, auglag->Px, auglag->Ci);
    TaoComputeJacobianInequality(tao, auglag->Px, auglag->Ai, auglag->Ai);
    switch (auglag->type) {
    case TAO_ALMM_CLASSIC:
      /* classic formulation converts inequality to equality constraints via slack variables */
      VecAXPY(auglag->Ci, -1.0, auglag->Ps);
      break;
    case TAO_ALMM_PHR:
      /* PHR is based on Ci <= 0 while TAO defines Ci >= 0 so we hit it with a negative sign */
      VecScale(auglag->Ci, -1.0);
      MatScale(auglag->Ai, -1.0);
      break;
    default:
      break;
    }
  }
  /* combine constraints into one vector */
  TaoALMMCombineDual_Private(tao, auglag->Ce, auglag->Ci, auglag->C);
  return 0;
}

/*
  Lphr = f + 0.5*mu*[ (Ce + Ye/mu)^T (Ce + Ye/mu) + pmax(0, Ci + Yi/mu)^T pmax(0, Ci + Yi/mu) ]

  dLphr/dX = dF/dX + mu*[ (Ce + Ye/mu)^T Ae + pmax(0, Ci + Yi/mu)^T Ai ]

  dLphr/dS = 0

  (here Ci and Ai are already in the internal Ci <= 0 orientation set by TaoALMMEvaluateIterate_Private())
*/
static PetscErrorCode TaoALMMComputePHRLagAndGradient_Private(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;
  PetscReal eq_norm = 0.0, ineq_norm = 0.0;

  TaoALMMEvaluateIterate_Private(tao, auglag->P);
  if (tao->eq_constrained) {
    /* Ce_work = mu*(Ce + Ye/mu) */
    VecWAXPY(auglag->Cework, 1.0 / auglag->mu, auglag->Ye, auglag->Ce);
    VecDot(auglag->Cework, auglag->Cework, &eq_norm); /* contribution to scalar Lagrangian */
    VecScale(auglag->Cework, auglag->mu);
    /* dL/dX += mu*(Ce + Ye/mu)^T Ae */
    MatMultTransposeAdd(auglag->Ae, auglag->Cework, auglag->LgradX, auglag->LgradX);
  }
  if (tao->ineq_constrained) {
    /* Ci_work = mu * pmax(0, Ci + Yi/mu) where pmax() is pointwise max() */
    VecWAXPY(auglag->Ciwork, 1.0 / auglag->mu, auglag->Yi, auglag->Ci);
    VecPointwiseMax(auglag->Ciwork, auglag->Cizero, auglag->Ciwork);
    VecDot(auglag->Ciwork, auglag->Ciwork, &ineq_norm); /* contribution to scalar Lagrangian */
    /* dL/dX += mu * pmax(0, Ci + Yi/mu)^T Ai */
    VecScale(auglag->Ciwork, auglag->mu);
    MatMultTransposeAdd(auglag->Ai, auglag->Ciwork, auglag->LgradX, auglag->LgradX);
    /* dL/dS = 0 because there are no slacks in PHR */
    VecZeroEntries(auglag->LgradS);
  }
  /* combine gradient together */
  TaoALMMCombinePrimal_Private(tao, auglag->LgradX, auglag->LgradS, auglag->G);
  /* compute L = f + 0.5 * mu * [(Ce + Ye/mu)^T (Ce + Ye/mu) + pmax(0, Ci + Yi/mu)^T pmax(0, Ci + Yi/mu)] */
  auglag->Lval = auglag->fval + 0.5 * auglag->mu * (eq_norm + ineq_norm);
  return 0;
}

/*
  Lc = F + Ye^TCe + Yi^T(Ci - S) + 0.5*mu*[Ce^TCe + (Ci - S)^T(Ci - S)]

  dLc/dX = dF/dX + Ye^TAe + Yi^TAi + mu*[Ce^TAe + (Ci - S)^TAi]

  dLc/dS = -[Yi + mu*(Ci - S)]
*/
static PetscErrorCode TaoALMMComputeAugLagAndGradient_Private(Tao tao)
{
  TAO_ALMM *auglag = (TAO_ALMM *)tao->data;
  PetscReal yeTce = 0.0, yiTcims = 0.0, ceTce = 0.0, cimsTcims = 0.0;

  TaoALMMEvaluateIterate_Private(tao, auglag->P);
  if (tao->eq_constrained) {
    /* compute scalar contributions */
    VecDot(auglag->Ye, auglag->Ce, &yeTce);
    VecDot(auglag->Ce, auglag->Ce, &ceTce);
    /* dL/dX += ye^T Ae */
    MatMultTransposeAdd(auglag->Ae, auglag->Ye, auglag->LgradX, auglag->LgradX);
    /* dL/dX += mu * ce^T Ae */
    MatMultTranspose(auglag->Ae, auglag->Ce, auglag->Xwork);
    VecAXPY(auglag->LgradX, auglag->mu, auglag->Xwork);
  }
  if (tao->ineq_constrained) {
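    /* note: for the classic formulation, Ci already holds (Ci - S) because the slacks were subtracted in TaoALMMEvaluateIterate_Private() */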
    /* compute scalar contributions */
    VecDot(auglag->Yi, auglag->Ci, &yiTcims);
    VecDot(auglag->Ci, auglag->Ci, &cimsTcims);
    /* dL/dX += yi^T Ai */
    MatMultTransposeAdd(auglag->Ai, auglag->Yi, auglag->LgradX, auglag->LgradX);
    /* dL/dX += mu * (ci - s)^T Ai */
    MatMultTranspose(auglag->Ai, auglag->Ci, auglag->Xwork);
    VecAXPY(auglag->LgradX, auglag->mu, auglag->Xwork);
    /* dL/dS = -[yi + mu*(ci - s)] */
    VecWAXPY(auglag->LgradS, auglag->mu, auglag->Ci, auglag->Yi);
    VecScale(auglag->LgradS, -1.0);
  }
  /* combine gradient together */
  TaoALMMCombinePrimal_Private(tao, auglag->LgradX, auglag->LgradS, auglag->G);
  /* compute L = f + ye^T ce + yi^T (ci - s) + 0.5*mu*||ce||^2 + 0.5*mu*||ci - s||^2 */
  auglag->Lval = auglag->fval + yeTce + yiTcims + 0.5 * auglag->mu * (ceTce + cimsTcims);
  return 0;
}

PetscErrorCode TaoALMMSubsolverObjective_Private(Tao tao, Vec P, PetscReal *Lval, void *ctx)
{
  TAO_ALMM *auglag = (TAO_ALMM *)ctx;

  VecCopy(P, auglag->P);
  (*auglag->sub_obj)(auglag->parent);
  *Lval = auglag->Lval;
  return 0;
}

PetscErrorCode TaoALMMSubsolverObjectiveAndGradient_Private(Tao tao, Vec P, PetscReal *Lval, Vec G, void *ctx)
{
  TAO_ALMM *auglag = (TAO_ALMM *)ctx;

  VecCopy(P, auglag->P);
  (*auglag->sub_obj)(auglag->parent);
  VecCopy(auglag->G, G);
  *Lval = auglag->Lval;
  return 0;
}