@comment{Doctoral thesis, SOKENDAI repository (oai:ir.soken.ac.jp:00001501).
  Degree number 総研大甲第1286号 identifies this as a SOKENDAI doctoral degree;
  entry retyped from @misc to @phdthesis accordingly. Japanese name variants of
  the single author are kept in ignored annotation fields so BibTeX does not
  parse them as additional authors. Conferral date: 2016-02-17.}
@phdthesis{oai:ir.soken.ac.jp:00001501,
  author       = {Cui, Xiaoke},
  author-kanji = {崔, 小可},
  author-kana  = {サイ, ショカ},
  title        = {Approximate Generalized Inverse Preconditioning Methods for Least Squares Problems},
  school       = {The Graduate University for Advanced Studies (SOKENDAI)},
  year         = {2016},
  month        = feb,
  note         = {総研大甲第1286号},
  abstract     = {A basic problem in science is to fit a model to observations subject to errors. It is clear that the more observations that are available, the more accurately it will be possible to calculate the parameters in the model. This gives rise to the problem of ``solving'' an overdetermined linear or nonlinear system of equations. When enough observations are not available, it gives rise to underdetermined systems. Overdetermined systems together with underdetermined systems are called least squares problems. It can be shown that the solution which minimizes a weighted sum of the squares of the residual is optimal in a certain sense. These solutions are called least squares solutions. Least squares problems are usually written in the form $\min_{x \in \mathbb{R}^n} \|b - Ax\|_2$, $A \in \mathbb{R}^{m \times n}$, $b \in \mathbb{R}^m$, where the norm $\|\cdot\|_2$ stands for the 2-norm. When $A$ is large and sparse, it is advantageous to apply iterative methods to the normal equations $A^{T}(Ax - b) = 0$ or $AA^{T}y - b = 0$. Since the condition number of $A^{T}A$ or $AA^{T}$ is the square of that of $A$, when $A$ is ill-conditioned, preconditioning for the iterative methods becomes necessary. In this thesis, we consider constructing preconditioners for some Krylov subspace iterative methods to solve least squares problems more efficiently. We especially focused on one kind of preconditioners, in which preconditioners are the approximate generalized inverses of the coefficient matrices of the least squares problems. We proposed two different approaches for how to construct the approximate generalized inverses of the coefficient matrices: one is based on the Minimal Residual method with the steepest descent direction, and the other is based on Greville's method, which is an old method developed for computing the generalized inverse based on the rank-one update. For these two preconditioners, we also discuss how to apply them to least squares problems. Both theoretical issues and practical implementation issues about the preconditioning are discussed in this thesis. Our numerical tests showed that our methods performed competitively on rank-deficient ill-conditioned problems. As an example of problems from the real world, we apply our preconditioners to linear programming problems, where many large-scale sparse least squares problems with rank-deficient coefficient matrices arise. Our numerical tests showed that our methods showed more robustness than the Cholesky decomposition method.},
}