1# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""`LinearOperator` acting like a diagonal matrix."""
16
17from __future__ import absolute_import
18from __future__ import division
19from __future__ import print_function
20
21from tensorflow.python.framework import ops
22from tensorflow.python.ops import array_ops
23from tensorflow.python.ops import check_ops
24from tensorflow.python.ops import math_ops
25from tensorflow.python.ops.linalg import linalg_impl as linalg
26from tensorflow.python.ops.linalg import linear_operator
27from tensorflow.python.ops.linalg import linear_operator_util
28from tensorflow.python.util.tf_export import tf_export
29
# Names exported by `from <module> import *`.
__all__ = ["LinearOperatorDiag",]
31
32
@tf_export("linalg.LinearOperatorDiag")
class LinearOperatorDiag(linear_operator.LinearOperator):
  """`LinearOperator` acting like a [batch] square diagonal matrix.

  This operator behaves like a [batch] diagonal matrix `A` with shape
  `[B1,...,Bb, N, N]` for some `b >= 0`.  The leading `b` indices select a
  batch member, and for every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]`
  is an `N x N` matrix.  The matrix `A` is never materialized; its shape
  matters only for broadcasting purposes.

  A `LinearOperatorDiag` is built from a (batch) vector holding the diagonal.

  ```python
  # Create a 2 x 2 diagonal linear operator.
  diag = [1., -1.]
  operator = LinearOperatorDiag(diag)

  operator.to_dense()
  ==> [[1.,  0.]
       [0., -1.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor

  # Create a [2, 3] batch of 4 x 4 linear operators.
  diag = tf.random.normal(shape=[2, 3, 4])
  operator = LinearOperatorDiag(diag)

  # Create a shape [2, 1, 4, 2] vector.  Note that this shape is compatible
  # since the batch dimensions, [2, 1], are broadcast to
  # operator.batch_shape = [2, 3].
  y = tf.random.normal(shape=[2, 1, 4, 2])
  x = operator.solve(y)
  ==> operator.matmul(x) = y
  ```

  #### Shape compatibility

  This operator acts on [batch] matrices with compatible shape.
  A batch matrix `x` is compatible for `matmul` and `solve` when

  ```
  operator.shape = [B1,...,Bb] + [N, N],  with b >= 0
  x.shape =   [C1,...,Cc] + [N, R],
  and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
  ```

  #### Performance

  If `operator` is a `LinearOperatorDiag` of shape `[N, N]`, and
  `x.shape = [N, R]`, then

  * `operator.matmul(x)` costs `N * R` multiplications.
  * `operator.solve(x)` costs `N` divisions and `N * R` multiplications.
  * `operator.determinant()` costs a size `N` `reduce_prod`.

  If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
  `[B1,...,Bb, N, R]`, the cost of every operation grows by a factor of
  `B1*...*Bb`.

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`.  This is a promise that should be fulfilled, but is *not* a
    runtime assert.  For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               diag,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorDiag"):
    r"""Initialize a `LinearOperatorDiag`.

    Args:
      diag:  Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
        The diagonal of the operator.  Allowed dtypes: `float16`, `float32`,
          `float64`, `complex64`, `complex128`.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.  If `diag.dtype` is real, this is auto-set to `True`.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      TypeError:  If `diag.dtype` is not an allowed type.
      ValueError:  If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
    """
    # Recorded so the operator can report how it was constructed.
    parameters = {
        "diag": diag,
        "is_non_singular": is_non_singular,
        "is_self_adjoint": is_self_adjoint,
        "is_positive_definite": is_positive_definite,
        "is_square": is_square,
        "name": name,
    }

    with ops.name_scope(name, values=[diag]):
      self._diag = linear_operator_util.convert_nonref_to_tensor(
          diag, name="diag")
      self._check_diag(self._diag)

      # A real diagonal matrix is always hermitian, so the self-adjoint hint
      # can be auto-set; an explicit `False` is contradictory.
      if not self._diag.dtype.is_complex:
        if is_self_adjoint is False:
          raise ValueError("A real diagonal operator is always self adjoint.")
        is_self_adjoint = True

      # Diagonal operators are square by construction.
      if is_square is False:
        raise ValueError("Only square diagonal operators currently supported.")
      is_square = True

      super(LinearOperatorDiag, self).__init__(
          dtype=self._diag.dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)
      # TODO(b/143910018) Remove graph_parents in V3.
      self._set_graph_parents([self._diag])

  def _check_diag(self, diag):
    """Static check of diag."""
    ndims = diag.shape.ndims
    # `ndims is None` means the rank is unknown at graph-construction time;
    # in that case no static check is possible.
    if ndims is not None and ndims < 1:
      raise ValueError("Argument diag must have at least 1 dimension.  "
                       "Found: %s" % diag)

  def _shape(self):
    # A diag with static shape [5, 3] describes an operator of shape
    # [5, 3, 3]: repeat the trailing dimension once.
    diag_shape = self._diag.shape
    return diag_shape.concatenate(diag_shape[-1:])

  def _shape_tensor(self):
    # Dynamic analogue of `_shape`: append the last dimension once more.
    diag_shape = array_ops.shape(self._diag)
    return array_ops.concat((diag_shape, [diag_shape[-1]]), 0)

  @property
  def diag(self):
    """The (batch) vector holding this operator's diagonal."""
    return self._diag

  def _assert_non_singular(self):
    # A diagonal matrix is singular iff some diagonal entry is zero.
    return linear_operator_util.assert_no_entries_with_modulus_zero(
        self._diag,
        message="Singular operator:  Diagonal contained zero values.")

  def _assert_positive_definite(self):
    # For a diagonal matrix, positive definiteness reduces to every diagonal
    # entry having positive real part.
    if self.dtype.is_complex:
      message = ("Diagonal operator had diagonal entries with non-positive "
                 "real part, thus was not positive definite.")
    else:
      message = ("Real diagonal operator had non-positive diagonal entries, "
                 "thus was not positive definite.")
    return check_ops.assert_positive(
        math_ops.real(self._diag), message=message)

  def _assert_self_adjoint(self):
    # A diagonal matrix equals its hermitian transpose iff the diagonal is
    # purely real.
    return linear_operator_util.assert_zero_imag_part(
        self._diag,
        message=("This diagonal operator contained non-zero imaginary "
                 "values.   Thus it was not self-adjoint."))

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    diag = math_ops.conj(self._diag) if adjoint else self._diag
    if adjoint_arg:
      x = linalg.adjoint(x)
    # Multiplying by a diagonal matrix scales each row of x.  Expand the
    # diagonal to shape [..., N, 1] so it broadcasts across columns.
    return array_ops.expand_dims(diag, -1) * x

  def _matvec(self, x, adjoint=False):
    # For a vector argument, matvec is elementwise scaling.
    if adjoint:
      return math_ops.conj(self._diag) * x
    return self._diag * x

  def _determinant(self):
    # det(A) is the product of the diagonal entries.
    return math_ops.reduce_prod(self._diag, axis=[-1])

  def _log_abs_determinant(self):
    # log|det(A)| = sum_i log|d_i|; taking |.| makes the intermediate real.
    abs_log = math_ops.log(math_ops.abs(self._diag))
    log_det = math_ops.reduce_sum(abs_log, axis=[-1])
    if self.dtype.is_complex:
      # Cast back so the result dtype matches the operator's dtype.
      return math_ops.cast(log_det, dtype=self.dtype)
    return log_det

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    diag = math_ops.conj(self._diag) if adjoint else self._diag
    if adjoint_arg:
      rhs = linalg.adjoint(rhs)
    # Solving against a diagonal matrix scales each row of rhs by 1 / d_i.
    return rhs * array_ops.expand_dims(1. / diag, -1)

  def _to_dense(self):
    # Materialize the [batch] N x N matrix with `diag` on the diagonal.
    return array_ops.matrix_diag(self._diag)

  def _diag_part(self):
    return self.diag

  def _add_to_tensor(self, x):
    # Only the diagonal of x changes, so edit it in place rather than
    # materializing this operator densely.
    return array_ops.matrix_set_diag(
        x, array_ops.matrix_diag_part(x) + self._diag)

  def _eigvals(self):
    # The eigenvalues of a diagonal matrix are exactly its diagonal entries.
    return ops.convert_to_tensor_v2_with_dispatch(self.diag)

  def _cond(self):
    # Condition number = max|d_i| / min|d_i| over the diagonal axis.
    magnitudes = math_ops.abs(self.diag)
    return (math_ops.reduce_max(magnitudes, axis=-1) /
            math_ops.reduce_min(magnitudes, axis=-1))
268