Kernel Quantum Probability Library
The KQP library aims to provide tools for working with quantum probabilities.
sparse.hpp
/*
 This file is part of the Kernel Quantum Probability library (KQP).

 KQP is free software: you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.

 KQP is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with KQP. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __KQP_SPARSE_FEATURE_MATRIX_H__
#define __KQP_SPARSE_FEATURE_MATRIX_H__

#include <boost/lexical_cast.hpp>
#include <kqp/subset.hpp>
#include <kqp/feature_matrix.hpp>
#include <Eigen/Sparse>

namespace kqp {

    //! A feature matrix whose feature vectors are the columns of an Eigen column-major sparse matrix.
    template <typename Scalar>
    class Sparse : public FeatureMatrixBase<Scalar> {
    public:
        KQP_SCALAR_TYPEDEFS(Scalar);
        typedef Sparse<Scalar> Self;
        typedef Eigen::SparseMatrix<Scalar, Eigen::ColMajor> Storage;

        virtual ~Sparse() {}

        Sparse() {}
        Sparse(Index dimension) : m_matrix(dimension, 0) {}
        Sparse(const Self &other) : m_matrix(other.m_matrix) {}

#ifndef SWIG
        Sparse(Storage &&storage) : m_matrix(std::move(storage)) {}
        Sparse(Self &&other) : m_matrix(std::move(other.m_matrix)) {}
#endif

        //! Creates a sparse matrix from a dense one; entries whose magnitude is below
        //! threshold times the norm of their column are discarded.
        Sparse(const ScalarMatrix &mat, double threshold = 0) : m_matrix(mat.rows(), mat.cols()) {
            // Per-column thresholds, relative to the column norms
            Matrix<Real, 1, Dynamic> thresholds = threshold * mat.colwise().norm();

            // Number of entries to keep per column (counted with >=, so possibly a slight
            // over-estimate, which is safe for reserve())
            Matrix<Index, 1, Dynamic> countsPerCol((mat.array().abs() >= thresholds.colwise().replicate(mat.rows()).array()).template cast<Index>().colwise().sum());

            m_matrix.reserve(countsPerCol);

            for(Index i = 0; i < mat.rows(); i++)
                for(Index j = 0; j < mat.cols(); j++)
                    if (std::abs(mat(i,j)) > thresholds[j])
                        m_matrix.insert(i,j) = mat(i,j);
        }
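
        // Usage sketch (assumption: ScalarMatrix is a dense Eigen matrix, e.g.
        // Eigen::MatrixXd when Scalar = double); keep only the entries whose
        // magnitude is at least 1e-6 times the norm of their column:
        //
        //     Eigen::MatrixXd dense = Eigen::MatrixXd::Random(100, 10);
        //     kqp::Sparse<double> fm(dense, 1e-6);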

        Sparse(const Storage &storage) : m_matrix(storage) {}
        Sparse(const Eigen::SparseMatrix<Scalar, Eigen::RowMajor> &storage) : m_matrix(storage) {}

        ScalarMatrix toDense() {
            return ScalarMatrix(m_matrix);
        }

        // --- Base methods

        inline Index size() const {
            return m_matrix.cols();
        }

        inline Index dimension() const {
            return m_matrix.rows();
        }

        void add(const FMatrixBase &_other, const std::vector<bool> *which = NULL) override {
            const Self &other = kqp::our_dynamic_cast<const Self&>(_other);

            // Compute the indices of the vectors to add
            std::vector<Index> ix;
            Index toAdd = 0;
            if (which) {
                for(size_t i = 0; i < which->size(); i++)
                    if ((*which)[i])
                        ix.push_back(i);

                toAdd = ix.size();
            }
            else toAdd = other.size();

            // FIXME: re-implement when conservativeResize is implemented in Eigen::SparseMatrix

            // Construct the vector of non-zero entry counts (existing columns, then added ones)
            std::vector<Index> counts;
            for(Index i = 0; i < m_matrix.cols(); i++)
                counts.push_back(m_matrix.col(i).nonZeros());

            for(Index i = 0; i < toAdd; i++)
                counts.push_back(other.m_matrix.col(ix.empty() ? i : ix[i]).nonZeros());

            // Prepare the resulting sparse matrix
            // FIXME: m_matrix.conservativeResize(m_matrix.rows(), m_matrix.cols() + toAdd)
            Index offset = m_matrix.cols();
            Storage s(m_matrix.rows(), m_matrix.cols() + toAdd);
            s.reserve(counts);

            // Fill the result: first the current columns, then the selected columns of other
            for(Index i = 0; i < m_matrix.cols(); ++i) // FIXME: remove this
                for (typename Storage::InnerIterator it(m_matrix,i); it; ++it)
                    s.insert(it.row(), i) = it.value();

            for(Index i = 0; i < toAdd; ++i)
                for (typename Storage::InnerIterator it(other.m_matrix, ix.empty() ? i : ix[i]); it; ++it)
                    s.insert(it.row(), offset+i) = it.value();

            // Compress (should not do anything but free the non-zero buffer)
            m_matrix = std::move(s);
            m_matrix.makeCompressed();
        }

        //! Lazily computes and caches the Gram matrix; only inner products involving
        //! columns added since the last call are computed.
        const ScalarMatrix &gramMatrix() const {
            Index current = m_gramMatrix.rows();
            if (size() == current) return m_gramMatrix;

            if (current < size())
                m_gramMatrix.conservativeResize(size(), size());
            Index tofill = size() - current;

            // Compute the remaining inner products (all rows, new columns)
            Index ncols = m_matrix.cols();
            for(Index i = 0; i < ncols; ++i)
                for(Index j = current; j < ncols; ++j)
                    m_gramMatrix(i,j) = m_matrix.col(i).dot(m_matrix.col(j));

            // Mirror the newly computed block below the diagonal
            m_gramMatrix.bottomLeftCorner(tofill, current) = m_gramMatrix.topRightCorner(current, tofill).adjoint().eval();

            return m_gramMatrix;
        }

        FMatrixBasePtr subset(const std::vector<bool>::const_iterator &begin, const std::vector<bool>::const_iterator &end) const override {
            // Collect the indices of the selected columns
            std::vector<Index> selected;
            auto it = begin;
            for(Index i = 0; i < m_matrix.cols(); i++) {
                if (it == end || *it)
                    selected.push_back(i);
                it++;
            }

            // Prepare the resulting sparse matrix
            Storage s(m_matrix.rows(), selected.size());
            Eigen::VectorXi counts(selected.size());
            for(size_t i = 0; i < selected.size(); ++i)
                counts[i] = m_matrix.col(selected[i]).nonZeros();
            s.reserve(counts);

            // Fill the result
            for(size_t i = 0; i < selected.size(); ++i)
                for (typename Storage::InnerIterator it(m_matrix,selected[i]); it; ++it)
                    s.insert(it.row(), i) = it.value();

            return FMatrixBasePtr(new Self(std::move(s)));
        }
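
        // Usage sketch ("fm" is a hypothetical Sparse<double> with three columns):
        // keep only the first and third feature vectors.
        //
        //     std::vector<bool> mask { true, false, true };
        //     auto sub = fm.subset(mask.begin(), mask.end());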

        FMatrixBasePtr copy() const override {
            return FMatrixBasePtr(new Self(*this));
        }

        const Storage *operator->() const { return &m_matrix; }
        const Storage &operator*() const { return m_matrix; }

        FMatrixBase& operator=(const Self &other) {
            m_matrix = other.m_matrix;
            m_gramMatrix = other.m_gramMatrix;
            return *this;
        }

        virtual FMatrixBase& operator=(const FMatrixBase &other) override {
            return *this = kqp::our_dynamic_cast<const Self&>(other);
        }

        const Storage &getMatrix() const {
            return m_matrix;
        }

    private:
        //! Cache for the Gram matrix (computed lazily)
        mutable ScalarMatrix m_gramMatrix;

        //! The underlying sparse storage
        Storage m_matrix;
    };

    template<typename Scalar>
    std::ostream& operator<<(std::ostream &out, const Sparse<Scalar> &f) {
        return out << "[Sparse Matrix with scalar " << KQP_DEMANGLE((Scalar)0) << "]" << std::endl << f.getMatrix();
    }

    //! Feature space whose feature matrices are sparse matrices of a given dimension.
    template<typename Scalar>
    class SparseSpace : public SpaceBase<Scalar> {
    public:
        typedef SparseSpace<Scalar> Self;
        KQP_SPACE_TYPEDEFS("sparse", Scalar);
#ifndef SWIG
        using SpaceBase<Scalar>::k;
#endif
        static FSpacePtr create(Index dimension) { return FSpacePtr(new SparseSpace(dimension)); }

        SparseSpace() {}
        SparseSpace(Index dimension) : m_dimension(dimension) {}

        inline static const Sparse<Scalar>& cast(const FeatureMatrixBase<Scalar> &mX) { return kqp::our_dynamic_cast<const Sparse<Scalar> &>(mX); }

        Index dimension() const override { return m_dimension; }
        void dimension(Index dimension) { m_dimension = dimension; }

        virtual FSpacePtr copy() const override { return FSpacePtr(new SparseSpace(m_dimension)); }

        virtual FMatrixBasePtr newMatrix() const override {
            return FMatrixBasePtr(new Sparse<Scalar>(m_dimension));
        }
        virtual FMatrixBasePtr newMatrix(const FMatrixBase &mX) const override {
            return FMatrixBasePtr(new Sparse<Scalar>(cast(mX)));
        }

        //! Gram matrix of a feature matrix
        const ScalarMatrix &k(const FeatureMatrixBase<Scalar> &mX) const override {
            return cast(mX).gramMatrix();
        }

        //! Computes D1 * Y1^H * X1^H * X2 * Y2 * D2
        virtual ScalarMatrix k(const FeatureMatrixBase<Scalar> &mX1, const ScalarAltMatrix &mY1, const RealAltVector &mD1,
                               const FeatureMatrixBase<Scalar> &mX2, const ScalarAltMatrix &mY2, const RealAltVector &mD2) const override {
            return mD1.asDiagonal() * mY1.adjoint() * cast(mX1)->adjoint() * *cast(mX2) * mY2 * mD2.asDiagonal();
        }

        virtual void load(const picojson::object &json) override {
            m_dimension = getNumeric<Index>("", json, "dimension");
        }

        virtual picojson::object save() const override {
            picojson::object json = SpaceBase<Scalar>::save();
            json["dimension"] = picojson::value((double)m_dimension);
            return json;
        }

    private:
        Index m_dimension;
    };

} // end namespace kqp

#endif // __KQP_SPARSE_FEATURE_MATRIX_H__
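
A minimal usage sketch, not part of the header above. It assumes that ScalarMatrix resolves to Eigen::MatrixXd when Scalar = double and that the header is installed as <kqp/feature_matrix/sparse.hpp> (adjust the include path to your installation). It builds a sparse feature matrix from a dense one, then queries its dimension, number of vectors, and Gram matrix.

#include <iostream>
#include <Eigen/Dense>
#include <kqp/feature_matrix/sparse.hpp> // include path assumed

int main() {
    // A small dense matrix whose columns are the feature vectors
    Eigen::MatrixXd dense(3, 2);
    dense << 1.0, 0.0,
             0.0, 2.0,
             0.0, 0.5;

    // Keep only entries whose magnitude is at least 1e-10 times the column norm
    kqp::Sparse<double> fm(dense, 1e-10);

    std::cout << "dimension = " << fm.dimension()
              << ", vectors = " << fm.size() << std::endl;

    // Gram matrix of pairwise inner products (computed lazily and cached)
    std::cout << fm.gramMatrix() << std::endl;

    return 0;
}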