libmesh解析
本工作只是尝试解析原libmesh的代码,供学习使用
 全部  命名空间 文件 函数 变量 类型定义 枚举 枚举值 友元 
distributed_vector.C
浏览该文件的文档.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2023 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 
20 // Local includes
21 #include "libmesh/distributed_vector.h"
22 
23 // libMesh includes
24 #include "libmesh/dense_vector.h"
25 #include "libmesh/dense_subvector.h"
26 #include "libmesh/int_range.h"
27 #include "libmesh/libmesh_common.h"
28 #include "libmesh/tensor_tools.h"
29 
30 // TIMPI includes
31 #include "timpi/parallel_implementation.h"
32 #include "timpi/parallel_sync.h"
33 
34 // C++ includes
35 #include <cstdlib> // *must* precede <cmath> for proper std:abs() on PGI, Sun Studio CC
36 #include <cmath> // for std::abs
37 #include <limits> // std::numeric_limits<T>::min()
38 
39 
40 namespace libMesh
41 {
42 
43 
44 
45 //--------------------------------------------------------------------------
46 // DistributedVector methods
47 template <typename T>
49 {
50  // This function must be run on all processors at once
51  parallel_object_only();
52 
53  libmesh_assert (this->initialized());
54  libmesh_assert_equal_to (_values.size(), _local_size);
55  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
56 
57  T local_sum = 0.;
58 
59  for (auto & val : _values)
60  local_sum += val;
61 
62  this->comm().sum(local_sum);
63 
64  return local_sum;
65 }
66 
67 
68 
69 template <typename T>
71 {
72  // This function must be run on all processors at once
73  parallel_object_only();
74 
75  libmesh_assert (this->initialized());
76  libmesh_assert_equal_to (_values.size(), _local_size);
77  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
78 
79  Real local_l1 = 0.;
80 
81  for (auto & val : _values)
82  local_l1 += std::abs(val);
83 
84  this->comm().sum(local_l1);
85 
86  return local_l1;
87 }
88 
89 
90 
91 template <typename T>
93 {
94  // This function must be run on all processors at once
95  parallel_object_only();
96 
97  libmesh_assert (this->initialized());
98  libmesh_assert_equal_to (_values.size(), _local_size);
99  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
100 
101  Real local_l2 = 0.;
102 
103  for (auto & val : _values)
104  local_l2 += TensorTools::norm_sq(val);
105 
106  this->comm().sum(local_l2);
107 
108  return std::sqrt(local_l2);
109 }
110 
111 
112 
113 template <typename T>
115 {
116  // This function must be run on all processors at once
117  parallel_object_only();
118 
119  libmesh_assert (this->initialized());
120  libmesh_assert_equal_to (_values.size(), _local_size);
121  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
122 
123  Real local_linfty = 0.;
124 
125  for (auto & val : _values)
126  local_linfty = std::max(local_linfty,
127  static_cast<Real>(std::abs(val))
128  ); // Note we static_cast so that both
129  // types are the same, as required
130  // by std::max
131 
132  this->comm().max(local_linfty);
133 
134  return local_linfty;
135 }
136 
137 
138 
139 template <typename T>
141 {
142  libmesh_assert (this->closed());
143  libmesh_assert (this->initialized());
144  libmesh_assert_equal_to (_values.size(), _local_size);
145  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
146 
147  add(1., v);
148 
149  return *this;
150 }
151 
152 
153 
154 template <typename T>
156 {
157  libmesh_assert (this->closed());
158  libmesh_assert (this->initialized());
159  libmesh_assert_equal_to (_values.size(), _local_size);
160  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
161 
162  add(-1., v);
163 
164  return *this;
165 }
166 
167 
168 
169 template <typename T>
171 {
172  libmesh_assert_equal_to(size(), v.size());
173 
174  const DistributedVector<T> & v_vec = cast_ref<const DistributedVector<T> &>(v);
175 
176  for (auto i : index_range(_values))
177  _values[i] *= v_vec._values[i];
178 
179  return *this;
180 }
181 
182 
183 
184 template <typename T>
186 {
187  libmesh_assert_equal_to(size(), v.size());
188 
189  const DistributedVector<T> & v_vec = cast_ref<const DistributedVector<T> &>(v);
190 
191  for (auto i : index_range(_values))
192  _values[i] /= v_vec._values[i];
193 
194  return *this;
195 }
196 
197 
198 
199 
200 template <typename T>
202 {
203  for (auto & val : _values)
204  {
205  // Don't divide by zero
206  libmesh_assert_not_equal_to (val, T(0));
207 
208  val = 1. / val;
209  }
210 }
211 
212 
213 
214 
215 template <typename T>
217 {
218  // Replace values by complex conjugate
219  for (auto & val : _values)
220  val = libmesh_conj(val);
221 }
222 
223 
224 
225 
226 
227 template <typename T>
229 {
230  libmesh_assert (this->initialized());
231  libmesh_assert_equal_to (_values.size(), _local_size);
232  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
233 
234  for (auto & val : _values)
235  val += v;
236 }
237 
238 
239 
240 template <typename T>
242 {
243  libmesh_assert (this->initialized());
244  libmesh_assert_equal_to (_values.size(), _local_size);
245  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
246 
247  add (1., v);
248 }
249 
250 
251 
252 template <typename T>
253 void DistributedVector<T>::add (const T a, const NumericVector<T> & v_in)
254 {
255  libmesh_assert (this->initialized());
256  libmesh_assert_equal_to (_values.size(), _local_size);
257  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
258 
259  // Make sure the NumericVector passed in is really a DistributedVector
260  const DistributedVector<T> * v = cast_ptr<const DistributedVector<T> *>(&v_in);
261  libmesh_error_msg_if(!v, "Cannot add different types of NumericVectors.");
262 
263  for (auto i : index_range(_values))
264  _values[i] += a * v->_values[i];
265 }
266 
267 
268 
269 template <typename T>
270 void DistributedVector<T>::scale (const T factor)
271 {
272  libmesh_assert (this->initialized());
273  libmesh_assert_equal_to (_values.size(), _local_size);
274  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
275 
276  for (auto & val : _values)
277  val *= factor;
278 }
279 
280 template <typename T>
282 {
283  libmesh_assert (this->initialized());
284  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
285 
286  for (auto & val : _values)
287  val = std::abs(val);
288 }
289 
290 
291 
292 
293 
294 template <typename T>
296 {
297  // This function must be run on all processors at once
298  parallel_object_only();
299 
300  // Make sure the NumericVector passed in is really a DistributedVector
301  const DistributedVector<T> * v = cast_ptr<const DistributedVector<T> *>(&V);
302 
303  // Make sure that the two vectors are distributed in the same way.
304  libmesh_assert_equal_to ( this->first_local_index(), v->first_local_index() );
305  libmesh_assert_equal_to ( this->last_local_index(), v->last_local_index() );
306 
307  // The result of dotting together the local parts of the vector.
308  T local_dot = 0;
309 
310  for (auto i : index_range(_values))
311  local_dot += this->_values[i] * v->_values[i];
312 
313  // The local dot products are now summed via MPI
314  this->comm().sum(local_dot);
315 
316  return local_dot;
317 }
318 
319 
320 
321 template <typename T>
324 {
325  libmesh_assert (this->initialized());
326  libmesh_assert_equal_to (_values.size(), _local_size);
327  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
328 
329  for (auto & val : _values)
330  val = s;
331 
332  return *this;
333 }
334 
335 
336 
337 template <typename T>
340 {
341  // Make sure the NumericVector passed in is really a DistributedVector
342  const DistributedVector<T> * v = cast_ptr<const DistributedVector<T> *>(&v_in);
343 
344  *this = *v;
345 
346  return *this;
347 }
348 
349 
350 
351 template <typename T>
354 {
356  this->_is_closed = v._is_closed;
357 
358  _global_size = v._global_size;
359  _local_size = v._local_size;
360  _first_local_index = v._first_local_index;
361  _last_local_index = v._last_local_index;
362 
363  if (v.local_size() == this->local_size())
364  _values = v._values;
365 
366  else
367  libmesh_error_msg("v.local_size() = " << v.local_size() << " must be equal to this->local_size() = " << this->local_size());
368 
369  return *this;
370 }
371 
372 
373 
374 template <typename T>
376 DistributedVector<T>::operator = (const std::vector<T> & v)
377 {
378  libmesh_assert (this->initialized());
379  libmesh_assert_equal_to (_values.size(), _local_size);
380  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
381 
382  if (v.size() == local_size())
383  _values = v;
384 
385  else if (v.size() == size())
386  for (auto i : index_range(*this))
387  _values[i-first_local_index()] = v[i];
388 
389  else
390  libmesh_error_msg("Incompatible sizes in DistributedVector::operator=");
391 
392  return *this;
393 }
394 
395 
396 
397 template <typename T>
399 
400 {
401  libmesh_assert (this->initialized());
402  libmesh_assert_equal_to (_values.size(), _local_size);
403  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
404 
405  DistributedVector<T> * v_local = cast_ptr<DistributedVector<T> *>(&v_local_in);
406 
407  v_local->_first_local_index = 0;
408 
409  v_local->_global_size =
410  v_local->_local_size =
411  v_local->_last_local_index = size();
412 
413  v_local->_is_initialized =
414  v_local->_is_closed = true;
415 
416  // Call localize on the vector's values. This will help
417  // prevent code duplication
418  localize (v_local->_values);
419 
420 #ifndef LIBMESH_HAVE_MPI
421 
422  libmesh_assert_equal_to (local_size(), size());
423 
424 #endif
425 }
426 
427 
428 
429 template <typename T>
431  const std::vector<numeric_index_type> &) const
432 {
433  libmesh_assert (this->initialized());
434  libmesh_assert_equal_to (_values.size(), _local_size);
435  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
436 
437  // TODO: We don't yet support the send list; this is inefficient:
438  localize (v_local_in);
439 }
440 
441 
442 
443 template <typename T>
444 void DistributedVector<T>::localize (std::vector<T> & v_local,
445  const std::vector<numeric_index_type> & indices) const
446 {
447  // Resize v_local so there is enough room to hold all the local values.
448  v_local.resize(indices.size());
449 
450  // We need to know who has the values we want, so get everyone's _local_size
451  std::vector<numeric_index_type> local_sizes;
452  this->comm().allgather (_local_size, local_sizes);
453 
454  // Make a vector of partial sums of local sizes
455  std::vector<numeric_index_type> local_size_sums(this->n_processors());
456  local_size_sums[0] = local_sizes[0];
457  for (auto i : IntRange<numeric_index_type>(1, local_sizes.size()))
458  local_size_sums[i] = local_size_sums[i-1] + local_sizes[i];
459 
460  // We now fill in 'requested_ids' based on the indices. Also keep
461  // track of the local index (in the indices vector) for each of
462  // these, since we need that when unpacking.
463  std::map<processor_id_type, std::vector<numeric_index_type>>
464  requested_ids, local_requested_ids;
465 
466  // We'll use this typedef a couple of times below.
467  typedef typename std::vector<numeric_index_type>::iterator iter_t;
468 
469  // For each index in indices, determine which processor it is on.
470  // This is an O(N*log(p)) algorithm that uses std::upper_bound().
471  // Note: upper_bound() returns an iterator to the first entry which is
472  // greater than the given value.
473  for (auto i : index_range(indices))
474  {
475  iter_t ub = std::upper_bound(local_size_sums.begin(),
476  local_size_sums.end(),
477  indices[i]);
478 
479  processor_id_type on_proc = cast_int<processor_id_type>
480  (std::distance(local_size_sums.begin(), ub));
481 
482  requested_ids[on_proc].push_back(indices[i]);
483  local_requested_ids[on_proc].push_back(i);
484  }
485 
486  auto gather_functor =
487  [this]
488  (processor_id_type, const std::vector<dof_id_type> & ids,
489  std::vector<T> & values)
490  {
491  // The first send/receive we did was for indices, the second one will be
492  // for corresponding floating point values, so create storage for that now...
493  const std::size_t ids_size = ids.size();
494  values.resize(ids_size);
495 
496  for (std::size_t i=0; i != ids_size; i++)
497  {
498  // The index of the requested value
499  const numeric_index_type requested_index = ids[i];
500 
501  // Transform into local numbering, and get requested value.
502  values[i] = _values[requested_index - _first_local_index];
503  }
504  };
505 
506  auto action_functor =
507  [& v_local, & local_requested_ids]
508  (processor_id_type pid,
509  const std::vector<dof_id_type> &,
510  const std::vector<T> & values)
511  {
512  // Now write the received values to the appropriate place(s) in v_local
513  for (auto i : index_range(values))
514  {
515  libmesh_assert(local_requested_ids.count(pid));
516  libmesh_assert_less(i, local_requested_ids[pid].size());
517 
518  // Get the index in v_local where this value needs to be inserted.
519  const numeric_index_type local_requested_index =
520  local_requested_ids[pid][i];
521 
522  // Actually set the value in v_local
523  v_local[local_requested_index] = values[i];
524  }
525  };
526 
527  const T * ex = nullptr;
528  Parallel::pull_parallel_vector_data
529  (this->comm(), requested_ids, gather_functor, action_functor, ex);
530 }
531 
532 
533 
534 template <typename T>
536  const numeric_index_type last_local_idx,
537  const std::vector<numeric_index_type> & send_list)
538 {
539  // Only good for serial vectors
540  libmesh_assert_equal_to (this->size(), this->local_size());
541  libmesh_assert_greater (last_local_idx, first_local_idx);
542  libmesh_assert_less_equal (send_list.size(), this->size());
543  libmesh_assert_less (last_local_idx, this->size());
544 
545  const numeric_index_type my_size = this->size();
546  const numeric_index_type my_local_size = (last_local_idx - first_local_idx + 1);
547 
548  // Don't bother for serial cases
549  if ((first_local_idx == 0) &&
550  (my_local_size == my_size))
551  return;
552 
553 
554  // Build a parallel vector, initialize it with the local
555  // parts of (*this)
556  DistributedVector<T> parallel_vec(this->comm());
557 
558  parallel_vec.init (my_size, my_local_size, true, PARALLEL);
559 
560  // Copy part of *this into the parallel_vec
561  for (numeric_index_type i=first_local_idx; i<=last_local_idx; i++)
562  parallel_vec._values[i-first_local_idx] = _values[i];
563 
564  // localize like normal
565  parallel_vec.localize (*this, send_list);
566 }
567 
568 
569 
570 template <typename T>
571 void DistributedVector<T>::localize (std::vector<T> & v_local) const
572 {
573  // This function must be run on all processors at once
574  parallel_object_only();
575 
576  libmesh_assert (this->initialized());
577  libmesh_assert_equal_to (_values.size(), _local_size);
578  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
579 
580  v_local = this->_values;
581 
582  this->comm().allgather (v_local);
583 
584 #ifndef LIBMESH_HAVE_MPI
585  libmesh_assert_equal_to (local_size(), size());
586 #endif
587 }
588 
589 
590 
591 template <typename T>
592 void DistributedVector<T>::localize_to_one (std::vector<T> & v_local,
593  const processor_id_type pid) const
594 {
595  // This function must be run on all processors at once
596  parallel_object_only();
597 
598  libmesh_assert (this->initialized());
599  libmesh_assert_equal_to (_values.size(), _local_size);
600  libmesh_assert_equal_to ((_last_local_index - _first_local_index), _local_size);
601 
602  v_local = this->_values;
603 
604  this->comm().gather (pid, v_local);
605 
606 #ifndef LIBMESH_HAVE_MPI
607  libmesh_assert_equal_to (local_size(), size());
608 #endif
609 }
610 
611 
612 
613 template <typename T>
615  const NumericVector<T> &)
616 //void DistributedVector<T>::pointwise_mult (const NumericVector<T> & vec1,
617 // const NumericVector<T> & vec2)
618 {
619  libmesh_not_implemented();
620 }
621 
622 template <typename T>
624  const NumericVector<T> &)
625 {
626  libmesh_not_implemented();
627 }
628 
629 //--------------------------------------------------------------
630 // Explicit instantiations
631 template class LIBMESH_EXPORT DistributedVector<Number>;
632 
633 } // namespace libMesh
bool closed()
Checks that the library has been closed.
Definition: libmesh.C:268
T libmesh_conj(T a)
virtual T dot(const NumericVector< T > &V) const override
计算 (*this)·V,即 (*this) 与向量 V 的点积。
numeric_index_type _last_local_index
本地存储的最后一个分量(+1)。
virtual void scale(const T factor) override
缩放向量的每个元素。
virtual numeric_index_type last_local_index() const override
获取实际存储在该处理器上的最后一个向量元素的索引+1。
virtual numeric_index_type size() const =0
获取向量的大小。
virtual NumericVector< T > & operator-=(const NumericVector< T > &v) override
将 v 从 *this 减去, 。等价于 u.add(-1, v)。
DistributedVector & operator=(const DistributedVector &)
复制赋值运算符。我们不能默认实现它(尽管它本质上实现了默认行为), 因为生成的默认版本尝试自动调用基类(NumericVector)的复制赋值运算符, 而选择使其成为纯虚拟函数,出于其他设计原因。 ...
提供了不同线性代数库的向量存储方案的统一接口。
Definition: dof_map.h:67
virtual NumericVector< T > & operator/=(const NumericVector< T > &v) override
计算此向量条目与另一个向量的分量除法, 。
virtual void add(const numeric_index_type i, const T value) override
将 value 添加到由 i 指定的向量条目。 请注意,此方法的库实现是线程安全的, 例如,将在写入向量之前锁定 _numeric_vector_mutex 。
bool _is_initialized
在调用 init() 后设置为 true。
ADRealEigenVector< T, D, asd > sqrt(const ADRealEigenVector< T, D, asd > &)
计算自动微分实数向量的平方根。
Definition: type_vector.h:88
virtual Real linfty_norm() const override
获取向量的 -范数,即向量的最大绝对值。
numeric_index_type _global_size
全局向量大小。
virtual numeric_index_type local_size() const override
获取向量的本地大小,即 index_stop - index_start。
virtual NumericVector< T > & operator+=(const NumericVector< T > &v) override
将向量加上 v , 。等价于 u.add(1, v)。
uint8_t processor_id_type
Definition: id_types.h:104
ADRealEigenVector< T, D, asd > abs(const ADRealEigenVector< T, D, asd > &)
计算自动微分实数向量的绝对值。
Definition: type_vector.h:112
T norm_sq(std::complex< T > a)
Definition: tensor_tools.h:74
virtual void reciprocal() override
计算每个向量条目的分量倒数, 。
virtual Real l1_norm() const override
获取向量的 -范数,即条目的绝对值之和。
该类提供了一个简单的并行分布式向量数据类型, 专门用于 libmesh。提供了一些集体通信功能。
dof_id_type numeric_index_type
Definition: id_types.h:99
bool _is_initialized
Flag that tells if init() has been called.
Definition: libmesh.C:242
virtual void pointwise_divide(const NumericVector< T > &vec1, const NumericVector< T > &vec2) override
计算该向量与另一个向量的逐点除法。
virtual void pointwise_mult(const NumericVector< T > &vec1, const NumericVector< T > &vec2) override
计算该向量与另一个向量的逐点(分量)乘法。
virtual void conjugate() override
反转向量中每个条目的虚部。
virtual void localize_to_one(std::vector< T > &v_local, const processor_id_type proc_id=0) const override
在处理器 proc_id 上创建全局向量的本地副本。 默认情况下,数据发送到处理器 0。此方法对于从一个处理器输出数据非常有用。
numeric_index_type _local_size
本地向量大小。
DIE A HORRIBLE DEATH HERE typedef LIBMESH_DEFAULT_SCALAR_TYPE Real
virtual numeric_index_type first_local_index() const override
获取实际存储在该处理器上的第一个向量元素的索引。
virtual NumericVector< T > & operator*=(const NumericVector< T > &v) override
计算此向量条目与另一个向量的条目之间的分量乘法, 。
virtual void localize(std::vector< T > &v_local) const override
创建全局向量的副本并存储在本地向量 v_local 中。
virtual Real l2_norm() const override
获取向量的 -范数,即条目平方和的平方根。
virtual void abs() override
设置 ,对向量中的每个条目进行绝对值操作。
bool initialized()
Checks that library initialization has been done.
Definition: libmesh.C:261
virtual void init(const numeric_index_type N, const numeric_index_type n_local, const bool fast=false, const ParallelType ptype=AUTOMATIC) override
更改向量的维度为 n 。如果可能的话 ,该向量的保留内存保持不变。 如果 n==0 ,所有内存都将被释放。因此,如果要调整向量的大小并释放不需要的内存, 必须首先调用 init(0) ,然后调用 ini...
std::vector< T > _values
实际的向量数据类型,用于保存向量条目。
virtual T sum() const override
获取向量中所有值的总和。
numeric_index_type _first_local_index
本地存储的第一个分量。
bool _is_closed
用于跟踪向量的值在在一些或全部处理器上进行插入或添加值操作后是否在所有处理器上保持一致的标志。