ARTS  2.2.66
cloudbox.cc
1 /* Copyright (C) 2002-2012 Claudia Emde <claudia.emde@dlr.de>
2 
3  This program is free software; you can redistribute it and/or modify it
4  under the terms of the GNU General Public License as published by the
5  Free Software Foundation; either version 2, or (at your option) any
6  later version.
7 
8  This program is distributed in the hope that it will be useful,
9  but WITHOUT ANY WARRANTY; without even the implied warranty of
10  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11  GNU General Public License for more details.
12 
13  You should have received a copy of the GNU General Public License
14  along with this program; if not, write to the Free Software
15  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
16  USA.
17 */
18 
27 #include "cloudbox.h"
28 
29 extern const Index GFIELD3_P_GRID;
30 extern const Index GFIELD3_LAT_GRID;
31 extern const Index GFIELD3_LON_GRID;
32 extern const Numeric PI;
33 
34 /*===========================================================================
35  === External declarations
36  ===========================================================================*/
37 #include <stdexcept>
38 #include <cmath>
39 #include <algorithm>
40 #include <limits>
41 
42 #include "arts.h"
43 #include "messages.h"
44 #include "make_vector.h"
45 #include "logic.h"
46 #include "ppath.h"
47 #include "physics_funcs.h"
48 #include "math_funcs.h"
49 #include "check_input.h"
50 #include "rng.h"
51 #include <ctime>
52 #include "mc_antenna.h"
53 #include "sorting.h"
54 #include "lin_alg.h"
55 
56 
57 
60 
70 void chk_massdensity_field( bool& empty_flag,
71  const Index& dim,
72  const Tensor3& massdensity,
73  const Vector& p_grid,
74  const Vector& lat_grid,
75  const Vector& lon_grid
76  )
77 {
78 
79  // check p
80  if ( massdensity.npages() != p_grid.nelem()) {
81 
82  ostringstream os;
83  os << "The size of *p_grid* ("
84  << p_grid.nelem()
85  << ") is not equal to the number of pages of *massdensity* ("
86  << massdensity.npages()
87  <<").";
88  throw runtime_error(os.str() );
89  }
90 
91  // check lat
92  if(dim >= 2 )
93  {
94  if ( massdensity.nrows() != lat_grid.nelem()) {
95 
96  ostringstream os;
97  os << "The size of *lat_grid* ("
98  << lat_grid.nelem()
99  << ") is not equal to the number of rows of *massdensity* ("
100  << massdensity.nrows()
101  << ").";
102  throw runtime_error(os.str() );
103 
104  }
105  }
106 
107  // check lon
108  if(dim == 3 )
109  {
110  if ( massdensity.ncols() != lon_grid.nelem()) {
111 
112  ostringstream os;
113  os << "The size of *lon_grid* ("
114  << lon_grid.nelem()
115  << ") is not equal to the number of columns of *massdensity* ("
116  << massdensity.ncols()
117  << ").";
118  throw runtime_error(os.str() );
119 
120  }
121  }
122 
123  empty_flag = false;
124  // set empty_flag to true if any value of the massdensity field is non-zero
125  for (Index j=0; j<massdensity.npages(); j++) {
126  for (Index k=0; k<massdensity.nrows(); k++) {
127  for (Index l=0; l<massdensity.ncols(); l++) {
128  if ( massdensity(j,k,l) != 0.0) empty_flag = true;
129  }
130  }
131  }
132 }
133 
134 
135 
137 
145 void chk_if_pnd_zero_p(const Index& i_p,
146  const GriddedField3& pnd_field_raw,
147  const String& pnd_field_file,
148  const Verbosity& verbosity)
149 
150 {
151  const ConstVectorView pfr_p_grid = pnd_field_raw.get_numeric_grid(GFIELD3_P_GRID);
152  const ConstVectorView pfr_lat_grid = pnd_field_raw.get_numeric_grid(GFIELD3_LAT_GRID);
153  const ConstVectorView pfr_lon_grid = pnd_field_raw.get_numeric_grid(GFIELD3_LON_GRID);
154 
155  for (Index i = 0; i < pfr_lat_grid.nelem(); i++ )
156  {
157  for (Index j = 0; j < pfr_lon_grid.nelem(); j++ )
158  {
159  if ( pnd_field_raw.data(i_p, i, j) != 0. )
160  {
161  CREATE_OUT1;
162  ostringstream os;
163  os << "Warning: \n"
164  << "The particle number density field contained in the file '"
165  << pnd_field_file << "'\nis non-zero outside the cloudbox "
166  << "or close to the cloudbox boundary at the \n"
167  << "following position:\n"
168  << "pressure = " << pfr_p_grid[i_p]
169  << ", p_index = " << i_p << "\n"
170  << "latitude = " << pfr_lat_grid[i]
171  << ", lat_index = " << i << "\n"
172  << "longitude = " << pfr_lon_grid[j]
173  << ", lon_index = " << j << "\n"
174  << "\n";
175  // throw runtime_error( os.str() );
176  out1 << os.str();
177  }
178  }
179  }
180 }
181 
182 
183 
185 
193 void chk_if_pnd_zero_lat(const Index& i_lat,
194  const GriddedField3& pnd_field_raw,
195  const String& pnd_field_file,
196  const Verbosity& verbosity)
197 
198 {
199  const ConstVectorView pfr_p_grid = pnd_field_raw.get_numeric_grid(GFIELD3_P_GRID);
200  const ConstVectorView pfr_lat_grid = pnd_field_raw.get_numeric_grid(GFIELD3_LAT_GRID);
201  const ConstVectorView pfr_lon_grid = pnd_field_raw.get_numeric_grid(GFIELD3_LON_GRID);
202 
203  for (Index i = 0; i < pfr_p_grid.nelem(); i++ )
204  {
205  for (Index j = 0; j < pfr_lon_grid.nelem(); j++ )
206  {
207  if ( pnd_field_raw.data(i, i_lat, j) != 0. )
208  {
209  CREATE_OUT1;
210  ostringstream os;
211  os << "Warning: \n"
212  << "The particle number density field contained in the file '"
213  << pnd_field_file << "'\nis non-zero outside the cloudbox "
214  << "or close to the cloudbox boundary at the \n"
215  << "following position:\n"
216  << "pressure = " << pfr_p_grid[i] << ", p_index = "
217  << i << "\n"
218  << "latitude = " << pfr_lat_grid[i_lat]
219  << ", lat_index = "<<i_lat<< "\n"
220  << "longitude = " << pfr_lon_grid[j]
221  << ", lon_index = " << j << "\n"
222  << "\n";
223  //throw runtime_error( os.str() );
224  out1 << os.str();
225  }
226  }
227  }
228 }
229 
230 
231 
233 
241 void chk_if_pnd_zero_lon(const Index& i_lon,
242  const GriddedField3& pnd_field_raw,
243  const String& pnd_field_file,
244  const Verbosity& verbosity)
245 
246 {
247  const ConstVectorView pfr_p_grid = pnd_field_raw.get_numeric_grid(GFIELD3_P_GRID);
248  const ConstVectorView pfr_lat_grid = pnd_field_raw.get_numeric_grid(GFIELD3_LAT_GRID);
249  const ConstVectorView pfr_lon_grid = pnd_field_raw.get_numeric_grid(GFIELD3_LON_GRID);
250 
251  for (Index i = 0; i < pfr_p_grid.nelem(); i++ )
252  {
253  for (Index j = 0; j < pfr_lat_grid.nelem(); j++ )
254  {
255  if ( pnd_field_raw.data(i, j, i_lon) != 0. )
256  {
257  CREATE_OUT1;
258  ostringstream os;
259  os << "Warning: \n"
260  << "The particle number density field contained in the file '"
261  << pnd_field_file << "'\nis non-zero outside the cloudbox "
262  << "or close to the cloudbox boundary at the \n"
263  << "following position:\n"
264  << "pressure = " << pfr_p_grid[i]
265  << ", p_index = " << i << "\n"
266  << "latitude = " << pfr_lat_grid[j]
267  << ", lat_index = " << j << "\n"
268  << "longitude = " << pfr_lon_grid[i_lon]
269  << ", lon_index = "
270  << i_lon << "\n"
271  << "\n";
272  //throw runtime_error( os.str() );
273  out1 << os.str();
274  }
275  }
276  }
277 }
278 
279 
280 
282 
void chk_pnd_data(
295  const GriddedField3& pnd_field_raw,
296  const String& pnd_field_file,
297  const Index& atmosphere_dim,
298  const Verbosity& verbosity)
299 {
300  CREATE_OUT3;
301 
302  const ConstVectorView pfr_p_grid = pnd_field_raw.get_numeric_grid(GFIELD3_P_GRID);
303  const ConstVectorView pfr_lat_grid = pnd_field_raw.get_numeric_grid(GFIELD3_LAT_GRID);
304  const ConstVectorView pfr_lon_grid = pnd_field_raw.get_numeric_grid(GFIELD3_LON_GRID);
305 
306  // The consistency of the dimensions is checked in the reading routine.
307  // Here we have to check whether the atmospheric dimension is correct and whether
308  // the particle number density is 0 on the cloudbox boundary and outside the cloudbox.
309 
310  out3 << "Check particle number density file " << pnd_field_file
311  << "\n";
312 
313  if (atmosphere_dim == 1 && (pfr_lat_grid.nelem() != 1
314  || pfr_lon_grid.nelem() != 1) )
315  {
316  ostringstream os;
317  os << "The atmospheric dimension is 1D but the particle "
318  << "number density file *" << pnd_field_file
319  << "* is for a 3D atmosphere. \n";
320  throw runtime_error( os.str() );
321  }
322 
323 
324  else if( atmosphere_dim == 3)
325  {
326  if(pfr_lat_grid.nelem() == 1
327  || pfr_lon_grid.nelem() == 1)
328  {
329  ostringstream os;
330  os << "The atmospheric dimension is 3D but the particle "
331  << "number density file *" << pnd_field_file
332  << "* is for a 1D or a 2D atmosphere. \n";
333  throw runtime_error( os.str() );
334  }
335  }
336 
337  out3 << "Particle number density data is o.k. \n";
338 }
339 
340 
341 
343 
353  const ArrayOfGriddedField3& pnd_field_raw,
354  const String& pnd_field_file,
355  const Index& atmosphere_dim,
356  const Verbosity& verbosity
357  )
358 {
359  CREATE_OUT3;
360 
361  for( Index i = 0; i < pnd_field_raw.nelem(); i++)
362  {
363  out3 << "Element in pnd_field_raw_file:" << i << "\n";
364  chk_pnd_data(pnd_field_raw[i],
365  pnd_field_file, atmosphere_dim,
366  verbosity);
367  }
368 }
369 
370 
371 
373 
391  const Index& dim,
392  const ArrayOfGriddedField3& pnd_field_raw,
393  ConstVectorView p_grid,
394  ConstVectorView lat_grid,
395  ConstVectorView lon_grid,
396  const ArrayOfIndex& cloudbox_limits )
397 {
398  Numeric p, lat, lon, v;
399  Index n, p_i, lat_i, lon_i;
400  // For any non-zero point, verify we're outside the cloudbox
401  for (n=0; n < pnd_field_raw.nelem(); n++) {
402  for (p_i=0; p_i < pnd_field_raw[n].data.npages(); p_i++) {
403  for (lat_i=0; lat_i < pnd_field_raw[n].data.nrows(); lat_i++) {
404  for (lon_i=0; lon_i < pnd_field_raw[n].data.ncols(); lon_i++) {
405  v = pnd_field_raw[n].data(p_i, lat_i, lon_i);
406  if (v != 0) {
407  // Verify pressure is between cloudbox limits
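 // (How the test below works: in ARTS, p_grid is strictly decreasing, so
 //  p_grid[cloudbox_limits[0]] is the highest pressure (cloudbox bottom) and
 //  p_grid[cloudbox_limits[1]] the lowest (cloudbox top). A point is flagged
 //  when its pressure is at or below the top value, or at or above the bottom
 //  value while the cloudbox does not touch the surface (cloudbox_limits[0]!=0).
 //  Example: for a cloudbox spanning 800-200 hPa that does not touch the
 //  surface, non-zero pnd at 900 hPa or at 100 hPa triggers the error.)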
408  p = pnd_field_raw[n].get_numeric_grid(GFIELD3_P_GRID)[p_i];
409 // if (!((p <= p_grid[cloudbox_limits[0]]) &
410 // (p >= p_grid[cloudbox_limits[1]]))) {
411  if ( (p <= p_grid[cloudbox_limits[1]]) ||
412  ( (p >= p_grid[cloudbox_limits[0]]) &&
413  (cloudbox_limits[0]!=0)) ) {
414  ostringstream os;
415  os << "Found non-zero pnd outside cloudbox. "
416  << "Cloudbox extends from p="
417  << p_grid[cloudbox_limits[0]]
418  << " Pa to p="
419  << p_grid[cloudbox_limits[1]]
420  << " Pa, but found pnd=" << v
421  << "/m³ at p=" << p << " Pa for particle #" << n
422  << ".";
423  throw runtime_error(os.str());
424  }
425  // Verify latitude is inside the cloudbox, too
426  if (dim > 1) {
427  lat = pnd_field_raw[n].get_numeric_grid(GFIELD3_LAT_GRID)[lat_i];
428  if (!((lat > lat_grid[cloudbox_limits[2]]) &
429  (lat < lat_grid[cloudbox_limits[3]]))) {
430  ostringstream os;
431  os << "Found non-zero pnd outside cloudbox. "
432  << "Cloudbox extends from lat="
433  << lat_grid[cloudbox_limits[2]]
434  << "° to lat="
435  << lat_grid[cloudbox_limits[3]]
436  << "°, but found pnd=" << v
437  << "/m³ at lat=" << lat << "° for particle #" << n
438  << ".";
439  throw runtime_error(os.str());
440  }
441  }
442  // Etc. for longitude
443  if (dim > 2) {
444  lon = pnd_field_raw[n].get_numeric_grid(GFIELD3_LON_GRID)[lon_i];
445  if (!((lon > lon_grid[cloudbox_limits[4]]) &
446  (lon < lon_grid[cloudbox_limits[5]]))) {
447  ostringstream os;
448  os << "Found non-zero pnd outside cloudbox. "
449  << "Cloudbox extends from lon="
450  << lon_grid[cloudbox_limits[4]]
451  << "° to lon="
452  << lon_grid[cloudbox_limits[5]]
453  << "°, but found pnd=" << v
454  << "/m³ at lon=" << lon << "° for particle #" << n
455  << ".";
456  throw runtime_error(os.str());
457  }
458  }
459  }
460  }
461  }
462  }
463  }
464 }
465 
466 
467 
469 
481  const ArrayOfString& part_species,
482  const String& delim
483  )
484 {
485  ArrayOfString strarr;
486  Numeric sizeval;
487 
488  for ( Index k=0; k<part_species.nelem(); k++ )
489  {
490  part_species[k].split ( strarr, delim );
491  if ( strarr.nelem() > 5 )
492  {
493  ostringstream os;
494  os << "Individual strings in part_species can only contain up to 5\n"
495  << "elements, but entry #" << k << " contains the following "
496  << strarr.nelem() << ":\n" << strarr << "\n";
497  throw runtime_error ( os.str() );
498  }
499 
500  if ( strarr.nelem() > 3 && strarr[3] != "*" )
501  {
502  istringstream os1 ( strarr[3] );
503  os1 >> sizeval;
504  if (os1.fail())
505  {
506  ostringstream os;
507  os << "Sizemin specification can only be a number or wildcard ('*')"
508  << ", but is '" << strarr[3] << "'\n";
509  throw runtime_error ( os.str() );
510  }
511  }
512 
513  if ( strarr.nelem() > 4 && strarr[4] != "*" )
514  {
515  istringstream os1 ( strarr[4] );
516  os1 >> sizeval;
517  if (os1.fail())
518  {
519  ostringstream os;
520  os << "Sizemax specification can only be a number or wildcard ('*')"
521  << ", but is '" << strarr[4] << "'\n";
522  throw runtime_error ( os.str() );
523  }
524  }
525  }
526 }
527 
528 
529 
531 
542  const ArrayOfScatteringMetaData& scat_meta_array,
543  const Verbosity&)
544 {
545  if (scat_data_array.nelem() != scat_meta_array.nelem())
546  {
547  ostringstream os;
548  os << "The number of elements in *scat_data_array*\n"
549  << "and *scat_meta_array* do not match.\n"
550  << "Each SingleScattering file must correspond\n"
551  << "to one ScatteringMeta data file.";
552  throw runtime_error( os.str());
553  }
554 
555 }
556 
558 
568  const String& scat_meta_file,
569  const Verbosity& verbosity)
570 {
571  CREATE_OUT2;
572  out2 << " Check scattering meta properties file "<< scat_meta_file << "\n";
573 
574 /* this check is outdated. type now is free form!
575  however, we might want to have other things checked here!?
576  - which parameters at least are needed? -> radius, ...?
577  - ...
578  if (scat_meta.type != "Ice" && scat_meta.type != "Water" && scat_meta.type != "Aerosol")
579  {
580  ostringstream os;
581  os << "Type in " << scat_meta_file << " must be 'Ice', 'Water' or 'Aerosol'\n";
582  throw runtime_error( os.str() );
583  }
584 */
585  //(more) checks need to be included
586 }
587 
588 
589 
591 
605 void chk_scat_data(const SingleScatteringData& scat_data_array,
606  const String& scat_data_file,
607  ConstVectorView f_grid,
608  const Verbosity& verbosity)
609 {
610  CREATE_OUT2;
611  out2 << " Check single scattering properties file "<< scat_data_file
612  << "\n";
613 
614  if (scat_data_array.particle_type != 10 &&
615  scat_data_array.particle_type != 20 &&
616  scat_data_array.particle_type != 30)
617  {
618  ostringstream os;
619  os << "Particle type value in file " << scat_data_file << " is wrong. "
620  << "It must be \n"
621  << "10 - arbitrarily oriented particles \n"
622  << "20 - randomly oriented particles or \n"
623  << "30 - horizontally aligned particles.\n";
624  throw runtime_error( os.str() );
625  }
626 
627  chk_interpolation_grids("scat_data_array.f_grid to f_grid",
628  scat_data_array.f_grid,
629  f_grid);
630 
631 /* if (!(scat_data_array.f_grid[0] <= f_grid[0] &&
632  last(f_grid) <=
633  last(scat_data_array.f_grid) ))
634  {
635  ostringstream os;
636  os << "The range of frequency grid in the single scattering"
637  << " properties datafile "
638  << scat_data_file << " does not contain all values of "
639  << "*f_grid*.";
640  throw runtime_error( os.str() );
641  }*/
642 
643  // Here we only check whether the temperature grid is of the unit K, not
644  // whether it corresponds to the required values in t_field. The second
645  // option is not trivial since here one has to look whether the pnd_field
646  // is non-zero for the corresponding temperature. This check is done in the
647  // functions where the multiplication with the particle number density is
648  // done.
649 
650  if (!(0. < scat_data_array.T_grid[0] && last(scat_data_array.T_grid) < 1001.))
651  {
652  ostringstream os;
653  os << "The temperature values in " << scat_data_file
654  << " are negative or very large. Check whether you have used the "
655  << "right unit [Kelvin].";
656  throw runtime_error( os.str() );
657  }
658 
659  if (scat_data_array.za_grid[0] != 0.)
660  {
661  ostringstream os;
662  os << "The first value of the za grid in the single"
663  << " scattering properties datafile "
664  << scat_data_file << " must be 0.";
665  throw runtime_error( os.str() );
666  }
667 
668  if (last(scat_data_array.za_grid) != 180.)
669  {
670  ostringstream os;
671  os << "The last value of the za grid in the single"
672  << " scattering properties datafile "
673  << scat_data_file << " must be 180.";
674  throw runtime_error( os.str() );
675  }
676 
677  if (scat_data_array.particle_type == 10 && scat_data_array.aa_grid[0] != -180.)
678  {
679  ostringstream os;
680  os << "For particle_type = 10 (general orientation) the first value"
681  << " of the aa grid in the single scattering"
682  << " properties datafile "
683  << scat_data_file << " must be -180.";
684  throw runtime_error( os.str() );
685  }
686 
687  if (scat_data_array.particle_type == 30 && scat_data_array.aa_grid[0] != 0.)
688  {
689  ostringstream os;
690  os << "For particle_type = 30 (horizontal orientation)"
691  << " the first value"
692  << " of the aa grid in the single scattering"
693  << " properties datafile "
694  << scat_data_file << " must be 0.";
695  throw runtime_error( os.str() );
696  }
697 
698  if (scat_data_array.particle_type == 30 && last(scat_data_array.aa_grid) != 180.)
699  {
700  ostringstream os;
701  os << "The last value of the aa grid in the single"
702  << " scattering properties datafile "
703  << scat_data_file << " must be 180.";
704  throw runtime_error( os.str() );
705  }
706 
707  ostringstream os_pha_mat;
708  os_pha_mat << "pha_mat* in the file *" << scat_data_file;
709  ostringstream os_ext_mat;
710  os_ext_mat << "ext_mat* in the file *" << scat_data_file;
711  ostringstream os_abs_vec;
712  os_abs_vec << "abs_vec* in the file *" << scat_data_file;
713 
714  switch (scat_data_array.particle_type){
715 
717 
718  out2 << " Datafile is for arbitrarily orientated particles. \n";
719 
720  chk_size(os_pha_mat.str(), scat_data_array.pha_mat_data,
721  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
722  scat_data_array.za_grid.nelem(), scat_data_array.aa_grid.nelem(),
723  scat_data_array.za_grid.nelem(), scat_data_array.aa_grid.nelem(),
724  16);
725 
726  chk_size(os_ext_mat.str(), scat_data_array.ext_mat_data,
727  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
728  scat_data_array.za_grid.nelem(), scat_data_array.aa_grid.nelem(),
729  7);
730 
731  chk_size(os_abs_vec.str(), scat_data_array.abs_vec_data,
732  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
733  scat_data_array.za_grid.nelem(), scat_data_array.aa_grid.nelem(),
734  4);
735  break;
736 
738 
739  out2 << " Datafile is for randomly oriented particles, i.e., "
740  << "macroscopically isotropic and mirror-symmetric scattering "
741  << "media. \n";
742 
743  chk_size(os_pha_mat.str(), scat_data_array.pha_mat_data,
744  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
745  scat_data_array.za_grid.nelem(), 1, 1, 1, 6);
746 
747  chk_size(os_ext_mat.str(), scat_data_array.ext_mat_data,
748  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
749  1, 1, 1);
750 
751  chk_size(os_abs_vec.str(), scat_data_array.abs_vec_data,
752  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
753  1, 1, 1);
754  break;
755 
757 
758  out2 << " Datafile is for horizontally aligned particles. \n";
759 
760  chk_size(os_pha_mat.str(), scat_data_array.pha_mat_data,
761  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
762  scat_data_array.za_grid.nelem(), scat_data_array.aa_grid.nelem(),
763  scat_data_array.za_grid.nelem()/2+1, 1,
764  16);
765 
766  chk_size(os_ext_mat.str(), scat_data_array.ext_mat_data,
767  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
768  scat_data_array.za_grid.nelem()/2+1, 1,
769  3);
770 
771  chk_size(os_abs_vec.str(), scat_data_array.abs_vec_data,
772  scat_data_array.f_grid.nelem(), scat_data_array.T_grid.nelem(),
773  scat_data_array.za_grid.nelem()/2+1, 1,
774  2);
775  break;
776 
778  throw runtime_error(
779  "Special case for spherical particles not"
780  " implemented. "
781  "Use p20 (randomly oriented particles) instead."
782  );
783 
784  }
785 
786 }
787 
788 
789 
790 
791 
bool is_gp_inside_cloudbox(
809  const GridPos& gp_p,
810  const GridPos& gp_lat,
811  const GridPos& gp_lon,
812  const ArrayOfIndex& cloudbox_limits,
813  const bool& include_boundaries,
814  const Index& atmosphere_dim )
815 
816 {
817  if (include_boundaries)
818  {
819  // Pressure dimension
820  double ipos = fractional_gp( gp_p );
821  if( ipos < double( cloudbox_limits[0] ) ||
822  ipos > double( cloudbox_limits[1] ) )
823  { return false; }
824 
825  else if( atmosphere_dim >= 2 )
826  {
827  // Latitude dimension
828  ipos = fractional_gp( gp_lat );
829  if( ipos < double( cloudbox_limits[2] ) ||
830  ipos > double( cloudbox_limits[3] ) )
831  { return false; }
832 
833  else if( atmosphere_dim == 3 )
834  {
835  // Longitude dimension
836  ipos = fractional_gp( gp_lon );
837  if( ipos < double( cloudbox_limits[4] ) ||
838  ipos > double( cloudbox_limits[5] ) )
839  { return false; }
840  }
841  }
842  return true;
843  }
844  else
845  {
846  // Pressure dimension
847  double ipos = fractional_gp( gp_p );
848  if( ipos <= double( cloudbox_limits[0] ) ||
849  ipos >= double( cloudbox_limits[1] ) )
850  { return false; }
851 
852  else if( atmosphere_dim >= 2 )
853  {
854  // Latitude dimension
855  ipos = fractional_gp( gp_lat );
856  if( ipos <= double( cloudbox_limits[2] ) ||
857  ipos >= double( cloudbox_limits[3] ) )
858  { return false; }
859 
860  else if( atmosphere_dim == 3 )
861  {
862  // Longitude dimension
863  ipos = fractional_gp( gp_lon );
864  if( ipos <= double( cloudbox_limits[4] ) ||
865  ipos >= double( cloudbox_limits[5] ) )
866  { return false; }
867  }
868  }
869  return true;
870  }
871 }
872 
873 
874 
892 bool is_inside_cloudbox(const Ppath& ppath_step,
893  const ArrayOfIndex& cloudbox_limits,
894  const bool include_boundaries)
895 
896 {
897  assert( cloudbox_limits.nelem() == 6 );
898  const Index np=ppath_step.np;
899 
900  return is_gp_inside_cloudbox(ppath_step.gp_p[np-1],ppath_step.gp_lat[np-1],
901  ppath_step.gp_lon[np-1],cloudbox_limits,include_boundaries);
902 
903 }
904 
905 
906 
924 void pnd_fieldMH97 (Tensor4View pnd_field,
925  const Tensor3& IWC_field,
926  const Tensor3& t_field,
927  const ArrayOfIndex& limits,
928  const ArrayOfScatteringMetaData& scat_meta_array,
929  const Index& scat_data_start,
930  const Index& npart,
931  const String& part_string,
932  const String& delim,
933  const Verbosity& verbosity)
934 {
935  ArrayOfIndex intarr;
936  Index pos;
937  Vector vol_unsorted ( npart, 0.0 );
938  Vector vol ( npart, 0.0 );
939  Vector dm ( npart, 0.0 );
940  Vector rho ( npart, 0.0 );
941  Vector pnd ( npart, 0.0 );
942  Vector dN ( npart, 0.0 );
943 
944  String psd_param;
945  String partfield_name;
946 
947  //split String and copy to ArrayOfString
948  parse_psd_param( psd_param, part_string, delim);
949  parse_partfield_name( partfield_name, part_string, delim);
950 
951  bool noisy = (psd_param == "MH97n");
952 
953  for ( Index i=0; i < npart; i++ )
954  vol_unsorted[i] = ( scat_meta_array[i+scat_data_start].volume );
955  get_sorted_indexes(intarr, vol_unsorted);
956 
957  // extract scattering meta data
958  for ( Index i=0; i< npart; i++ )
959  {
960  pos = intarr[i]+scat_data_start;
961 
962  vol[i] = ( scat_meta_array[pos].volume ); //m^3
963  // calculate melted diameter from volume [m]
964  dm[i] = pow (
965  ( scat_meta_array[pos].volume *6./PI ),
966  ( 1./3. ) );
967  // get density from meta data [kg/m^3]
968  rho[i] = scat_meta_array[pos].density;
969  // get aspect ratio from meta data [ ]
970  }
971 
972  if (dm.nelem() > 0)
973  // dm.nelem()=0 implies no selected particles for the respective particle
974  // field. should not occur.
975  {
976  // iteration over all atm. levels
977  for ( Index p=limits[0]; p<limits[1]; p++ )
978  {
979  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
980  {
981  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
982  {
983  // we only need to go through PSD calc if there is any material
984  if (IWC_field ( p, lat, lon ) > 0.)
985  {
986  // iteration over all given size bins
987  for ( Index i=0; i<dm.nelem(); i++ )
988  {
989  // calculate particle size distribution with MH97
990  // [# m^-3 m^-1]
991  dN[i] = IWCtopnd_MH97 ( IWC_field ( p, lat, lon ),
992  dm[i], t_field ( p, lat, lon ),
993  rho[i], noisy );
994  //dN2[i] = dN[i] * vol[i] * rho[i];
995  }
996 
997  // scale pnds by bin width
998  if (dm.nelem() > 1)
999  scale_pnd( pnd, dm, dN );
1000  else
1001  pnd = dN;
1002 
1003  // calculate error of pnd sum and real XWC
1004  chk_pndsum ( pnd, IWC_field ( p,lat,lon ), vol, rho,
1005  p, lat, lon, partfield_name, verbosity );
1006 
1007  // writing pnd vector to wsv pnd_field
1008  for ( Index i = 0; i< npart; i++ )
1009  {
1010  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1011  lat-limits[2], lon-limits[4] ) = pnd[i];
1012  }
1013  }
1014  else
1015  for ( Index i = 0; i< npart; i++ )
1016  {
1017  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1018  lat-limits[2], lon-limits[4] ) = 0.;
1019  }
1020  }
1021  }
1022  }
1023  }
1024 }
1025 
1026 
1045 void pnd_fieldH11 (Tensor4View pnd_field,
1046  const Tensor3& IWC_field,
1047  const Tensor3& t_field,
1048  const ArrayOfIndex& limits,
1049  const ArrayOfScatteringMetaData& scat_meta_array,
1050  const Index& scat_data_start,
1051  const Index& npart,
1052  const String& part_string,
1053  const String& delim,
1054  const Verbosity& verbosity)
1055 {
1056  ArrayOfIndex intarr;
1057  Index pos;
1058  Vector dmax_unsorted ( npart, 0.0 );
1059  Vector vol ( npart, 0.0 );
1060  Vector dm ( npart, 0.0 );
1061  Vector rho ( npart, 0.0 );
1062  Vector pnd ( npart, 0.0 );
1063  Vector dN ( npart, 0.0 );
1064  String partfield_name;
1065 
1066  //split String and copy to ArrayOfString
1067  parse_partfield_name( partfield_name, part_string, delim);
1068 
1069  for ( Index i=0; i < npart; i++ )
1070  dmax_unsorted[i] = ( scat_meta_array[i+scat_data_start].diameter_max );
1071  get_sorted_indexes(intarr, dmax_unsorted);
1072 
1073  // extract scattering meta data
1074  for ( Index i=0; i< npart; i++ )
1075  {
1076  pos = intarr[i]+scat_data_start;
1077 
1078  vol[i]= scat_meta_array[pos].volume; //[m^3]
1079  // get maximum diameter from meta data [m]
1080  dm[i] = scat_meta_array[pos].diameter_max;
1081  // get density from meta data [kg/m^3]
1082  rho[i] = scat_meta_array[pos].density;
1083  // get aspect ratio from meta data [ ]
1084  }
1085 
1086  if (dm.nelem() > 0)
1087  // dm.nelem()=0 implies no selected particles for the respective particle
1088  // field. should not occur anymore.
1089  {
1090  // iteration over all atm. levels
1091  for ( Index p=limits[0]; p<limits[1]; p++ )
1092  {
1093  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
1094  {
1095  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
1096  {
1097  // we only need to go through PSD calc if there is any material
1098  if (IWC_field ( p, lat, lon ) > 0.)
1099  {
1100  // iteration over all given size bins
1101  for ( Index i=0; i<dm.nelem(); i++ ) //loop over number of particles
1102  {
1103  // calculate particle size distribution for H11
1104  // [# m^-3 m^-1]
1105  dN[i] = IWCtopnd_H11 ( dm[i], t_field ( p, lat, lon ) );
1106  }
1107  // scale pnds by bin width
1108  if (dm.nelem() > 1)
1109  scale_pnd( pnd, dm, dN ); //[# m^-3]
1110  else
1111  pnd = dN;
1112 
1113  // scale H11 distribution (which is independent of Ice or
1114  // Snow massdensity) to current massdensity.
1115  /* JM120411: we don't need this - it's doing exactly, what
1116  chk_pndsum is doing. instead we redefine verbosity
1117  for checksum here to suppress the 'scaling is off'
1118  warnings and let chk_pndsum do the proper scaling.
1119  scale_H11 ( pnd, IWC_field ( p,lat,lon ), vol, rho ); */
1120 
1121  // calculate proper scaling of pnd sum from real IWC and apply
1122  chk_pndsum ( pnd, IWC_field ( p,lat,lon ), vol, rho,
1123  p, lat, lon, partfield_name, verbosity );
1124 
1125  // writing pnd vector to wsv pnd_field
1126  for ( Index i =0; i< npart; i++ )
1127  {
1128  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1129  lat-limits[2], lon-limits[4] ) = pnd[i];
1130  }
1131  }
1132  else
1133  {
1134  for ( Index i = 0; i< npart; i++ )
1135  {
1136  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1137  lat-limits[2], lon-limits[4] ) = 0.;
1138  }
1139  }
1140  }
1141  }
1142  }
1143  }
1144 }
1145 
1146 
1165 void pnd_fieldH13 (Tensor4View pnd_field,
1166  const Tensor3& IWC_field,
1167  const Tensor3& t_field,
1168  const ArrayOfIndex& limits,
1169  const ArrayOfScatteringMetaData& scat_meta_array,
1170  const Index& scat_data_start,
1171  const Index& npart,
1172  const String& part_string,
1173  const String& delim,
1174  const Verbosity& verbosity)
1175 {
1176  ArrayOfIndex intarr;
1177  Index pos;
1178  Vector dmax_unsorted ( npart, 0.0 );
1179  Vector vol ( npart, 0.0 );
1180  Vector dm ( npart, 0.0 );
1181  Vector rho ( npart, 0.0 );
1182  Vector pnd ( npart, 0.0 );
1183  Vector dN ( npart, 0.0 );
1184 // Vector ar ( npart, 0.0 );
1185  String partfield_name;
1186 
1187  //split String and copy to ArrayOfString
1188  parse_partfield_name( partfield_name, part_string, delim);
1189 
1190  for ( Index i=0; i < npart; i++ )
1191  dmax_unsorted[i] = ( scat_meta_array[i+scat_data_start].diameter_max );
1192  get_sorted_indexes(intarr, dmax_unsorted);
1193 
1194  // extract scattering meta data
1195  for ( Index i=0; i< npart; i++ )
1196  {
1197  pos = intarr[i]+scat_data_start;
1198 
1199  vol[i]= scat_meta_array[pos].volume; //[m^3]
1200  // get maximum diameter from meta data [m]
1201  dm[i] = scat_meta_array[pos].diameter_max;
1202  // get density from meta data [kg/m^3]
1203  rho[i] = scat_meta_array[pos].density;
1204  // get aspect ratio from meta data [ ]
1205 // ar[i] = scat_meta_array[pos].aspect_ratio;
1206  }
1207 
1208 /*
1209  // Collect all unique aspect ratios and check if there are more than one
1210  vector<Numeric> ar_in;
1211  for (Iterator1D it = ar.begin(); it != ar.end(); ++it)
1212  if (find(ar_in.begin(), ar_in.end(), *it) == ar_in.end())
1213  ar_in.push_back(*it);
1214 
1215  if (ar_in.size()>1)
1216  {
1217  ostringstream os;
1218  os << "There are " << ar_in.size() << " unique aspect ratios in *scat_meta_array*.\n"
1219  "This parametrization is only valid for one single\n"
1220  "aspect ratio\n";
1221  throw runtime_error(os.str());
1222  }
1223 */
1224 
1225  if (dm.nelem() > 0)
1226  // dm.nelem()=0 implies no selected particles for the respective particle
1227  // field. should not occur anymore.
1228  {
1229  // iteration over all atm. levels
1230  for ( Index p=limits[0]; p<limits[1]; p++ )
1231  {
1232  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
1233  {
1234  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
1235  {
1236  // we only need to go through PSD calc if there is any material
1237  if (IWC_field ( p, lat, lon ) > 0.)
1238  {
1239  // iteration over all given size bins
1240  for ( Index i=0; i<dm.nelem(); i++ ) //loop over number of particles
1241  {
1242  // calculate particle size distribution for H13
1243  // [# m^-3 m^-1]
1244  dN[i] = IWCtopnd_H13 ( dm[i], t_field ( p, lat, lon ) );
1245  }
1246  // scale pnds by bin width
1247  if (dm.nelem() > 1)
1248  scale_pnd( pnd, dm, dN ); //[# m^-3]
1249  else
1250  pnd = dN;
1251 
1252  // scale H13 distribution (which is independent of Ice or
1253  // Snow massdensity) to current massdensity.
1254  /* JM120411: we don't need this - it's doing exactly, what
1255  chk_pndsum is doing. instead we redefine verbosity
1256  for checksum here to suppress the 'scaling is off'
1257  warnings and let chk_pndsum do the proper scaling.
1258  scale_H13 ( pnd, IWC_field ( p,lat,lon ), vol, rho );*/
1259 
1260  // calculate proper scaling of pnd sum from real IWC and apply
1261  chk_pndsum ( pnd, IWC_field ( p,lat,lon ), vol, rho,
1262  p, lat, lon, partfield_name, verbosity );
1263 
1264  // writing pnd vector to wsv pnd_field
1265  for ( Index i =0; i< npart; i++ )
1266  {
1267  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1268  lat-limits[2], lon-limits[4] ) = pnd[i];
1269  }
1270  }
1271  else
1272  {
1273  for ( Index i = 0; i< npart; i++ )
1274  {
1275  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1276  lat-limits[2], lon-limits[4] ) = 0.;
1277  }
1278  }
1279  }
1280  }
1281  }
1282  }
1283 }
1284 
1304  const Tensor3& IWC_field,
1305  const Tensor3& t_field,
1306  const ArrayOfIndex& limits,
1307  const ArrayOfScatteringMetaData& scat_meta_array,
1308  const Index& scat_data_start,
1309  const Index& npart,
1310  const String& part_string,
1311  const String& delim,
1312  const Verbosity& verbosity)
1313 {
1314  CREATE_OUT1;
1315 
1316  ArrayOfIndex intarr;
1317  Index pos;
1318  Vector dmax_unsorted ( npart, 0.0 );
1319  Vector vol ( npart, 0.0 );
1320  Vector dm ( npart, 0.0 );
1321  Vector rho ( npart, 0.0 );
1322  Vector pnd ( npart, 0.0 );
1323  Vector ar ( npart, 0.0 ); // Aspect ratio set for the T-matrix calculations
1324  String partfield_name;
1325 
1326  //split String and copy to ArrayOfString
1327  parse_partfield_name( partfield_name, part_string, delim);
1328 
1329  for ( Index i=0; i < npart; i++ )
1330  dmax_unsorted[i] = ( scat_meta_array[i+scat_data_start].diameter_max );
1331  get_sorted_indexes(intarr, dmax_unsorted);
1332 
1333  // extract scattering meta data
1334  for ( Index i=0; i< npart; i++ )
1335  {
1336  pos = intarr[i]+scat_data_start;
1337 
1338  vol[i]= scat_meta_array[pos].volume; //[m^3]
1339  // get maximum diameter from meta data [m]
1340  dm[i] = scat_meta_array[pos].diameter_max;
1341  // get density from meta data [kg/m^3]
1342  rho[i] = scat_meta_array[pos].density;
1343  // get aspect ratio from meta data [ ]
1344  ar[i] = scat_meta_array[pos].aspect_ratio;
1345  }
1346  // Collect all unique maximum diameters
1347  vector<Numeric> dm_in;
1348  for (Iterator1D it = dm.begin(); it != dm.end(); ++it)
1349  if (find(dm_in.begin(), dm_in.end(), *it) == dm_in.end())
1350  dm_in.push_back(*it);
1351 
1352  // Collect all unique aspect ratios
1353  vector<Numeric> ar_in;
1354  for (Iterator1D it = ar.begin(); it != ar.end(); ++it)
1355  if (find(ar_in.begin(), ar_in.end(), *it) == ar_in.end())
1356  ar_in.push_back(*it);
1357 
1358  Vector dm_input;
1359  Vector ar_input;
1360  dm_input=dm_in;
1361  ar_input=ar_in;
1362 
1363  // Check size and shape limits
1364  if (dm[0]<7.7*1e-5)
1365  {
1366  ostringstream os;
1367  os << "The *H13Shape* parametrization is only valid for particles with\n"
1368  "a maximum diameter >= 77 um, but the smallest value of *diameter_max* in\n"
1369  "this simulation is " << dm[0] << " m and thus too small. Set a new *diameter_max_grid*!\n";
1370  throw runtime_error(os.str());
1371  }
1372 
1373  if (ar_input.nelem()==1)
1374  {
1375  out1 << "WARNING! Only one unique aspect ratio is used. The parametrization\n"
1376  << "*H13* will generate the same result but with fewer computations\n"
1377  << "and thus in a shorter time.\n";
1378  }
1379 
1380  if (ar_input[ar_input.nelem()-1] >= 1)
1381  {
1382  ostringstream os;
1383  os << "H13Shape is only valid for prolate spheroids\n"
1384  "and cylinders at the moment, i.e., for aspect ratios smaller\n"
1385  "than one. The maximum aspect ratio chosen is " << ar_input[ar_input.nelem()-1] << ".\n"
1386  "Set a new *aspect_ratio_grid*!";
1387  throw runtime_error( os.str() );
1388  }
1389 
1390  Vector dN ( dm_input.nelem(), 0.0 );
1391  Vector Ar ( dm_input.nelem(), 0.0 );
1392  Vector arH13 ( dm_input.nelem(), 0.0 );
1393  Vector pnd_temp ( dm_input.nelem(), 0.0 );
1394 
1395 
1396  //const bool suppress=true;
1397  //const Verbosity temp_verb(0,0,0);
1398 
1399  if (dm_input.nelem() > 0)
1400  // dm.nelem()=0 implies no selected particles for the respective particle
1401  // field. should not occur anymore.
1402  {
1403  // iteration over all atm. levels
1404  for ( Index p=limits[0]; p<limits[1]; p++ )
1405  {
1406  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
1407  {
1408  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
1409  {
1410  // we only need to go through PSD calc if there is any material
1411  if (IWC_field ( p, lat, lon ) > 0.)
1412  {
1413  // iteration over all given size bins
1414  for ( Index i=0; i<dm_input.nelem(); i++ ) //loop over number of particles
1415  {
1416  // calculate particle size distribution for H13Shape
1417  // [# m^-3 m^-1]
1418  dN[i] = IWCtopnd_H13Shape ( dm_input[i], t_field ( p, lat, lon ) );
1419 
1420  // calculate Area ratio distribution for H13Shape
1421  Ar[i] = area_ratioH13 (dm_input[i], t_field (p, lat, lon ) );
1422 
1423  // Aspect ratio equals area ratio (for prolate particles)
1424  arH13[i] = Ar[i];
1425  }
1426 
1427  // scale pnds by bin width
1428  if (dm_input.nelem() > 1)
1429  scale_pnd( pnd_temp, dm_input, dN ); //[# m^-3]
1430  else
1431  pnd_temp = dN;
1432 
1433  // Check which element in ar is closest to arH13 and assign
1434  // the PND for that size to that particle and zeros to the rest
1435  Index l;
1436  l=ar_input.nelem();
1437 
1438  Vector diff;
1439 
1440  for ( Index k=0, j=0; j<pnd_temp.nelem(); k+=l,j++ )
1441  {
1442  diff = ar[Range(k,l)];
1443 
1444  diff -= arH13[j];
1445 
1446  Numeric minval = std::numeric_limits<Numeric>::infinity();
1447  Index minpos = -1;
1448 
1449  for (Index i = 0; i < diff.nelem(); i++)
1450  {
1451  if (abs(diff[i]) < minval)
1452  {
1453  minval = abs(diff[i]);
1454  minpos = i;
1455  }
1456  }
1457  pnd[Range(k,l)]=0;
1458  pnd[minpos+k]=pnd_temp[j];
1459 
1460 
1461  }
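 // (The block above assumes the size-sorted scattering elements come in
 //  consecutive groups of l = ar_input.nelem() aspect ratios per unique
 //  maximum diameter: for each size bin j, the whole pnd of that bin is given
 //  to the one element whose aspect ratio is closest to the H13Shape
 //  prediction arH13[j], and the other elements of the group are set to zero.)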
1462 
1463  // scale H13 distribution (which is independent of Ice or
1464  // Snow massdensity) to current massdensity.
1465  /* JM120411: we don't need this - it's doing exactly, what
1466  chk_pndsum is doing. instead we redefine verbosity
1467  for checksum here to suppress the 'scaling is off'
1468  warnings and let chk_pndsum do the proper scaling.
1469  scale_H13 ( pnd, IWC_field ( p,lat,lon ), vol, rho );*/
1470 
1471  // calculate proper scaling of pnd sum from real IWC and apply
1472  chk_pndsum ( pnd, IWC_field ( p,lat,lon ), vol, rho,
1473  p, lat, lon, partfield_name, verbosity );
1474 // p, lat, lon, partfield_name, temp_verb );
1475 
1476  // writing pnd vector to wsv pnd_field
1477  for ( Index i =0; i< npart; i++ )
1478  {
1479  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1480  lat-limits[2], lon-limits[4] ) = pnd[i];
1481  }
1482  }
1483  else
1484  {
1485  for ( Index i = 0; i< npart; i++ )
1486  {
1487  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1488  lat-limits[2], lon-limits[4] ) = 0.;
1489  }
1490  }
1491  }
1492  }
1493  }
1494  }
1495 }
1496 
1517 void pnd_fieldF07TR (Tensor4View pnd_field,
1518  const Tensor3& SWC_field,
1519  const Tensor3& t_field,
1520  const ArrayOfIndex& limits,
1521  const ArrayOfScatteringMetaData& scat_meta_array,
1522  const Index& scat_data_start,
1523  const Index& npart,
1524  const String& part_string,
1525  const String& delim,
1526  const Verbosity& verbosity)
1527 {
1528  ArrayOfIndex intarr;
1529  Index pos;
1530  Vector dmax_unsorted ( npart, 0.0 );
1531  Vector vol ( npart, 0.0 );
1532  Vector dmax ( npart, 0.0 );
1533  Vector rho ( npart, 0.0 );
1534  Vector pnd ( npart, 0.0 );
1535  Vector dN ( npart, 0.0 );
1536  String partfield_name;
1537  Numeric alpha;
1538  Numeric beta;
1539  Vector log_m( npart, 0.0 );
1540  Vector log_D( npart, 0.0 );
1541  Vector q;
1542 
1543  //unit conversion
1544  const Numeric D0=1; //[m]
1545 // Numeric dummy1;
1546 // Numeric dummy2;
1547 
1548  //split String and copy to ArrayOfString
1549  parse_partfield_name( partfield_name, part_string, delim);
1550 
1551  for ( Index i=0; i < npart; i++ )
1552  dmax_unsorted[i] = ( scat_meta_array[i+scat_data_start].diameter_max );
1553  get_sorted_indexes(intarr, dmax_unsorted);
1554 
1555  // extract scattering meta data
1556  for ( Index i=0; i< npart; i++ )
1557  {
1558  pos = intarr[i]+scat_data_start;
1559 
1560  vol[i]= scat_meta_array[pos].volume; //[m^3]
1561  // get maximum diameter from meta data [m]
1562  dmax[i] = scat_meta_array[pos].diameter_max;
1563 
1564 // dummy1=dmax[i];
1565 
1566  // get density from meta data [kg/m^3]
1567  rho[i] = scat_meta_array[pos].density;
1568  // get aspect ratio from meta data [ ]
1569 
1570  // logarithm of Dmax, needed for estimating mass-dimension-relationship
1571  log_D[i]=log(dmax[i]/D0);
1572 
1573 // dummy2=log_D[i];
1574 
1575  // logarithm of the mass, even though it is a little weird to have
1576  // a logarithm of something with a unit...
1577 
1578 // dummy1=vol[i]*rho[i];
1579 // dummy2=log(vol[i]*rho[i]);
1580 
1581  log_m[i]=log(vol[i]*rho[i]);
1582 
1583  }
1584 
1585  //estimate mass-dimension relationship from meta data by linear regression
1586  // Assumption of a power law for the mass-dimension relationship
1587  // Ansatz: log(m) = log(alpha)+beta*log(dmax/D0)
1588  linreg(q,log_D, log_m);
1589 
1590  alpha=exp(q[0]);
1591  beta=q[1];
1592 
1593  CREATE_OUT2;
1594  out2<<"Mass-dimension relationship m=alpha*(dmax/D0)^beta:\n"
1595  <<"alpha = "<<alpha<<" kg \n"
1596  <<"beta = "<<beta<<"\n";
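 // (Sanity check for the fitted m = alpha*(dmax/D0)^beta, not used by the code:
 //  solid ice spheres (rho ~ 917 kg/m^3) have m = (PI/6)*rho*dmax^3, so the fit
 //  would return beta ~ 3 and alpha ~ (PI/6)*917 ~ 480 kg; low-density
 //  aggregates typically yield beta closer to 2 and a much smaller alpha.)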
1597 
1598 
1599 
1600  if (dmax.nelem() > 0)
1601  // dm.nelem()=0 implies no selected particles for the respective particle
1602  // field. should not occur anymore.
1603  {
1604  // iteration over all atm. levels
1605  for ( Index p=limits[0]; p<limits[1]; p++ )
1606  {
1607  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
1608  {
1609  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
1610  {
1611  // we only need to go through PSD calc if there is any material
1612  if (SWC_field ( p, lat, lon ) > 0.)
1613  {
1614  // iteration over all given size bins
1615  for ( Index i=0; i<dmax.nelem(); i++ ) //loop over number of particles
1616  {
1617  // calculate particle size distribution for F07
1618  // [# m^-3 m^-1]
1619  dN[i] = IWCtopnd_F07TR( dmax[i], t_field ( p, lat, lon ),
1620  SWC_field ( p, lat, lon ), alpha, beta);
1621  }
1622  // scale pnds by bin width
1623  if (dmax.nelem() > 1)
1624  scale_pnd( pnd, dmax, dN ); //[# m^-3]
1625  else
1626  pnd = dN;
1627 
1628 
1629  // calculate proper scaling of pnd sum from real IWC and apply
1630  chk_pndsum ( pnd, SWC_field ( p,lat,lon ), vol, rho,
1631  p, lat, lon, partfield_name, verbosity );
1632 
1633  // writing pnd vector to wsv pnd_field
1634  for ( Index i =0; i< npart; i++ )
1635  {
1636  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1637  lat-limits[2], lon-limits[4] ) = pnd[i];
1638  }
1639  }
1640  else
1641  {
1642  for ( Index i = 0; i< npart; i++ )
1643  {
1644  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1645  lat-limits[2], lon-limits[4] ) = 0.;
1646  }
1647  }
1648  }
1649  }
1650  }
1651  }
1652 }
1653 
1675  const Tensor3& SWC_field,
1676  const Tensor3& t_field,
1677  const ArrayOfIndex& limits,
1678  const ArrayOfScatteringMetaData& scat_meta_array,
1679  const Index& scat_data_start,
1680  const Index& npart,
1681  const String& part_string,
1682  const String& delim,
1683  const Verbosity& verbosity)
1684 {
1685  ArrayOfIndex intarr;
1686  Index pos;
1687  Vector dmax_unsorted ( npart, 0.0 );
1688  Vector vol ( npart, 0.0 );
1689  Vector dmax ( npart, 0.0 );
1690  Vector rho ( npart, 0.0 );
1691  Vector pnd ( npart, 0.0 );
1692  Vector dN ( npart, 0.0 );
1693  String partfield_name;
1694  Numeric alpha;
1695  Numeric beta;
1696  Vector log_m( npart, 0.0 );
1697  Vector log_D( npart, 0.0 );
1698  Vector q;
1699 
1700  //unit conversion
1701  const Numeric D0=1; //[m]
1702 
1703 
1704 
1705  //split String and copy to ArrayOfString
1706  parse_partfield_name( partfield_name, part_string, delim);
1707 
1708  for ( Index i=0; i < npart; i++ )
1709  dmax_unsorted[i] = ( scat_meta_array[i+scat_data_start].diameter_max );
1710  get_sorted_indexes(intarr, dmax_unsorted);
1711 
1712  // extract scattering meta data
1713  for ( Index i=0; i< npart; i++ )
1714  {
1715  pos = intarr[i]+scat_data_start;
1716 
1717  vol[i]= scat_meta_array[pos].volume; //[m^3]
1718  // get maximum diameter from meta data [m]
1719  dmax[i] = scat_meta_array[pos].diameter_max;
1720  // get density from meta data [kg/m^3]
1721  rho[i] = scat_meta_array[pos].density;
1722  // get aspect ratio from meta data [ ]
1723 
1724  // logarithm of Dmax, needed for estimating mass-dimension-relationship
1725  log_D[i]=log(dmax[i]/D0);
1726 
1727  // logarithm of the mass, even though it is a little weird to have
1728  // a logarithm of something with a unit...
1729  log_m[i]=log(vol[i]*rho[i]);
1730  }
1731 
1732  //estimate mass-dimension relationship from meta data by linear regression
1733  // Assumption of a power law for the mass-dimension relationship
1734  // Ansatz: log(m) = log(alpha)+beta*log(dmax/D0)
1735  linreg(q,log_D, log_m);
1736 
1737  alpha=exp(q[0]);
1738  beta=q[1];
1739 
1740  CREATE_OUT2;
1741  out2<<"Mass-dimension relationship m=alpha*(dmax/D0)^beta:\n"
1742  <<"alpha = "<<alpha<<" kg \n"
1743  <<"beta = "<<beta<<"\n";
1744 
1745 
1746 
1747  if (dmax.nelem() > 0)
1748  // dm.nelem()=0 implies no selected particles for the respective particle
1749  // field. should not occur anymore.
1750  {
1751  // iteration over all atm. levels
1752  for ( Index p=limits[0]; p<limits[1]; p++ )
1753  {
1754  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
1755  {
1756  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
1757  {
1758  // we only need to go through PSD calc if there is any material
1759  if (SWC_field ( p, lat, lon ) > 0.)
1760  {
1761  // iteration over all given size bins
1762  for ( Index i=0; i<dmax.nelem(); i++ ) //loop over number of particles
1763  {
1764  // calculate particle size distribution for F07
1765  // [# m^-3 m^-1]
1766  dN[i] = IWCtopnd_F07ML( dmax[i], t_field ( p, lat, lon ),
1767  SWC_field ( p, lat, lon ), alpha, beta);
1768  }
1769  // scale pnds by bin width
1770  if (dmax.nelem() > 1)
1771  scale_pnd( pnd, dmax, dN ); //[# m^-3]
1772  else
1773  pnd = dN;
1774 
1775 
1776  // calculate proper scaling of pnd sum from real IWC and apply
1777  chk_pndsum ( pnd, SWC_field ( p,lat,lon ), vol, rho,
1778  p, lat, lon, partfield_name, verbosity );
1779 
1780  // writing pnd vector to wsv pnd_field
1781  for ( Index i =0; i< npart; i++ )
1782  {
1783  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1784  lat-limits[2], lon-limits[4] ) = pnd[i];
1785  }
1786  }
1787  else
1788  {
1789  for ( Index i = 0; i< npart; i++ )
1790  {
1791  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1792  lat-limits[2], lon-limits[4] ) = 0.;
1793  }
1794  }
1795  }
1796  }
1797  }
1798  }
1799 }
1800 
1801 
1822  const Tensor3& LWC_field,
1823  const ArrayOfIndex& limits,
1824  const ArrayOfScatteringMetaData& scat_meta_array,
1825  const Index& scat_data_start,
1826  const Index& npart,
1827  const String& part_string,
1828  const String& delim,
1829  const Verbosity& verbosity)
1830 {
1831  ArrayOfIndex intarr;
1832  Index pos;
1833  Vector dmax_unsorted ( npart, 0.0 );
1834  Vector vol ( npart, 0.0 );
1835  Vector deq ( npart, 0.0 );
1836  Vector rho ( npart, 0.0 );
1837  Vector pnd ( npart, 0.0 );
1838  Vector dN ( npart, 0.0 );
1839  Numeric mean_rho;
1840  Numeric min_rho;
1841  Numeric max_rho;
1842  Numeric delta_rho;
1843  String partfield_name;
1844 
1845  CREATE_OUT1;
1846 
1847  //split String and copy to ArrayOfString
1848  parse_partfield_name( partfield_name, part_string, delim);
1849 
1850  for ( Index i=0; i < npart; i++ )
1851  dmax_unsorted[i] = ( scat_meta_array[i+scat_data_start].diameter_max );
1852  get_sorted_indexes(intarr, dmax_unsorted);
1853 
1854  // extract scattering meta data
1855  for ( Index i=0; i< npart; i++ )
1856  {
1857  pos = intarr[i]+scat_data_start;
1858 
1859  vol[i]= scat_meta_array[pos].volume; //[m^3]
1860  // get sphere equivalent diameter [m]
1861  deq[i] = pow( (6*vol[i]/PI),(1./3.));
1862 
1863  out1<<"deq["<<i<<"] = "<<deq[i] <<"\n";
1864 
1865 
1866  // dummy1=dmax[i];
1867 
1868  // get density from meta data [kg/m^3]
1869  rho[i] = scat_meta_array[pos].density;
1870 
1871  }
1872 
1873  // mean density
1874  mean_rho=rho.sum()/(Numeric)rho.nelem();
1875 
1876 
1877  // checking if the particles have the same density
1878  min_rho=min(rho);
1879  max_rho=max(rho);
1880  delta_rho=(max_rho-min(rho))/max_rho;
1881 
1882  if (delta_rho >= 0.1)
1883  {
1884  ostringstream os;
1885  os << "MGD_LWC is valid only for particles with the same,\n"
1886  "or at least almost the same, density. The difference between\n"
1887  "maximum and minimum density must be lower than 10 percent.\n"
1888  "Your difference is " << delta_rho << ".\n"
1889  "Check your scattering particles";
1890  throw runtime_error( os.str() );
1891  }
1892 
1893 
1894  if (deq.nelem() > 0)
1895  // dm.nelem()=0 implies no selected particles for the respective particle
1896  // field. should not occur anymore.
1897  {
1898  // iteration over all atm. levels
1899  for ( Index p=limits[0]; p<limits[1]; p++ )
1900  {
1901 
1902  out1<<"p="<<p <<"\n";
1903 
1905  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
1906  {
1907  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
1908  {
1909  // we only need to go through PSD calc if there is any material
1910  if (LWC_field ( p, lat, lon ) > 0.)
1911  {
1912  // iteration over all given size bins
1913  for ( Index i=0; i<deq.nelem(); i++ ) //loop over number of particles
1914  {
1915  // calculate particle size distribution
1916  dN[i] = LWCtopnd_MGD_LWC( deq[i], mean_rho ,
1917  LWC_field ( p, lat, lon ));
1918  }
1919  // scale pnds by bin width
1920  if (deq.nelem() > 1)
1921  scale_pnd( pnd, deq, dN ); //[# m^-3]
1922  else
1923  pnd = dN;
1924 
1925 
1926  // calculate proper scaling of pnd sum from real IWC and apply
1927  chk_pndsum ( pnd, LWC_field ( p,lat,lon ), vol, rho,
1928  p, lat, lon, partfield_name, verbosity );
1929 
1930  // writing pnd vector to wsv pnd_field
1931  for ( Index i =0; i< npart; i++ )
1932  {
1933  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1934  lat-limits[2], lon-limits[4] ) = pnd[i];
1935  }
1936  }
1937  else
1938  {
1939  for ( Index i = 0; i< npart; i++ )
1940  {
1941  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
1942  lat-limits[2], lon-limits[4] ) = 0.;
1943  }
1944  }
1945  }
1946  }
1947  }
1948  }
1949 }
1950 
1971  const Tensor3& IWC_field,
1972  const ArrayOfIndex& limits,
1973  const ArrayOfScatteringMetaData& scat_meta_array,
1974  const Index& scat_data_start,
1975  const Index& npart,
1976  const String& part_string,
1977  const String& delim,
1978  const Verbosity& verbosity)
1979 {
1980  ArrayOfIndex intarr;
1981  Index pos;
1982  Vector dmax_unsorted ( npart, 0.0 );
1983  Vector vol ( npart, 0.0 );
1984  Vector deq ( npart, 0.0 );
1985  Vector rho ( npart, 0.0 );
1986  Vector pnd ( npart, 0.0 );
1987  Vector dN ( npart, 0.0 );
1988  Numeric mean_rho;
1989  Numeric min_rho;
1990  Numeric max_rho;
1991  Numeric delta_rho;
1992  String partfield_name;
1993 
1994 
1995  //split String and copy to ArrayOfString
1996  parse_partfield_name( partfield_name, part_string, delim);
1997 
1998  for ( Index i=0; i < npart; i++ )
1999  dmax_unsorted[i] = ( scat_meta_array[i+scat_data_start].diameter_max );
2000  get_sorted_indexes(intarr, dmax_unsorted);
2001 
2002  // extract scattering meta data
2003  for ( Index i=0; i< npart; i++ )
2004  {
2005  pos = intarr[i]+scat_data_start;
2006 
2007  vol[i]= scat_meta_array[pos].volume; //[m^3]
2008  // get sphere equivalent diameter [m]
2009  deq[i] = pow( (6*vol[i]/PI),(1./3.));
2010 
2011 
2012  // get density from meta data [kg/m^3]
2013  rho[i] = scat_meta_array[pos].density;
2014 
2015  }
2016 
2017  // mean density
2018  mean_rho=rho.sum()/(Numeric)rho.nelem();
2019 
2020  // checking if the particles have the same density
2021  min_rho=min(rho);
2022  max_rho=max(rho);
2023  delta_rho=(max_rho-min(rho))/max_rho;
2024 
2025  if (delta_rho >= 0.1)
2026  {
2027  ostringstream os;
2028  os << "MGD_IWC is valid only for particles with the same,\n"
2029  "or at least almost the same, density. The difference between\n"
2030  "maximum and minimum density must be lower than 10 percent.\n"
2031  "Your difference is " << delta_rho << ".\n"
2032  "Check your scattering particles";
2033  throw runtime_error( os.str() );
2034  }
2035 
2036 
2037 
2038  if (deq.nelem() > 0)
2039  // dm.nelem()=0 implies no selected particles for the respective particle
2040  // field. should not occur anymore.
2041  {
2042  // iteration over all atm. levels
2043  for ( Index p=limits[0]; p<limits[1]; p++ )
2044  {
2045  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
2046  {
2047  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
2048  {
2049  // we only need to go through PSD calc if there is any material
2050  if (IWC_field ( p, lat, lon ) > 0.)
2051  {
2052  // iteration over all given size bins
2053  for ( Index i=0; i<deq.nelem(); i++ ) //loop over number of particles
2054  {
2055  // calculate particle size distribution for MGD_IWC
2056  dN[i] = IWCtopnd_MGD_IWC( deq[i], mean_rho,
2057  IWC_field ( p, lat, lon ));
2058  }
2059  // scale pnds by bin width
2060  if (deq.nelem() > 1)
2061  scale_pnd( pnd, deq, dN ); //[# m^-3]
2062  else
2063  pnd = dN;
2064 
2065 
2066  // calculate proper scaling of pnd sum from real IWC and apply
2067  chk_pndsum ( pnd, IWC_field ( p,lat,lon ), vol, rho,
2068  p, lat, lon, partfield_name, verbosity );
2069 
2070  // writing pnd vector to wsv pnd_field
2071  for ( Index i =0; i< npart; i++ )
2072  {
2073  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2074  lat-limits[2], lon-limits[4] ) = pnd[i];
2075  }
2076  }
2077  else
2078  {
2079  for ( Index i = 0; i< npart; i++ )
2080  {
2081  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2082  lat-limits[2], lon-limits[4] ) = 0.;
2083  }
2084  }
2085  }
2086  }
2087  }
2088  }
2089 }
2090 
2091 
2110 void pnd_fieldGM58 (Tensor4View pnd_field,
2111  const Tensor3& PR_field,
2112  const ArrayOfIndex& limits,
2113  const ArrayOfScatteringMetaData& scat_meta_array,
2114  const Index& scat_data_start,
2115  const Index& npart,
2116  const String& part_string,
2117  const String& delim,
2118  const Verbosity& verbosity)
2119 {
2120  ArrayOfIndex intarr;
2121  Index pos;
2122  Vector vol_unsorted ( npart, 0.0 );
2123  Vector vol ( npart, 0.0 );
2124  Vector vol_me ( npart, 0.0 );
2125  Vector dve ( npart, 0.0 );
2126  Vector dme ( npart, 0.0 );
2127  Vector rho_snow ( npart, 0.0 );
2128  Vector pnd ( npart, 0.0 );
2129  Vector dN ( npart, 0.0 );
2130 
2131  String partfield_name;
2132 
2133  // Density of water, needed for melted Diameter
2134  const Numeric rho_water=1000.;// [kg/m^3]
2135 
2136  //split String and copy to ArrayOfString
2137  parse_partfield_name( partfield_name, part_string, delim);
2138 
2139  for ( Index i=0; i < npart; i++ )
2140  vol_unsorted[i] = ( scat_meta_array[i+scat_data_start].volume );
2141  get_sorted_indexes(intarr, vol_unsorted);
2142 
2143 
2144 
2145  // extract scattering meta data
2146  for ( Index i=0; i< npart; i++ )
2147  {
2148  pos = intarr[i]+scat_data_start;
2149 
2150  vol[i] = ( scat_meta_array[pos].volume ); //m^3
2151 
2152  // calculate volume equivalent diameter [m]
2153  dve[i] = pow(
2154  ( scat_meta_array[pos].volume *6./PI ),
2155  ( 1./3. ) );
2156  // get density from meta data [kg/m^3]
2157  rho_snow[i] = scat_meta_array[pos].density;
2158 
2159  // Calculate the melted Diameter
2160  dme[i] =pow( ( rho_snow[i]/rho_water ),( 1./3. ) )*dve[i];
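 // (Melting conserves mass: rho_water*(PI/6)*dme^3 = rho_snow*(PI/6)*dve^3,
 //  which gives dme = (rho_snow/rho_water)^(1/3) * dve.)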
2161 
2162  // calculate the mass-equivalent (melted) volume
2163  vol_me[i] = ( rho_snow[i]/rho_water )*vol[i];
2164 
2165 
2166  }
2167 
2168 
2169  Numeric fac, tPR, N0;
2170  Numeric PWC, lambda = NAN;
2171 
2172 
2173  // conversion factor from PR [kg/(m^2*s)] to PR[mm/hr]
2174  const Numeric convfac=3.6e6; // together with 1/rho_water [kg/m^3] this converts PR [kg/(m^2*s)] to PR [mm/hr]
2175 
2176 
2177 
2178  // set parameterisation constants
2179  const Numeric lambda_fac = 25.5*1e2; // [cm^-1] converted to [m^-1] to fit d[m]
2180  const Numeric lambda_exp = -0.48;
2181  const Numeric N0_exp= -0.87;
2182  const Numeric N0_fac=0.038*1e8;
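 // (These constants define the exponential size distribution used below,
 //  dN = N0 * exp(-lambda*dme), with both parameters tied to the precipitation
 //  rate tPR in mm/hr. For tPR = 1 mm/hr: N0 = 0.038e8 m^-4 and
 //  lambda = 25.5e2 m^-1; doubling tPR scales N0 by 2^-0.87 ~ 0.55 and lambda
 //  by 2^-0.48 ~ 0.72, i.e. the distribution shifts towards larger particles.)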
2183 
2184 
2185 
2186  if (dve.nelem() > 0)
2187  // dve.nelem()=0 implies no selected particles for the respective particle
2188  // field. should not occur.
2189  {
2190  // iteration over all atm. levels
2191  for ( Index p=limits[0]; p<limits[1]; p++ )
2192  {
2193  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
2194  {
2195  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
2196  {
2197  // we only need to go through PSD calc if there is any precipitation
2198  if (PR_field ( p, lat, lon ) > 0.)
2199  {
2200 
2201  fac = convfac/rho_water;
2202 
2203  // do PR [kg/(m^2*s)] to PR [mm/hr] conversion
2204  tPR = PR_field ( p, lat, lon ) * fac;
2205 
2206 
2207  //calculate Intercept
2208  N0 = pow(tPR,N0_exp)*N0_fac; // [#/cm^3/cm] converted to [#/m^3/m]
2209 
2210  // get slope of distribution [m^-1]
2211  lambda = lambda_fac * pow(tPR,lambda_exp);
2212 
2213  // derive particle number density for all given sizes
2214  for ( Index i=0; i<dve.nelem(); i++ )
2215  {
2216 
2217  dN[i] = N0 * exp(-lambda*dme[i]);
2218 
2219  }
2220 
2221  // scale pnds by bin width
2222  if (dme.nelem() > 1)
2223  scale_pnd( pnd, dme, dN );
2224  else
2225  pnd = dN;
2226 
2227 
2228 
2229 
2230  // Make sure lambda has been assigned a valid value above
2231  assert(!isnan(lambda));
2232 
2233  // calculate error of pnd sum and real XWC
2234  PWC = rho_water*PI*N0 / pow(lambda,4.);
2235  chk_pndsum ( pnd, PWC, vol, rho_snow,
2236  p, lat, lon, partfield_name, verbosity );
2237 
2238 
2239  // writing pnd vector to wsv pnd_field
2240  for ( Index i = 0; i< npart; i++ )
2241  {
2242  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2243  lat-limits[2], lon-limits[4] ) = pnd[i];
2244  }
2245  }
2246  else
2247  for ( Index i = 0; i< npart; i++ )
2248  {
2249  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2250  lat-limits[2], lon-limits[4] ) = 0.;
2251  }
2252  }
2253  }
2254  }
2255  }
2256 }
2257 
2258 
2259 
2278 void pnd_fieldSS70 (Tensor4View pnd_field,
2279  const Tensor3& PR_field,
2280  const ArrayOfIndex& limits,
2281  const ArrayOfScatteringMetaData& scat_meta_array,
2282  const Index& scat_data_start,
2283  const Index& npart,
2284  const String& part_string,
2285  const String& delim,
2286  const Verbosity& verbosity)
2287 {
2288  ArrayOfIndex intarr;
2289  Index pos;
2290  Vector vol_unsorted ( npart, 0.0 );
2291  Vector vol ( npart, 0.0 );
2292  //Vector vol_me ( npart, 0.0 );
2293  Vector dve ( npart, 0.0 );
2294  Vector dme ( npart, 0.0 );
2295  Vector rho_snow ( npart, 0.0 );
2296  Vector pnd ( npart, 0.0 );
2297  Vector pnd_old ( npart, 0.0 );
2298  Vector dN ( npart, 0.0 );
2299 
2300  Numeric fac, tPR, N0;//, temp,temp2,temp3;
2301  Numeric PWC, lambda = NAN;
2302 
2303  String partfield_name;
2304 
2305  // Density of water, needed for melted Diameter
2306  const Numeric rho_water=1000.;// [kg/m^3]
2307 
2308  //split String and copy to ArrayOfString
2309  parse_partfield_name( partfield_name, part_string, delim);
2310 
2311  for ( Index i=0; i < npart; i++ )
2312  vol_unsorted[i] = ( scat_meta_array[i+scat_data_start].volume );
2313  get_sorted_indexes(intarr, vol_unsorted);
2314 
2315 
2316 
2317  // extract scattering meta data
2318  for ( Index i=0; i< npart; i++ )
2319  {
2320  pos = intarr[i]+scat_data_start;
2321 
2322  vol[i] = ( scat_meta_array[pos].volume ); //m^3
2323 
2324  // calculate volume equivalent diameter [m]
2325  dve[i] = pow(
2326  ( scat_meta_array[pos].volume *6./PI ),
2327  ( 1./3. ) );
2328 
2329  // get density from meta data [kg/m^3]
2330  rho_snow[i] = scat_meta_array[pos].density;
2331 
2332  // Calculate the melted Diameter
2333  dme[i] =pow( ( rho_snow[i]/rho_water ),( 1./3. ) )*dve[i];
2334 
2335  // calculate the mass equivalent(melted) Volume
2336  //vol_me=( rho_snow[i]/rho_water )*vol[i];
2337 
2338 
2339  }
2340 
2341 
2342 
2343  // conversion factor from PR [kg/(m^2*s)] to PR [mm/hr]
2344  const Numeric convfac=3.6e6; // PR [kg/(m^2*s)] -> PR [mm/hr] * [kg/m^3]
2345 
2346 
2347 
2348  // set parameterisation constants
2349  const Numeric lambda_fac = 22.9*1e2; // [cm^-1] converted to [m^-1] to fit d[m]
2350  const Numeric lambda_exp = -0.45;
2351  const Numeric N0_exp= -0.94;
2352  const Numeric N0_fac=0.025*1e8;
2353 
2354 
2355 
2356  if (dve.nelem() > 0)
2357  // dve.nelem()=0 implies no selected particles for the respective particle
2358  // field. should not occur.
2359  {
2360  // iteration over all atm. levels
2361  for ( Index p=limits[0]; p<limits[1]; p++ )
2362  {
2363  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
2364  {
2365  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
2366  {
2367  // we only need to go through PSD calc if there is any precipitation
2368  if (PR_field ( p, lat, lon ) > 0.)
2369  {
2370 
2371  fac = convfac/rho_water;
2372 
2373  // do PR [kg/(m^2*s)] to PR [mm/hr] conversion
2374  tPR = PR_field ( p, lat, lon ) * fac;
2375 
2376 
2377 
2378  //calculate Intercept
2379  N0 = pow(tPR,N0_exp)*N0_fac; // [#/cm^3/cm] converted to [#/m^3/m]
2380 
2381  // get slope of distribution [m^-1]
2382  lambda = lambda_fac * pow(tPR,lambda_exp);
2383 
2384  // derive particle number density for all given sizes
2385  for ( Index i=0; i<dve.nelem(); i++ )
2386  {
2387  dN[i] = N0 * exp(-lambda*dme[i]);
2388  }
2389 
2390 
2391  // scale pnds by bin width
2392  if (dme.nelem() > 1)
2393  scale_pnd( pnd, dme, dN );
2394  else
2395  pnd = dN;
2396 
2397 
2398 
2399  // Make sure lambda has been assigned a valid value above
2400  assert(!isnan(lambda));
2401 
2402 // pnd_old=pnd;
2403 
2404  // calculate error of pnd sum and real XWC
2405  PWC = rho_water*PI*N0 / pow(lambda,4.);
2406  chk_pndsum ( pnd, PWC, vol, rho_snow,
2407  p, lat, lon, partfield_name, verbosity );
2408 
2409 
2410  // writing pnd vector to wsv pnd_field
2411  for ( Index i = 0; i< npart; i++ )
2412  {
2413 // temp=pnd[i];
2414 // temp2=dN[i];
2415 // temp3=pnd_old[i];
2416  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2417  lat-limits[2], lon-limits[4] ) = pnd[i];
2418 
2419  }
2420  }
2421  else
2422  for ( Index i = 0; i< npart; i++ )
2423  {
2424  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2425  lat-limits[2], lon-limits[4] ) = 0.;
2426  }
2427  }
2428  }
2429  }
2430  }
2431 }
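
// The two precipitation-rate parameterisations above, pnd_fieldGM58 and
// pnd_fieldSS70, share the same exponential form in melted diameter,
//   N(dme) = N0 * exp(-lambda*dme),  N0 = N0_fac*tPR^N0_exp,  lambda = lambda_fac*tPR^lambda_exp,
// with tPR the precipitation rate in mm/hr. The following is only an
// illustrative, self-contained sketch of that shared form; the function name and
// the bool switch between the two constant sets are made up here, and the routine
// is not called anywhere in ARTS.
static Numeric snow_psd_exponential_sketch(const Numeric tPR, // precip rate [mm/hr]
                                            const Numeric dme, // melted diameter [m]
                                            const bool ss70)
{
  // constant sets copied from pnd_fieldGM58 / pnd_fieldSS70 above
  const Numeric lambda_fac = ss70 ? 22.9e2 : 25.5e2;  // [m^-1]
  const Numeric lambda_exp = ss70 ? -0.45  : -0.48;
  const Numeric N0_fac     = ss70 ? 0.025e8 : 0.038e8; // [#/m^3/m]
  const Numeric N0_exp     = ss70 ? -0.94  : -0.87;

  const Numeric N0     = N0_fac * pow( tPR, N0_exp );
  const Numeric lambda = lambda_fac * pow( tPR, lambda_exp );
  return N0 * exp( -lambda*dme ); // [# m^-3 m^-1]
}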
2432 
2449 void pnd_fieldMP48 (Tensor4View pnd_field,
2450  const Tensor3& PR_field,
2451  const ArrayOfIndex& limits,
2452  const ArrayOfScatteringMetaData& scat_meta_array,
2453  const Index& scat_data_start,
2454  const Index& npart,
2455  const String& part_string,
2456  const String& delim,
2457  const Verbosity& verbosity)
2458 {
2459  ArrayOfIndex intarr;
2460  Index pos;
2461  Vector vol_unsorted ( npart, 0.0 );
2462  Vector vol ( npart, 0.0 );
2463  Vector d ( npart, 0.0 );
2464  Vector rho ( npart, 0.0 );
2465  Vector pnd ( npart, 0.0 );
2466  Vector pnd_old ( npart, 0.0 );
2467  Vector dN ( npart, 0.0 );
2468 
2469  String partfield_name;
2470 
2471  //split String and copy to ArrayOfString
2472  parse_partfield_name( partfield_name, part_string, delim);
2473 
2474  for ( Index i=0; i < npart; i++ )
2475  vol_unsorted[i] = ( scat_meta_array[i+scat_data_start].volume );
2476  get_sorted_indexes(intarr, vol_unsorted);
2477 
2478  // extract scattering meta data
2479  for ( Index i=0; i< npart; i++ )
2480  {
2481  pos = intarr[i]+scat_data_start;
2482 
2483  vol[i] = ( scat_meta_array[pos].volume ); //m^3
2484  // calculate volume equivalent diameter [m]
2485  d[i] = pow (
2486  ( scat_meta_array[pos].volume *6./PI ),
2487  ( 1./3. ) );
2488  // get density from meta data [kg/m^3]
2489  rho[i] = scat_meta_array[pos].density;
2490  }
2491 
2492  // conversion factor from PR [kg/(m^2*s)] to PR[mm/hr]
2493  /* for the conversion we need to know the mean density of distribution:
2494  rho_mean = mass_total/vol_total
2495  = int(vol[D]*rho[D]*dN[D])dD/int(vol[D]*dN[D])dD
2496 
2497  however, we do not yet know dN[D], which in turn depends on rho[D]. a
2498  proper solution requires an iterative approach: here we set the conversion
2499  factor to a pseudo PR [mm/hr]*[kg/m^3], divide by the current estimate of
2500  the bulk mean density, and update that estimate below until it converges
2501  (at least for rain the drop density should hardly vary with particle size;
2502  it might for snow/hail/graupel).
2503  */
2504  const Numeric convfac=3.6e6; // PR [kg/(m^2*s)] -> PR [mm/hr] * [kg/m^3]
2505  Numeric fac, rho_mean, vol_total, mass_total, tPR;
2506 
2507  // set parameterisation constants here instead of in PRtopnd_MP48, since we
2508  // also need them for PR to PWC conversion
2509  const Numeric N0 = 0.08*1e8; // [#/cm^3/cm] converted to [#/m^3/m]
2510  const Numeric lambda_fac = 41.*1e2; // [cm^-1] converted to [m^-1] to fit d[m]
2511  const Numeric lambda_exp = -0.21;
2512  Numeric PWC, lambda = NAN;
2513 
2514  if (d.nelem() > 0)
2515  // d.nelem()=0 implies no selected particles for the respective particle
2516  // field. should not occur.
2517  {
2518  // iteration over all atm. levels
2519  for ( Index p=limits[0]; p<limits[1]; p++ )
2520  {
2521  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
2522  {
2523  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
2524  {
2525  // we only need to go through PSD calc if there is any precipitation
2526  if (PR_field ( p, lat, lon ) > 0.)
2527  {
2528  Index n_it = 0;
2529 
2530  // initializing for proper start of while loop
2531  lambda = 0.;
2532  rho_mean = 0.;
2533  mass_total = rho.sum()/Numeric(npart);
2534  vol_total = 1.;
2535  while (abs( rho_mean/(mass_total/vol_total)-1.)>1e-2)
2536  // did bulk mean density change?
2537  {
2538  if (n_it>10)
2539  {
2540  ostringstream os;
2541  os<< "ERROR: Bulk mean density for MP48 distribution is"
2542  << " not converging.\n"
2543  << "fractional change: "
2544  << abs( rho_mean/(mass_total/vol_total)-1.) << "\n"
2545  << "at p: " << p << " lat: " << lat << " lon: " << lon
2546  << " for PR=" << PR_field ( p, lat, lon ) << "\n";
2547  throw runtime_error ( os.str() );
2548  }
2549  rho_mean = mass_total/vol_total;
2550  fac = convfac/rho_mean;
2551 
2552  // do PR [kg/(m^2*s)] to PR [mm/hr] conversion
2553  tPR = PR_field ( p, lat, lon ) * fac;
2554  // get slope of distribution [m^-1]
2555  lambda = lambda_fac * pow(tPR,lambda_exp);
2556 
2557  // derive particle number density for all given sizes
2558  for ( Index i=0; i<d.nelem(); i++ )
2559  {
2560  // calculate particle size distribution with MP48
2561  // output: [# m^-3 m^-1]
2562  //dN[i] = PRtopnd_MP48 ( tPR, d[i]);
2563  // too much of a hassle to have a separate function, so we do
2564  // the calculation directly here.
2565  dN[i] = N0 * exp(-lambda*d[i]);
2566  //dN2[i] = dN[i] * vol[i] * rho[i];
2567  }
2568 
2569  // scale pnds by bin width
2570  if (d.nelem() > 1)
2571  scale_pnd( pnd, d, dN );
2572  else
2573  pnd = dN;
2574 
2575  // derive mass and volume over whole size distribution for
2576  // updated mean density
2577  mass_total = vol_total = 0.;
2578  for ( Index i=0; i<d.nelem(); i++ )
2579  {
2580  mass_total += vol[i]*rho[i]*pnd[i];
2581  vol_total += vol[i]*pnd[i];
2582  }
2583  //cout << n_it << ". iteration changing bulk density from "
2584  // << rho_mean << " to " << mass_total/vol_total << "\n";
2585  n_it++;
2586  }
2587  //cout << "fine at p: " << p << " lat: " << lat << " lon" << lon
2588  // << " for PR=" << PR_field ( p, lat, lon )*fac << "mm/hr.\n";
2589 
2590  // Make sure lambda was initialized in the while loop
2591  assert(!isnan(lambda));
2592 
2593 
2594  // calculate error of pnd sum and real XWC
2595  PWC = rho_mean*PI*N0 / pow(lambda,4.);
2596  chk_pndsum ( pnd, PWC, vol, rho,
2597  p, lat, lon, partfield_name, verbosity );
2598 
2599  // writing pnd vector to wsv pnd_field
2600  for ( Index i = 0; i< npart; i++ )
2601  {
2602  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2603  lat-limits[2], lon-limits[4] ) = pnd[i];
2604  }
2605  }
2606  else
2607  for ( Index i = 0; i< npart; i++ )
2608  {
2609  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2610  lat-limits[2], lon-limits[4] ) = 0.;
2611  }
2612  }
2613  }
2614  }
2615  }
2616 }
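
// The comment and while loop above iterate the bulk mean density that is needed
// for the PR [kg/(m^2*s)] -> PR [mm/hr] conversion. The following stand-alone
// sketch shows just that fixed-point iteration in isolation; the names are made
// up, bin-width scaling via scale_pnd is omitted for brevity, and the routine is
// not called anywhere in ARTS.
static Numeric mp48_mean_density_sketch(const Numeric PR,  // [kg/(m^2*s)]
                                         const Vector& vol, // particle volumes [m^3]
                                         const Vector& rho, // particle densities [kg/m^3]
                                         const Vector& d)   // volume equiv. diameters [m]
{
  if ( d.nelem() == 0 || PR <= 0. )
    return 0.;

  const Numeric N0 = 0.08e8;        // [#/m^3/m]
  const Numeric lambda_fac = 41.e2; // [m^-1]
  const Numeric lambda_exp = -0.21;
  const Numeric convfac = 3.6e6;    // PR [kg/(m^2*s)] -> PR [mm/hr] * [kg/m^3]

  Numeric rho_mean = 0.;
  Numeric mass_total = rho.sum()/Numeric(rho.nelem());
  Numeric vol_total = 1.;
  Index n_it = 0;

  while ( abs( rho_mean/(mass_total/vol_total)-1. ) > 1e-2 && n_it <= 10 )
  {
    rho_mean = mass_total/vol_total;
    const Numeric tPR = PR * convfac/rho_mean;              // [mm/hr]
    const Numeric lambda = lambda_fac * pow( tPR, lambda_exp );
    mass_total = vol_total = 0.;
    for ( Index i = 0; i < d.nelem(); i++ )
    {
      const Numeric dNi = N0 * exp( -lambda*d[i] );         // [# m^-3 m^-1]
      mass_total += vol[i]*rho[i]*dNi;
      vol_total  += vol[i]*dNi;
    }
    n_it++;
  }
  return rho_mean; // converged bulk mean density [kg/m^3]
}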
2617 
2618 
2619 
2620 
2638 void pnd_fieldH98 (Tensor4View pnd_field,
2639  const Tensor3& LWC_field,
2640  const ArrayOfIndex& limits,
2641  const ArrayOfScatteringMetaData& scat_meta_array,
2642  const Index& scat_data_start,
2643  const Index& npart,
2644  const String& part_string,
2645  const String& delim,
2646  const Verbosity& verbosity)
2647 {
2648  ArrayOfIndex intarr;
2649  Index pos;
2650  Vector vol_unsorted ( npart, 0.0 );
2651  Vector vol ( npart, 0.0 );
2652  Vector r ( npart, 0.0 );
2653  Vector rho ( npart, 0.0 );
2654  Vector pnd ( npart, 0.0 );
2655  Vector dN ( npart, 0.0 );
2656 
2657  String partfield_name;
2658 
2659  //split String and copy to ArrayOfString
2660  parse_partfield_name( partfield_name, part_string, delim);
2661 
2662  for ( Index i=0; i < npart; i++ )
2663  vol_unsorted[i] = ( scat_meta_array[i+scat_data_start].volume );
2664  get_sorted_indexes(intarr, vol_unsorted);
2665 
2666  // extract scattering meta data
2667  for ( Index i=0; i< npart; i++ )
2668  {
2669  pos = intarr[i]+scat_data_start;
2670 
2671  vol[i]= scat_meta_array[pos].volume; // [m^3]
2672  // calculate radius from volume [m]
2673  r[i] = 0.5 * pow (
2674  ( 6*scat_meta_array[pos].volume/PI ), ( 1./3. ) );
2675  // get density from meta data [kg/m^3]
2676  rho[i] = scat_meta_array[pos].density;
2677  }
2678 
2679  if (r.nelem() > 0)
2680  // r.nelem()=0 implies no selected particles for the respective particle
2681  // field. should not occur anymore
2682  {
2683  // iteration over all atm. levels
2684  for ( Index p=limits[0]; p<limits[1]; p++ )
2685  {
2686  for ( Index lat=limits[2]; lat<limits[3]; lat++ )
2687  {
2688  for ( Index lon=limits[4]; lon<limits[5]; lon++ )
2689  {
2690  if (LWC_field ( p,lat,lon )>0.)
2691  {
2692  // iteration over all given size bins
2693  for ( Index i=0; i<r.nelem(); i++ ) //loop over number of particles
2694  {
2695  // calculate particle size distribution for liquid
2696  // [# m^-3 m^-1]
2697  dN[i] = LWCtopnd ( LWC_field ( p,lat,lon ), rho[i], r[i] );
2698  //dN2[i] = LWCtopnd2 ( r[i] ); // [# m^-3 m^-1]
2699  //dN2[i] = dN[i] * vol[i] * rho[i];
2700  }
2701 
2702  // scale pnds by scale width. output: [# m^-3]
2703  if (r.nelem() > 1)
2704  scale_pnd( pnd, r, dN );
2705  else
2706  pnd = dN;
2707 
2708  // calculate error of pnd sum and real XWC
2709  chk_pndsum ( pnd, LWC_field ( p,lat,lon ), vol, rho,
2710  p, lat, lon, partfield_name, verbosity );
2711 
2712  // writing pnd vector to wsv pnd_field
2713  for ( Index i =0; i< npart; i++ )
2714  {
2715  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2716  lat-limits[2], lon-limits[4] ) = pnd[i];
2717  //dlwc[q] = pnd2[q]*vol[q]*rho[q];
2718  }
2719  }
2720  else
2721  {
2722  for ( Index i = 0; i< npart; i++ )
2723  {
2724  pnd_field ( intarr[i]+scat_data_start, p-limits[0],
2725  lat-limits[2], lon-limits[4] ) = 0.;
2726  }
2727  }
2728  }
2729  }
2730  }
2731  }
2732 }
2733 
2734 
2735 
2736 
2737 
2738 
2756  const Numeric dm,
2757  const Numeric t,
2758  const Numeric density,
2759  const bool noisy )
2760 {
2761  // skip calculation if IWC is 0.0
2762  if ( iwc == 0.0 )
2763  {
2764  return 0.0;
2765  }
2766 
2767  //[kg/m3] -> [g/m3] as used by parameterisation
2768  Numeric ciwc = iwc*1e3;
2769  Numeric cdensity = density*1e3;
2770 
2771  Numeric sig_a=0.068, sig_b1=0.054;
2772  Numeric sig_b2=5.5e-3, sig_m=0.0029;
2773  Numeric sig_aamu=0.02, sig_bamu=0.0005;
2774  Numeric sig_abmu=0.023, sig_bbmu=0.5e-3;
2775  Numeric sig_aasigma=0.02, sig_basigma=0.5e-3;
2776  Numeric sig_absigma=0.023, sig_bbsigma=4.7e-4;
2777 
2778  if ( noisy )
2779  {
2780  Rng rng; //Random Number generator
2781  Index mc_seed;
2782  mc_seed = (Index)time(NULL);
2783  rng.seed(mc_seed, Verbosity());
2784 
2785  sig_a = ran_gaussian(rng,sig_a);
2786  sig_b1 = ran_gaussian(rng,sig_b1);
2787  sig_b2 = ran_gaussian(rng,sig_b2);
2788  sig_m = ran_gaussian(rng,sig_m);
2789  sig_aamu = ran_gaussian(rng,sig_aamu);
2790  sig_bamu = ran_gaussian(rng,sig_bamu);
2791  sig_abmu = ran_gaussian(rng,sig_abmu);
2792  sig_bbmu = ran_gaussian(rng,sig_bbmu);
2793  sig_aasigma = ran_gaussian(rng,sig_aasigma);
2794  sig_basigma = ran_gaussian(rng,sig_basigma);
2795  sig_absigma = ran_gaussian(rng,sig_absigma);
2796  sig_bbsigma = ran_gaussian(rng,sig_bbsigma);
2797  }
2798  else
2799  {
2800  sig_a=0., sig_b1=0.;
2801  sig_b2=0., sig_m=0.;
2802  sig_aamu=0., sig_bamu=0.;
2803  sig_abmu=0., sig_bbmu=0.;
2804  sig_aasigma=0., sig_basigma=0.;
2805  sig_absigma=0., sig_bbsigma=0.;
2806  }
2807 
2808  Numeric dN1;
2809  Numeric dN2;
2810  Numeric dN;
2811 
2812  // convert m to microns
2813  Numeric Dm = dm * 1e6;
2814  //convert T from Kelvin to Celsius
2815  Numeric T = t-273.15;
2816  //split IWC in IWCs100 and IWCl100
2817 
2818  // determine iwc<100um and iwc>100um
2819  Numeric a=0.252+sig_a; //g/m^3
2820  Numeric b1=0.837+sig_b1;
2821  Numeric IWCs100=min ( ciwc,a*pow ( ciwc,b1 ) );
2822  Numeric IWCl100=ciwc-IWCs100;
2823 
2824 
2825  //Gamma distribution component
2826 
2827  Numeric b2=-4.99e-3+sig_b2; //micron^-1
2828  Numeric m=0.0494+sig_m; //micron^-1
2829  Numeric alphas100=b2-m*log10 ( IWCs100 ); //micron^-1
2830  // for large IWC, alpha (and hence dN1) goes negative. avoid that.
2831  // towards this limit, particles anyway get larger than 100um, i.e., they
2832  // fall outside the size region the gamma distribution is intended for.
2833  if (alphas100>0.)
2834  {
2835  Numeric Ns100 = 6*IWCs100 * pow ( alphas100,5. ) /
2836  ( PI*cdensity*gamma_func ( 5. ) );//micron^-5
2837  Numeric Nm1 = Ns100*Dm*exp ( -alphas100*Dm ); //micron^-4
2838  dN1 = Nm1*1e18; // m^-3 micron^-1
2839  }
2840  else
2841  {
2842  dN1 = 0.;
2843  }
2844 
2845 
2846 
2847  //Log normal distribution component
2848 
2849  // for small IWC, IWCtotal==IWC<100 & IWC>100=0.
2850  // this will give dN2=NaN. avoid that by explicitly setting to 0
2851  if (IWCl100>0.)
2852  {
2853  //FIXME: Do we need to ensure mul100>0 and sigmal100>0?
2854 
2855  Numeric aamu=5.20+sig_aamu;
2856  Numeric bamu=0.0013+sig_bamu;
2857  Numeric abmu=0.026+sig_abmu;
2858  Numeric bbmu=-1.2e-3+sig_bbmu;
2859  Numeric amu=aamu+bamu*T;
2860  Numeric bmu=abmu+bbmu*T;
2861  Numeric mul100=amu+bmu*log10 ( IWCl100 );
2862 
2863  Numeric aasigma=0.47+sig_aasigma;
2864  Numeric basigma=2.1e-3+sig_basigma;
2865  Numeric absigma=0.018+sig_absigma;
2866  Numeric bbsigma=-2.1e-4+sig_bbsigma;
2867  Numeric asigma=aasigma+basigma*T;
2868  Numeric bsigma=absigma+bbsigma*T;
2869  Numeric sigmal100=asigma+bsigma*log10 ( IWCl100 );
2870 
2871  if ( (mul100>0.) && (sigmal100>0.) )
2872  {
2873  Numeric a1 = 6*IWCl100; //g/m^3
2874  Numeric a2 = pow ( PI,3./2. ) * cdensity*sqrt(2) *
2875  exp( 3*mul100+9./2. * pow ( sigmal100,2 ) ) *
2876  sigmal100 * pow ( 1.,3 ) * Dm; //g/m^3/micron^4
2877  Numeric Nm2 = a1/a2 *
2878  exp ( -0.5 * pow ( ( log ( Dm )-mul100 ) /sigmal100,2 ) );
2879  //micron^-4
2880  dN2 = Nm2*1e18; // m^-3 micron^-1
2881  }
2882  else
2883  {
2884  dN2 = 0.;
2885 /* ostringstream os;
2886  os<< "ERROR: not a valid MH97 size distribution:\n"
2887  << "mu>100=" << mul100 << " resulting from amu="<< amu << " and bmu="<< bmu << "\n"
2888  << "(aamu="<<aamu<<", bamu="<<bamu<<", abmu="<<abmu<<", bbmu="<<bbmu<<")\n"
2889  << "sigma>100=" << sigmal100 << " resulting from asigma="<< asigma << " and bsigma="<< bsigma << "\n"
2890  << "(aasigma="<<aasigma<<", basigma="<<basigma<<", absigma="<<absigma<<", bbsigma="<<bbsigma<<")\n";
2891  throw runtime_error ( os.str() ); */
2892  }
2893  }
2894  else
2895  {
2896  dN2 = 0.;
2897  }
2898 
2899 
2900 
2901  dN = ( dN1+dN2 ) *1e6; // m^-3 m^-1
2902  if (isnan(dN)) dN = 0.0;
2903  return dN;
2904 }
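
// Minimal usage sketch of the MH97 ice particle size distribution implemented
// above (IWCtopnd_MH97): evaluate dN/dD for a few mass equivalent diameters.
// All numerical values below are arbitrary illustration values, not ARTS defaults,
// and the routine is not called anywhere in ARTS.
static void mh97_usage_sketch()
{
  const Numeric iwc = 1e-4;     // ice water content [kg/m^3]
  const Numeric t = 220.;       // temperature [K]
  const Numeric rho_ice = 917.; // particle density [kg/m^3]
  const Numeric dm[] = { 20e-6, 60e-6, 200e-6 }; // mass equiv. diameters [m]

  for ( Index i = 0; i < 3; i++ )
  {
    const Numeric dN = IWCtopnd_MH97( iwc, dm[i], t, rho_ice, false ); // [# m^-3 m^-1]
    (void)dN; // bin-width scaling to [# m^-3] would follow via scale_pnd
  }
}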
2905 
2906 
2907 
2922  const Numeric t)
2923 {
2924  Numeric dN;
2925  Numeric la;
2926  Numeric mu;
2927  // convert m to cm
2928  Numeric D = d * 1e2;
2929  //convert T from Kelvin to Celsius
2930  Numeric T = t-273.15;
2931  //choose parametrization depending on T
2932  if ( T >= -56. )
2933  {
2934  la= 12.13 * exp( -0.055*T );
2935  }
2936  else
2937  {
2938  la= 0.83 * exp( -0.103*T );
2939  }
2940  if ( T >= -68. )
2941  {
2942  mu= -0.57 - 0.028*T;
2943  }
2944  else
2945  {
2946  mu= -30.93 - 0.472*T;
2947  }
2948 
2949  //Distribution function H11
2950 
2951  dN = pow( D, mu ) * exp ( -la * D );
2952 
2953  if (isnan(dN)) dN = 0.0;
2954  return dN;
2955 }
2956 
2957 
2958 
2973  const Numeric t)
2974 {
2975  Numeric dN;
2976  Numeric la;
2977  Numeric mu;
2978  // convert m to cm
2979 
2980 
2981  Numeric D = d * 1e2;
2982  //convert T from Kelvin to Celsius
2983  Numeric T = t-273.15;
2984  //choose parametrization depending on T
2985  if ( T >= -58. )
2986  {
2987  la= 9.88 * exp( -0.060*T );
2988  }
2989  else
2990  {
2991  la= 0.75 * exp( -0.1057*T );
2992  }
2993  if ( T >= -61. )
2994  {
2995  mu= -0.59 - 0.030*T;
2996  }
2997  else
2998  {
2999  mu= -14.09 - 0.248*T;
3000  }
3001 
3002  //Distribution function H13
3003 
3004  dN = pow( D, mu ) * exp ( -la * D );
3005 
3006  if (isnan(dN)) dN = 0.0;
3007  return dN;
3008 }
3009 
3010 
3011 
3026  const Numeric t)
3027 {
3028  Numeric dN;
3029  Numeric la;
3030  Numeric mu;
3031  // convert m to cm
3032 
3033 
3034  Numeric D = d * 1e2;
3035  //convert T from Kelvin to Celsius
3036  Numeric T = t-273.15;
3037  //choose parametrization depending on T
3038  if ( T >= -58. )
3039  {
3040  la= 9.88 * exp( -0.060*T );
3041  }
3042  else
3043  {
3044  la= 0.75 * exp( -0.1057*T );
3045  }
3046  if ( T >= -61. )
3047  {
3048  mu= -0.59 - 0.030*T;
3049  }
3050  else
3051  {
3052  mu= -14.09 - 0.248*T;
3053  }
3054 
3055  //Distribution function H13Shape
3056 
3057  dN = pow( D, mu ) * exp ( -la * D );
3058 
3059  if (isnan(dN)) dN = 0.0;
3060  return dN;
3061 }
3062 
3063 
3064 
3079  const Numeric t)
3080 {
3081  Numeric Ar;
3082  Numeric alpha;
3083  Numeric beta;
3084  // convert m to cm
3085 
3086 
3087  Numeric D = d * 1e2;
3088  //convert T from Kelvin to Celsius
3089  Numeric T = t-273.15;
3090  //Parameterize for all temperatures
3091 
3092  alpha = 0.25*exp(0.0161*T);
3093 
3094  beta = -0.25+0.0033*T;
3095 
3096  // Area ratio function depending on temperature
3097 
3098  Ar = alpha*pow(D,beta);
3099 
3100  if (isnan(Ar)) Ar = 0.0;
3101  return Ar;
3102 }
3103 
3122  const Numeric swc,const Numeric alpha,
3123  const Numeric beta)
3124 {
3125  Numeric dN;
3126  Numeric phi23;
3127  Numeric An;
3128  Numeric Bn;
3129  Numeric Cn;
3130  Numeric M2;
3131  Numeric Mn;
3132 
3133  Numeric x;
3134 
3135  Numeric Mbeta;
3136  Numeric base;
3137  Numeric pp;
3138 
3139 
3140 
3141  //factors of phi23
3142  Vector q=MakeVector(152,-12.4,3.28,-0.78,-1.94);
3143 
3144  // Coefficients of the factors of the moment estimation parametrization
3145  Vector Aq=MakeVector(13.6,-7.76,0.479);
3146  Vector Bq=MakeVector(-0.0361,0.0151,0.00149);
3147  Vector Cq=MakeVector(0.807,0.00581,0.0457);
3148 
3149 
3150  //convert T from Kelvin to Celsius
3151  Numeric Tc = t-273.15;
3152 
3153  // estimate second moment
3154  if (beta==2)
3155  M2=swc/alpha;
3156  else
3157  {
3158 
3159 
3160 
3161  Mbeta=swc/alpha;
3162 
3163  // calculate factors of the moment estimation parametrization
3164  An=exp(Aq[0]+Aq[1]*beta+Aq[2]*pow(beta,2));
3165  Bn=Bq[0]+Bq[1]*beta+Bq[2]*pow(beta,2);
3166  Cn=Cq[0]+Cq[1]*beta+Cq[2]*pow(beta,2);
3167 
3168  base=Mbeta*exp(-Bn*Tc)/An;
3169  pp=1./(Cn);
3170 
3171  M2=pow(base,pp);
3172  }
3173 
3174  // order of the moment parametrization
3175  const Numeric n=3;
3176 
3177  // calculate factors of the moment estimation parametrization
3178  An=exp(Aq[0]+Aq[1]*n+Aq[2]*pow(n,2));
3179  Bn=Bq[0]+Bq[1]*n+Bq[2]*pow(n,2);
3180  Cn=Cq[0]+Cq[1]*n+Cq[2]*pow(n,2);
3181 
3182  // moment parametrization
3183  Mn=An*exp(Bn*Tc)*pow(M2,Cn);
3184 
3185  //Define x
3186  x=d*M2/Mn;
3187 
3188  //Characteristic function
3189  phi23 = q[0]*exp(q[1]*x)+q[2]*pow(x,q[3])*exp(q[4]*x);
3190 
3191  //Distribution function
3192  dN=phi23*pow(M2,4.)/pow(Mn,3.);
3193 
3194  if (isnan(dN)) dN = 0.0;
3195  return dN;
3196 }
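
// Summary of the moment scheme evaluated by IWCtopnd_F07TR above and, with
// different phi23 coefficients q, by IWCtopnd_F07ML below. With Tc the temperature
// in deg Celsius and, as interpreted here (not stated in the original comments),
// a mass-dimension relation m = alpha*D^beta:
//   M_beta = swc/alpha
//   M_2 = [ M_beta*exp(-B(beta)*Tc)/A(beta) ]^(1/C(beta))   (M_2 = swc/alpha if beta==2)
//   M_n = A(n)*exp(B(n)*Tc)*M_2^C(n),  here with n = 3
//   x = d*M_2/M_n
//   phi23(x) = q0*exp(q1*x) + q2*x^q3*exp(q4*x)
//   dN/dD = phi23(x)*M_2^4/M_3^3
// where A, B, C are quadratic in their argument with the coefficients Aq, Bq, Cq.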
3197 
3216  const Numeric swc,const Numeric alpha,
3217  const Numeric beta)
3218 {
3219  Numeric dN;
3220  Numeric phi23;
3221  Numeric An;
3222  Numeric Bn;
3223  Numeric Cn;
3224  Numeric M2;
3225  Numeric Mn;
3226 
3227  Numeric x;
3228 
3229  Numeric Mbeta;
3230  Numeric base;
3231  Numeric pp;
3232 
3233  //factors of phi23
3234  Vector q=MakeVector(141,-16.8,102,2.07,-4.82);
3235 
3236  // Coefficients of the factors of the moment estimation parametrization
3237  Vector Aq=MakeVector(13.6,-7.76,0.479);
3238  Vector Bq=MakeVector(-0.0361,0.0151,0.00149);
3239  Vector Cq=MakeVector(0.807,0.00581,0.0457);
3240 
3241 
3242  //convert T from Kelvin to Celsius
3243  Numeric Tc = t-273.15;
3244 
3245  // estimate second moment
3246  if (beta==2)
3247  M2=swc/alpha;
3248  else
3249  {
3250 
3251 
3252  Mbeta=swc/alpha;
3253 
3254  // calculate factors of the moment estimation parametrization
3255  An=exp(Aq[0]+Aq[1]*beta+Aq[2]*pow(beta,2));
3256  Bn=Bq[0]+Bq[1]*beta+Bq[2]*pow(beta,2);
3257  Cn=Cq[0]+Cq[1]*beta+Cq[2]*pow(beta,2);
3258 
3259  base=Mbeta*exp(-Bn*Tc)/An;
3260  pp=1./Cn;
3261 
3262  M2=pow(base,pp);
3263  }
3264 
3265  // order of the moment parametrization
3266  const Numeric n=3;
3267 
3268  // calculate factors of the moment estimation parametrization
3269  An=exp(Aq[0]+Aq[1]*n+Aq[2]*pow(n,2));
3270  Bn=Bq[0]+Bq[1]*n+Bq[2]*pow(n,2);
3271  Cn=Cq[0]+Cq[1]*n+Cq[2]*pow(n,2);
3272 
3273  // moment parametrization
3274  Mn=An*exp(Bn*Tc)*pow(M2,Cn);
3275 
3276  //Define x
3277  x=d*M2/Mn;
3278 
3279  //Characteristic function
3280  phi23 = q[0]*exp(q[1]*x)+q[2]*pow(x,q[3])*exp(q[4]*x);
3281 
3282  //Distribution function
3283  dN=phi23*pow(M2,4.)/pow(Mn,3.);
3284 
3285  if (isnan(dN)) dN = 0.0;
3286  return dN;
3287 }
3288 
3289 
3305 Numeric LWCtopnd (const Numeric lwc, //[kg/m^3]
3306  const Numeric density, //[kg/m^3]
3307  const Numeric r // [m]
3308  )
3309 {
3310  // skip calculation if LWC is 0.0
3311  if ( lwc == 0.0 )
3312  {
3313  return 0.0;
3314  }
3315  Numeric rc = 4.7*1e-6; //[um]->[m]
3316  Numeric alpha = 5.0;
3317  Numeric gam = 1.05;
3318 
3319  Numeric a4g = (alpha+4.)/gam;
3320  Numeric B = (alpha/gam) / pow(rc,gam);
3321  Numeric A = 0.75/PI * lwc/density * gam * pow(B,a4g) /
3322  gamma_func(a4g);
3323  Numeric n = A * (pow(r,alpha) * exp(-B*pow(r,gam)));
3324  // n in [# m^-3 m^-1]
3325 
3326  if (isnan(n)) n = 0.0;
3327  return n;
3328 }
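
// Minimal usage sketch of LWCtopnd above (arbitrary illustration values): evaluate
// the drop size distribution on a coarse radius grid, convert it to [# m^-3] with
// scale_pnd (defined further below in this file) and check how well the
// discretised distribution recovers the input LWC -- the same kind of consistency
// check that chk_pndsum performs for the real pnd_field. Not called by ARTS.
static void lwctopnd_usage_sketch()
{
  const Numeric lwc = 5e-4;    // [kg/m^3]
  const Numeric rho_w = 1000.; // [kg/m^3]

  Vector r(5), dN(5), pnd(5);
  for ( Index i = 0; i < r.nelem(); i++ )
    r[i] = 2e-6 + Numeric(i)*4e-6;          // radii 2..18 um [m]
  for ( Index i = 0; i < r.nelem(); i++ )
    dN[i] = LWCtopnd( lwc, rho_w, r[i] );   // [# m^-3 m^-1]
  scale_pnd( pnd, r, dN );                  // [# m^-3]

  Numeric lwc_check = 0.;
  for ( Index i = 0; i < r.nelem(); i++ )
    lwc_check += pnd[i] * rho_w * 4./3.*PI*pow( r[i], 3. ); // [kg/m^3]
  // lwc_check approaches lwc as the radius grid is refined.
  (void)lwc_check;
}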
3329 
3330 // ONLY FOR TESTING PURPOSES
3332  const Numeric r // [m]
3333  )
3334 {
3335  Numeric rc = 4.7; //micron
3336  Numeric alpha = 5.0;
3337  Numeric gam = 1.05;
3338 
3339  Numeric B=(alpha/gam)/pow(rc,gam);
3340  Numeric A=gam*pow(B,((alpha+1)/gam))/gamma_func((alpha+1)/gam);
3341  Numeric n=A*(pow(r*1e6,alpha)*exp(-B*pow(r*1e6,gam)))*1e6;
3342  // [# m^-3 m^-1]
3343 
3344  if (isnan(n)) n = 0.0;
3345  return n;
3346 }
3347 
3363 Numeric LWCtopnd_MGD_LWC ( const Numeric d, const Numeric rho, const Numeric lwc
3364  )
3365 {
3366  Numeric dN;
3367  Numeric N0;
3368 
3369  // coefficients of the modified gamma distribution
3370  const Numeric mu=2; //
3371  const Numeric lambda=2.13e5; //
3372  const Numeric gamma=1;
3373  const Numeric fc=1.4863e30;
3374 
3375 
3376  // N0
3377  N0=fc*lwc/rho; //[m^-4]
3378 
3379  //Distribution function
3380  dN=N0*pow(d ,mu)*exp(-lambda*pow(d,gamma));
3381 
3382  if (isnan(dN)) dN = 0.0;
3383  return dN;
3384 }
3385 
3386 
3402 Numeric IWCtopnd_MGD_IWC ( const Numeric d, const Numeric rho, const Numeric iwc)
3403 {
3404  Numeric dN;
3405  Numeric N0;
3406 
3407  // coefficients of the modified gamma distribution
3408  const Numeric mu=2; //
3409  const Numeric lambda=2.05e5; //
3410  const Numeric gamma=1;
3411  const Numeric fc=1.1813e30;
3412 
3413 
3414  // N0
3415  N0=fc*iwc/rho; //[m^-4]
3416 
3417  //Distribution function
3418  dN=N0*pow(d ,mu)*exp(-lambda*pow(d,gamma));
3419 
3420  if (isnan(dN)) dN = 0.0;
3421  return dN;
3422 }
3423 
3424 
3439  const Numeric D)
3440 {
3441  // skip calculation if PR is 0.0
3442  if ( R == 0.0 )
3443  {
3444  return 0.0;
3445  }
3446 
3447  Numeric N0 = 0.08*1e-2; // [#/cm^3/cm] converted to [#/m^3/um]
3448  Numeric lambda = 41.*1e2*pow(R,-0.21); // [cm^-1] converted to [m^-1] to fit D[m]
3449 
3450  Numeric n = N0*exp(-lambda*D);
3451  return n;
3452 }
3453 
3454 
3455 
3470  const Vector& x,
3471  const Vector& y)
3472 {
3473  // check if vectors have same size
3474  if (x.nelem() != y.nelem())
3475  {
3476  throw std::logic_error("x and y must be the same size");
3477  }
3478 
3479  if (x.nelem()>1) // calc. integration weights (using trapezoid integration)
3480  {
3481  for (Index i = 0; i<x.nelem(); i++)
3482  {
3483  if (i == 0) // first value
3484  {
3485  w[i] = 0.5*(x[i+1]-x[i])*y[i]; // m^-3
3486  }
3487  else if (i == x.nelem()-1) //last value
3488  {
3489  w[i] = 0.5*(x[i]-x[i-1])*y[i]; // m^-3
3490  }
3491  else // all values in between
3492  {
3493  w[i] = 0.5*(x[i+1]-x[i-1])*y[i]; // m^-3
3494  }
3495  }
3496  }
3497  else // for monodisperse pnd=dN
3498  {
3499  w[0] = y[0];
3500  }
3501 }
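
// Short usage sketch of scale_pnd above (arbitrary numbers, not called by ARTS):
// for the size grid x = {1e-6, 2e-6, 4e-6} m the trapezoid bin widths are
// 0.5e-6, 1.5e-6 and 1.0e-6 m, so each w[i] is y[i] times its bin width; for a
// single size, w = y.
static void scale_pnd_usage_sketch()
{
  Vector x(3), y(3), w(3);
  x[0] = 1e-6; x[1] = 2e-6; x[2] = 4e-6; // particle sizes [m]
  y[0] = 1e12; y[1] = 5e11; y[2] = 1e11; // dN [# m^-3 m^-1]
  scale_pnd( w, x, y );
  // w[0] = 0.5e-6*y[0], w[1] = 1.5e-6*y[1], w[2] = 1.0e-6*y[2]  [# m^-3]
}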
3502 
3503 
3504 
3517 void chk_pndsum (Vector& pnd,
3518  const Numeric xwc,
3519  const Vector& vol,
3520  const Vector& density,
3521  const Index& p,
3522  const Index& lat,
3523  const Index& lon,
3524  const String& partfield_name,
3525  const Verbosity& verbosity)
3526 
3527 {
3528  CREATE_OUT2;
3529 
3530  // set vector x to pnd size
3531  Vector x ( pnd.nelem(), 0.0 );
3532  Numeric error;
3533 
3534  //cout << "p = " << p << ", pnd.nelem:" << pnd.nelem() << ", xwc: " << xwc << "\n";
3535  for ( Index i = 0; i<pnd.nelem(); i++ )
3536  {
3537  // convert from particles/m^3 to kg/m^3
3538  x[i] = pnd[i]*density[i]*vol[i];
3539  /*cout<< "p = " << p << ", i: " << i << "\n"
3540  << "pnd[i]: " << pnd[i] << ", density[i]: " << density[i] << ", vol[i]: " << vol[i] << "\n"
3541  << "x[i]: " << x[i] << "\n";*/
3542  }
3543 
3544  //cout<<"at p = "<<p<<", lat = "<<lat<<", lon = "<<lon
3545  //<<" given mass density: "<<xwc<<", calc mass density: "<<x.sum() << "\n";
3546  if ( x.sum() == 0.0 )
3547  if ( xwc == 0.0 )
3548  {
3549  // set error and all pnd values to zero, IF there are
3550  // no scattering particles at this atmospheric level.
3551  error = 0.0;
3552  pnd = 0.0;
3553  }
3554  else
3555  { // when x.sum()==0, but xwc!=0, obviously something went wrong in pnd calc
3556  ostringstream os;
3557  os<< "ERROR: in WSM chk_pndsum in pnd_fieldSetup!\n"
3558  << "Given mass density != 0, but calculated mass density == 0.\n"
3559  << "Something seems to have gone wrong in pnd_fieldSetup. Check!\n"
3560  << "The problem occurred for profile '"<< partfield_name <<"' at: "
3561  << "p = "<<p<<", lat = "<<lat<<", lon = "<<lon<<".\n";
3562  throw runtime_error ( os.str() );
3563  }
3564  else
3565  {
3566  error = xwc/x.sum();
3567  // correct all pnd values with error
3568  pnd *= error;
3569  // give warning if deviations are more than 10%
3570  if ( error > 1.10 || error < 0.90 )
3571  {
3572  CREATE_OUT1;
3573  //ostringstream os;
3574  out1<< "WARNING: in WSM chk_pndsum in pnd_fieldSetup!\n"
3575  << "The deviation of the sum of nodes in the particle size distribution\n"
3576  << "from the initial input mass density (IWC/LWC) is larger than 10%!\n"
3577  << "A deviation of "<< error-1.0<<" occurred in the atmospheric level: "
3578  << "p = "<<p<<", lat = "<<lat<<", lon = "<<lon<<".\n";
3579  //cerr<<os;
3580  }
3581  }
3582  //cout<<", error: "<<error<<"corrected to: "<<x.sum()*error<<"\n";
3583 
3584  out2 << "PND scaling factor in atm. level "
3585  << "(p = "<<p<<", lat = "<<lat<<", lon = "<<lon<<"): "<< error <<"\n";
3586  //cout<<"\npnd_scaled\t"<<pnd<<endl;
3587  //cout<<"\nPND\t"<<pnd.sum()<<"\nXWC\t"<<xwc<<"\nerror\t"<<error<<endl;
3588  //cout<<"\n"<<x.sum()<<"\n"<<xwc<<"\n"<<error<<endl;
3589 }
3590 
3591 
3592 
3605  Vector& pnd,
3606  const Numeric xwc,
3607  const Vector& density,
3608  const Vector& vol)
3609  // const Index& p,
3610  // const Index& lat,
3611  // const Index& lon,
3612  // const Verbosity& verbosity)
3613 
3614 {
3615  // set vector x to pnd size
3616  Vector x (pnd.nelem(), 0.0);
3617 
3618  for ( Index i = 0; i<pnd.nelem(); i++ )
3619  {
3620  // convert from particles/m^3 to kg/m^3
3621  x[i] = pnd[i]*density[i]*vol[i];
3622  //out0<<x[i]<<"\n"<< pnd[i]<< "\n";
3623  }
3624 
3625  if ( x.sum() == 0.0 )
3626  {
3627  // set ratio and all pnd values to zero, IF there are
3628  // no scattering particles at this atmospheric level.
3629  pnd = 0.0;
3630  }
3631  else
3632  {
3633  // calculate the ratio of initial massdensity (xwc) to sum of pnds
3634  const Numeric ratio = xwc/x.sum();
3635  // scale each pnd to represent the total massdensity
3636  pnd *= ratio;
3637  }
3638 
3639 }
3640 
3653  Vector& pnd,
3654  const Numeric xwc,
3655  const Vector& density,
3656  const Vector& vol)
3657  // const Index& p,
3658  // const Index& lat,
3659  // const Index& lon,
3660  // const Verbosity& verbosity)
3661 
3662 {
3663  // set vector x to pnd size
3664  Vector x (pnd.nelem(), 0.0);
3665 
3666  for ( Index i = 0; i<pnd.nelem(); i++ )
3667  {
3668  // convert from particles/m^3 to kg/m^3
3669  x[i] = pnd[i]*density[i]*vol[i];
3670  //out0<<x[i]<<"\n"<< pnd[i]<< "\n";
3671  }
3672 
3673  if ( x.sum() == 0.0 )
3674  {
3675  // set ratio and all pnd values to zero, IF there are
3676  // no scattering particles at this atmospheric level.
3677  pnd = 0.0;
3678  }
3679  else
3680  {
3681  // calculate the ratio of initial massdensity (xwc) to sum of pnds
3682  const Numeric ratio = xwc/x.sum();
3683  // scale each pnd to represent the total massdensity
3684  pnd *= ratio;
3685  }
3686 
3687 }
3688 
3689 
3700 void parse_partfield_name (//WS Output:
3701  String& partfield_name,
3702  // WS Input:
3703  const String& part_string,
3704  const String& delim)
3705 {
3706  ArrayOfString strarr;
3707 
3708  // split part_species string at delim and write to ArrayOfString
3709  part_string.split ( strarr, delim );
3710 
3711  //first entry is particle field name (e.g. "IWC", "LWC" etc.)
3712  if (strarr.size()>0 && part_string[0]!=delim[0])
3713  {
3714  partfield_name = strarr[0];
3715  }
3716  else
3717  {
3718  ostringstream os;
3719  os << "No information on particle field name in '" << part_string << "'\n";
3720  throw runtime_error ( os.str() );
3721 
3722  }
3723 
3724  /* no restrictions on profile naming anymore
3725  if ( partfield_name != "IWC" &&
3726  partfield_name != "Snow" &&
3727  partfield_name != "LWC" &&
3728  partfield_name != "Rain" )
3729  {
3730  ostringstream os;
3731  os << "First substring in " << part_string
3732  << " must be rather 'LWC', 'IWC', 'Rain' or 'Snow'\n"
3733  <<"Check input in *part_species*!\n";
3734  throw runtime_error ( os.str() );
3735  }*/
3736 }
3737 
3738 
3739 
3749 void parse_psd_param (//WS Output:
3750  String& psd_param,
3751  // WS Input:
3752  const String& part_string,
3753  const String& delim)
3754 {
3755  ArrayOfString strarr;
3756 
3757  // split part_species string at delim and write to ArrayOfString
3758  part_string.split ( strarr, delim );
3759 
3760  // second entry is particle size distribution parametrisation ( e.g."MH97")
3761  // check, whether we have a second entry
3762  if (strarr.size()>1)
3763  psd_param = strarr[1];
3764  else
3765  psd_param = "";
3766 
3767 /* // jm120218: FIX! <- DONE (120401)
3768  // this should not be checked here, but in pnd_fieldSetup (e.g., via
3769  // a case switch default)
3770  if ( psd_param.substr(0,4) != "MH97" && psd_param != "liquid" &&
3771  psd_param != "H11" )
3772  {
3773  ostringstream os;
3774  os <<"The chosen PSD parametrisation in " << part_string
3775  << " can not be handeled in the moment.\n"
3776  <<"Choose either 'MH97', 'H11' or 'liquid'!\n" ;
3777  throw runtime_error ( os.str() );
3778  }*/
3779 }
3780 
3781 
3782 
3793 void parse_part_material (//WS Output:
3794  String& part_material,
3795  // WS Input:
3796  const String& part_string,
3797  const String& delim)
3798 {
3799  ArrayOfString strarr;
3800 
3801  // split part_species string at delim and write to ArrayOfString
3802  part_string.split ( strarr, delim );
3803 
3804  // third entry is requested particle (material) type ( e.g."Water", "Ice")
3805  // check, whether we have a third entry
3806  if (strarr.size()>2)
3807  {
3808  part_material = strarr[2];
3809  }
3810  else
3811  {
3812  ostringstream os;
3813  os << "No information on particle type in '" << part_string << "'\n";
3814  throw runtime_error ( os.str() );
3815  }
3816 }
3817 
3818 
3819 
3830 void parse_part_size (//WS Output:
3831  Numeric& sizemin,
3832  Numeric& sizemax,
3833  // WS Input:
3834  const String& part_string,
3835  const String& delim)
3836 {
3837  ArrayOfString strarr;
3838 
3839  // split part_species string at delim and write to ArrayOfString
3840  part_string.split ( strarr, delim );
3841 
3842  // convert the size range Strings into Numeric
3843  // 1. fourth entry is the minimum particle radius
3844  if ( strarr.nelem() < 4 || strarr[3] == "*" )
3845  {
3846  sizemin = 0.;
3847  }
3848  else
3849  {
3850  istringstream os1 ( strarr[3] );
3851  os1 >> sizemin;
3852  }
3853  // 2. fifth entry is the maximum particle radius
3854  if ( strarr.nelem() < 5 || strarr[4] == "*" )
3855  {
3856  sizemax = -1.;
3857  }
3858  else
3859  {
3860  istringstream os2 ( strarr[4] );
3861  os2 >> sizemax;
3862  }
3863 }
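
// Usage sketch of the parse_* helpers above. The tag below and the "-" delimiter
// are made-up illustration values that follow the field order documented in the
// comments (field name, PSD, material, minimum size, maximum size; "*" = no limit):
//   part_string = "IWC-MH97-Ice-0-*", delim = "-"
//   parse_partfield_name -> "IWC"
//   parse_psd_param      -> "MH97"
//   parse_part_material  -> "Ice"
//   parse_part_size      -> sizemin = 0., sizemax = -1. (no upper limit)
// Not called anywhere in ARTS.
static void parse_part_species_sketch()
{
  const String part_string = "IWC-MH97-Ice-0-*";
  const String delim = "-";
  String partfield_name, psd_param, part_material;
  Numeric sizemin, sizemax;

  parse_partfield_name( partfield_name, part_string, delim );
  parse_psd_param( psd_param, part_string, delim );
  parse_part_material( part_material, part_string, delim );
  parse_part_size( sizemin, sizemax, part_string, delim );
}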
3864 