ARTS  2.3.1285(git:92a29ea9-dirty)
m_optproperties.cc
1 /* Copyright (C) 2002-2012
2  Sreerekha T.R. <rekha@uni-bremen.de>
3  Claudia Emde <claudia.emde@dlr.de>
4  Cory Davies <cory@met.ed.ac.uk>
5 
6  This program is free software; you can redistribute it and/or
7  modify it under the terms of the GNU General Public License as
8  published by the Free Software Foundation; either version 2 of the
9  License, or (at your option) any later version.
10 
11  This program is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with this program; if not, write to the Free Software
18  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19  USA. */
20 
35 /*===========================================================================
36  === External declarations
37  ===========================================================================*/
38 
39 #include <cfloat>
40 #include <cmath>
41 #include "array.h"
42 #include "arts.h"
43 #include "auto_md.h"
44 #include "check_input.h"
45 #include "exceptions.h"
46 #include "interpolation.h"
47 #include "logic.h"
48 #include "math_funcs.h"
49 #include "matpackIII.h"
50 #include "matpackVII.h"
51 #include "messages.h"
52 #include "montecarlo.h"
53 #include "optproperties.h"
54 #include "sorting.h"
55 #include "xml_io.h"
56 
57 extern const Numeric PI;
58 extern const Numeric DEG2RAD;
59 extern const Numeric RAD2DEG;
60 
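// Shorthand macros for the members of the scattering element currently
// addressed by the loop indices i_ss (scattering species) and i_se
// (scattering element); they are only meaningful inside such loops.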
61 #define PART_TYPE scat_data[i_ss][i_se].ptype
62 #define F_DATAGRID scat_data[i_ss][i_se].f_grid
63 #define T_DATAGRID scat_data[i_ss][i_se].T_grid
64 #define ZA_DATAGRID scat_data[i_ss][i_se].za_grid
65 #define AA_DATAGRID scat_data[i_ss][i_se].aa_grid
66 #define PHA_MAT_DATA scat_data[i_ss][i_se].pha_mat_data
67 #define EXT_MAT_DATA scat_data[i_ss][i_se].ext_mat_data
68 #define ABS_VEC_DATA scat_data[i_ss][i_se].abs_vec_data
69 
70 // If particle number density is below this value,
71 // no transformations will be performed
72 #define PND_LIMIT 1e-12
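// (pnd_field holds particle number densities, nominally in m^-3; values at or
// below this limit contribute negligibly to the bulk optical properties, so
// the costly angular transformations are skipped for them.)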
73 
74 /* Workspace method: Doxygen documentation will be auto-generated */
75 void pha_mat_sptFromData( // Output:
76  Tensor5& pha_mat_spt,
77  // Input:
78  const ArrayOfArrayOfSingleScatteringData& scat_data,
79  const Vector& za_grid,
80  const Vector& aa_grid,
81  const Index& za_index, // propagation directions
82  const Index& aa_index,
83  const Index& f_index,
84  const Vector& f_grid,
85  const Numeric& rtp_temperature,
86  const Tensor4& pnd_field,
87  const Index& scat_p_index,
88  const Index& scat_lat_index,
89  const Index& scat_lon_index,
90  const Verbosity& verbosity) {
91  const Index stokes_dim = pha_mat_spt.ncols();
92  if (stokes_dim > 4 || stokes_dim < 1) {
93  throw runtime_error(
94  "The dimension of the stokes vector \n"
95  "must be 1,2,3 or 4");
96  }
97 
98  // Determine total number of scattering elements
99  const Index N_se_total = TotalNumberOfElements(scat_data);
100  if (N_se_total != pnd_field.nbooks()) {
101  ostringstream os;
102  os << "Total number of scattering elements in scat_data "
103  << "inconsistent with size of pnd_field.";
104  throw runtime_error(os.str());
105  }
106  // As pha_mat_spt is typically initialized from pnd_field, this theoretically
107  // checks the same as the runtime_error above. Still, we keep it to be on the
108  // safe side.
109  assert(pha_mat_spt.nshelves() == N_se_total);
110 
111  // Check that we don't have scat_data_mono here. Only checking the first
112  // scat element, assuming the other elements have been processed in the same
113  // manner. That's safe against having mono data, but not against having
114  // individual elements produced with only a single frequency. This, however,
115  // has been checked by the scat_data_raw reading routines (ScatSpecies/Element*Add/Read).
116  // It remains unsafe, however, when ReadXML is used directly or if scat_data(_raw) is
117  // (partly) produced from scat_data_singleTmatrix.
118  if (scat_data[0][0].f_grid.nelem() < 2) {
119  ostringstream os;
120  os << "Scattering data seems to be *scat_data_mono* (1 freq point only),\n"
121  << "but frequency interpolable data (*scat_data* with >=2 freq points) "
122  << "is expected here.";
123  throw runtime_error(os.str());
124  }
125 
126  const Index N_ss = scat_data.nelem();
127 
128  // Phase matrix data interpolated to a single frequency and temperature.
129  // Dimensions: [za_sca, aa_sca, za_inc, aa_inc, matrix_element]
130  Tensor5 pha_mat_data_int;
131 
132  Index i_se_flat = 0;
133  // Loop over scattering species
134  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
135  const Index N_se = scat_data[i_ss].nelem();
136 
137  // Loop over the included scattering elements
138  for (Index i_se = 0; i_se < N_se; i_se++) {
139  // If the particle number density at a specific point in the
140  // atmosphere for the i_se scattering element is zero, we don't need
141  // to do the transformation!
142  if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
143  PND_LIMIT) {
144  // First we have to transform the data from the coordinate system
145  // used in the database (depending on the kind of ptype) to the
146  // laboratory coordinate system.
147 
148  // Frequency and temperature interpolation:
149 
150  // Container for data at one frequency and one temperature.
151  pha_mat_data_int.resize(PHA_MAT_DATA.nshelves(),
152  PHA_MAT_DATA.nbooks(),
153  PHA_MAT_DATA.npages(),
154  PHA_MAT_DATA.nrows(),
155  PHA_MAT_DATA.ncols());
156 
157  // Gridpositions:
158  GridPos freq_gp;
159  gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
160  GridPos t_gp;
161  Vector itw;
162 
163  Index ti = -1;
164 
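// Temperature handling: ti >= 0 means "use T grid point ti directly", while
// ti left at -1 triggers temperature interpolation below. A negative
// rtp_temperature is (ab)used as a switch, e.g. -5 picks the lowest T grid
// point, -15 the highest, and -25 the median one.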
165  if (PHA_MAT_DATA.nvitrines() == 1) // just 1 T_grid element
166  {
167  ti = 0;
168  } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
169  // pick one temperature'
170  {
171  if (rtp_temperature > -10.) // lowest T-point
172  {
173  ti = 0;
174  } else if (rtp_temperature > -20.) // highest T-point
175  {
176  ti = T_DATAGRID.nelem() - 1;
177  } else // median T-point
178  {
179  ti = T_DATAGRID.nelem() / 2;
180  }
181  }
182 
183  if (ti < 0) // temperature interpolation
184  {
185  ostringstream os;
186  os << "In pha_mat_sptFromData.\n"
187  << "The temperature grid of the scattering data does not\n"
188  << "cover the atmospheric temperature at cloud location.\n"
189  << "The data should include the value T = " << rtp_temperature
190  << " K.";
191  chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
192 
193  gridpos(t_gp, T_DATAGRID, rtp_temperature);
194 
195  // Interpolation weights:
196  itw.resize(4);
197  interpweights(itw, freq_gp, t_gp);
198 
199  for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
200  i_za_sca++)
201  for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
202  i_aa_sca++)
203  for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
204  i_za_inc++)
205  for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
206  i_aa_inc++)
207  for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
208  // Interpolation of phase matrix:
209  pha_mat_data_int(
210  i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
211  interp(itw,
212  PHA_MAT_DATA(joker,
213  joker,
214  i_za_sca,
215  i_aa_sca,
216  i_za_inc,
217  i_aa_inc,
218  i),
219  freq_gp,
220  t_gp);
221  } else {
222  // Interpolation weights:
223  itw.resize(2);
224  interpweights(itw, freq_gp);
225  for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
226  i_za_sca++)
227  for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
228  i_aa_sca++)
229  for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
230  i_za_inc++)
231  for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
232  i_aa_inc++)
233  for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
234  // Interpolation of phase matrix:
235  pha_mat_data_int(
236  i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
237  interp(itw,
238  PHA_MAT_DATA(joker,
239  ti,
240  i_za_sca,
241  i_aa_sca,
242  i_za_inc,
243  i_aa_inc,
244  i),
245  freq_gp);
246  }
247 
248  // Do the transformation into the laboratory coordinate system.
249  for (Index za_inc_idx = 0; za_inc_idx < za_grid.nelem();
250  za_inc_idx++) {
251  for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
252  aa_inc_idx++) {
253  pha_matTransform(
254  pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
255  pha_mat_data_int,
256  ZA_DATAGRID,
257  AA_DATAGRID,
258  PART_TYPE,
259  za_index,
260  aa_index,
261  za_inc_idx,
262  aa_inc_idx,
263  za_grid,
264  aa_grid,
265  verbosity);
266  }
267  }
268  }
269  i_se_flat++;
270  }
271  }
272 }
273 
274 /* Workspace method: Doxygen documentation will be auto-generated */
275 void pha_mat_sptFromDataDOITOpt( // Output:
276  Tensor5& pha_mat_spt,
277  // Input:
278  const ArrayOfTensor7& pha_mat_sptDOITOpt,
279  const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
280  const Index& doit_za_grid_size,
281  const Vector& aa_grid,
282  const Index& za_index, // propagation directions
283  const Index& aa_index,
284  const Numeric& rtp_temperature,
285  const Tensor4& pnd_field,
286  const Index& scat_p_index,
287  const Index& scat_lat_index,
288  const Index& scat_lon_index,
289  const Verbosity&) {
290  const Index N_se_total = TotalNumberOfElements(scat_data_mono);
291 
292  if (N_se_total != pnd_field.nbooks()) {
293  ostringstream os;
294  os << "Total number of scattering elements in scat_data_mono "
295  << "inconsistent with size of pnd_field.";
296  throw runtime_error(os.str());
297  }
298  // As pha_mat_spt is typically initialized from pnd_field, this theoretically
299  // checks the same as the runtime_error above. Still, we keep it to be on the
300  // safe side.
301  assert(pha_mat_spt.nshelves() == N_se_total);
302 
303  // atmosphere_dim = 3
304  if (pnd_field.ncols() > 1) {
305  assert(pha_mat_sptDOITOpt.nelem() == N_se_total);
306  // Assuming that if the size is o.k. for one scattering element, it will
307  // also be o.k. for the other scattering elements.
308  assert(pha_mat_sptDOITOpt[0].nlibraries() ==
309  scat_data_mono[0][0].T_grid.nelem());
310  assert(pha_mat_sptDOITOpt[0].nvitrines() == doit_za_grid_size);
311  assert(pha_mat_sptDOITOpt[0].nshelves() == aa_grid.nelem());
312  assert(pha_mat_sptDOITOpt[0].nbooks() == doit_za_grid_size);
313  assert(pha_mat_sptDOITOpt[0].npages() == aa_grid.nelem());
314  }
315 
316  // atmosphere_dim = 1, only zenith angle required for scattered directions.
317  else if (pnd_field.ncols() == 1) {
318  //assert(is_size(scat_theta, doit_za_grid_size, 1,
319  // doit_za_grid_size, aa_grid.nelem()));
320 
321  assert(pha_mat_sptDOITOpt.nelem() == TotalNumberOfElements(scat_data_mono));
322  // Assuming that if the size is o.k. for one scattering element, it will
323  // also be o.k. for the other scattering elements.
324  assert(pha_mat_sptDOITOpt[0].nlibraries() ==
325  scat_data_mono[0][0].T_grid.nelem());
326  assert(pha_mat_sptDOITOpt[0].nvitrines() == doit_za_grid_size);
327  assert(pha_mat_sptDOITOpt[0].nshelves() == 1);
328  assert(pha_mat_sptDOITOpt[0].nbooks() == doit_za_grid_size);
329  assert(pha_mat_sptDOITOpt[0].npages() == aa_grid.nelem());
330  }
331 
332  assert(doit_za_grid_size > 0);
333 
334  // Check that we do indeed have scat_data_mono here. Only checking the first
335  // scat element, assuming the other elements have been processed in the same
336  // manner. That's safe against having scat_data here if it originated from the
337  // scat_data_raw reading routines (ScatSpecies/Element*Add/Read), but it's not safe
338  // against data read by ReadXML directly or if scat_data(_raw) has been (partly)
339  // produced from scat_data_singleTmatrix. Checking that would be too costly here,
340  // though.
341  // Also, we can't check here whether data is at the correct frequency since we
342  // don't know f_grid and f_index here (we could pass it in, though).
343  if (scat_data_mono[0][0].f_grid.nelem() > 1) {
344  ostringstream os;
345  os << "Scattering data seems to be *scat_data* (several freq points),\n"
346  << "but *scat_data_mono* (1 freq point only) is expected here.";
347  throw runtime_error(os.str());
348  }
349 
350  // Create equidistant zenith angle grid
351  Vector za_grid;
352  nlinspace(za_grid, 0, 180, doit_za_grid_size);
353 
354  const Index N_ss = scat_data_mono.nelem();
355  const Index stokes_dim = pha_mat_spt.ncols();
356 
357  if (stokes_dim > 4 || stokes_dim < 1) {
358  throw runtime_error(
359  "The dimension of the stokes vector \n"
360  "must be 1,2,3 or 4");
361  }
362 
363  GridPos T_gp;
364  Vector itw(2);
365 
366  // Initialisation
367  pha_mat_spt = 0.;
368 
369  Index i_se_flat = 0;
370 
371  // Extract (and, if needed, T-interpolate) the precomputed lab-frame phase matrices.
372  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
373  const Index N_se = scat_data_mono[i_ss].nelem();
374 
375  for (Index i_se = 0; i_se < N_se; i_se++) {
376  // If the particle number density at a specific point in the
377  // atmosphere for the i_se scattering element is zero, we don't need
378  // to do the transformation!
379  if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
380  PND_LIMIT) //TRS
381  {
382  Index nT = scat_data_mono[i_ss][i_se].pha_mat_data.nvitrines();
383  Index ti = -1;
384 
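// Same temperature-selection convention as in pha_mat_sptFromData: ti >= 0
// picks a single T grid point, ti = -1 requests T interpolation, and negative
// rtp_temperature values select the lowest/highest/median grid point.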
385  if (nT == 1) // just 1 T_grid element
386  {
387  ti = 0;
388  } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
389  // pick one temperature'
390  {
391  if (rtp_temperature > -10.) // lowest T-point
392  {
393  ti = 0;
394  } else if (rtp_temperature > -20.) // highest T-point
395  {
396  ti = nT - 1;
397  } else // median T-point
398  {
399  ti = nT / 2;
400  }
401  } else {
402  ostringstream os;
403  os << "In pha_mat_sptFromDataDOITOpt.\n"
404  << "The temperature grid of the scattering data does not\n"
405  << "cover the atmospheric temperature at cloud location.\n"
406  << "The data should include the value T = " << rtp_temperature
407  << " K.";
408  chk_interpolation_grids(
409  os.str(), scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
410 
411  // Gridpositions:
412  gridpos(T_gp, scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
413  // Interpolation weights:
414  interpweights(itw, T_gp);
415  }
416 
417  for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
418  za_inc_idx++) {
419  for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
420  aa_inc_idx++) {
421  if (ti < 0) // Temperature interpolation
422  {
423  for (Index i = 0; i < stokes_dim; i++) {
424  for (Index j = 0; j < stokes_dim; j++) {
425  pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j) =
426  interp(itw,
427  pha_mat_sptDOITOpt[i_se_flat](joker,
428  za_index,
429  aa_index,
430  za_inc_idx,
431  aa_inc_idx,
432  i,
433  j),
434  T_gp);
435  }
436  }
437  } else {
438  pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker) =
439  pha_mat_sptDOITOpt[i_se_flat](ti,
440  za_index,
441  aa_index,
442  za_inc_idx,
443  aa_inc_idx,
444  joker,
445  joker);
446  }
447  }
448  }
449  } // TRS
450 
451  i_se_flat++;
452  }
453  }
454 }
455 
456 /* Workspace method: Doxygen documentation will be auto-generated */
457 void opt_prop_sptFromData( // Output and Input:
458  ArrayOfPropagationMatrix& ext_mat_spt,
459  ArrayOfStokesVector& abs_vec_spt,
460  // Input:
461  const ArrayOfArrayOfSingleScatteringData& scat_data,
462  const Vector& za_grid,
463  const Vector& aa_grid,
464  const Index& za_index, // propagation directions
465  const Index& aa_index,
466  const Index& f_index,
467  const Vector& f_grid,
468  const Numeric& rtp_temperature,
469  const Tensor4& pnd_field,
470  const Index& scat_p_index,
471  const Index& scat_lat_index,
472  const Index& scat_lon_index,
473  const Verbosity& verbosity) {
474  const Index N_ss = scat_data.nelem();
475  const Numeric za_sca = za_grid[za_index];
476  const Numeric aa_sca = aa_grid[aa_index];
477 
478  DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data);
479  if (N_ss) {
480  assert(ext_mat_spt[0].NumberOfFrequencies() == N_se_total);
481  assert(abs_vec_spt[0].NumberOfFrequencies() == N_se_total);
482  });
483 
484  // Check that we don't have scat_data_mono here. Only checking the first
485  // scat element, assuming the other elements have been processed in the same
486  // manner. That's safe against having mono data, but not against having
487  // individual elements produced with only a single frequency. This, however,
488  // has been checked by the scat_data_raw reading routines (ScatSpecies/Element*Add/Read).
489  // It remains unsafe, however, when ReadXML is used directly or if scat_data(_raw) is
490  // (partly) produced from scat_data_singleTmatrix.
491  if (scat_data[0][0].f_grid.nelem() < 2) {
492  ostringstream os;
493  os << "Scattering data seems to be *scat_data_mono* (1 freq point only),\n"
494  << "but frequency interpolable data (*scat_data* with >=2 freq points) "
495  << "is expected here.";
496  throw runtime_error(os.str());
497  }
498 
499  // Extinction matrix and absorption vector data interpolated to a single
500  // frequency (and temperature). Dimensions: [za_inc, aa_inc, matrix/vector element]
501  Tensor3 ext_mat_data_int;
502  Tensor3 abs_vec_data_int;
503 
504  // Initialisation
505  ext_mat_spt = 0.;
506  abs_vec_spt = 0.;
507 
508  Index i_se_flat = 0;
509  // Loop over the included scattering species
510  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
511  const Index N_se = scat_data[i_ss].nelem();
512 
513  // Loop over the included scattering elements
514  for (Index i_se = 0; i_se < N_se; i_se++) {
515  // If the particle number density at a specific point in the
516  // atmosphere for the i_se scattering element is zero, we don't need
517  // to do the transformation
518 
519  if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
520  PND_LIMIT) {
521  // First we have to transform the data from the coordinate system
522  // used in the database (depending on the kind of ptype) to the
523  // laboratory coordinate system.
524 
525  // Frequency interpolation:
526 
527  // The data is interpolated on one frequency.
528  //
529  // Resize the variables for the interpolated data:
530  //
531  ext_mat_data_int.resize(
532  EXT_MAT_DATA.npages(), EXT_MAT_DATA.nrows(), EXT_MAT_DATA.ncols());
533  //
534  abs_vec_data_int.resize(
535  ABS_VEC_DATA.npages(), ABS_VEC_DATA.nrows(), ABS_VEC_DATA.ncols());
536 
537  // Gridpositions:
538  GridPos freq_gp;
539  gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
540  GridPos t_gp;
541  Vector itw;
542 
543  if (T_DATAGRID.nelem() > 1) {
544  ostringstream os;
545  os << "In opt_prop_sptFromData.\n"
546  << "The temperature grid of the scattering data does not\n"
547  << "cover the atmospheric temperature at cloud location.\n"
548  << "The data should include the value T = " << rtp_temperature
549  << " K.";
550  chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
551 
552  gridpos(t_gp, T_DATAGRID, rtp_temperature);
553 
554  // Interpolation weights:
555  itw.resize(4);
556  interpweights(itw, freq_gp, t_gp);
557 
558  for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
559  i_za_sca++) {
560  for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
561  i_aa_sca++) {
562  //
563  // Interpolation of extinction matrix:
564  //
565  for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
566  ext_mat_data_int(i_za_sca, i_aa_sca, i) =
567  interp(itw,
568  EXT_MAT_DATA(joker, joker, i_za_sca, i_aa_sca, i),
569  freq_gp,
570  t_gp);
571  }
572  }
573  }
574 
575  for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
576  i_za_sca++) {
577  for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
578  i_aa_sca++) {
579  //
580  // Interpolation of absorption vector:
581  //
582  for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
583  abs_vec_data_int(i_za_sca, i_aa_sca, i) =
584  interp(itw,
585  ABS_VEC_DATA(joker, joker, i_za_sca, i_aa_sca, i),
586  freq_gp,
587  t_gp);
588  }
589  }
590  }
591  } else {
592  // Interpolation weights:
593  itw.resize(2);
594  interpweights(itw, freq_gp);
595 
596  for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
597  i_za_sca++) {
598  for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
599  i_aa_sca++) {
600  //
601  // Interpolation of extinction matrix:
602  //
603  for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
604  ext_mat_data_int(i_za_sca, i_aa_sca, i) =
605  interp(itw,
606  EXT_MAT_DATA(joker, 0, i_za_sca, i_aa_sca, i),
607  freq_gp);
608  }
609  }
610  }
611 
612  for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
613  i_za_sca++) {
614  for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
615  i_aa_sca++) {
616  //
617  // Interpolation of absorption vector:
618  //
619  for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
620  abs_vec_data_int(i_za_sca, i_aa_sca, i) =
621  interp(itw,
622  ABS_VEC_DATA(joker, 0, i_za_sca, i_aa_sca, i),
623  freq_gp);
624  }
625  }
626  }
627  }
628 
629  //
630  // Do the transformation into the laboratory coordinate system.
631  //
632  // Extinction matrix:
633  //
634  ext_matTransform(ext_mat_spt[i_se_flat],
635  ext_mat_data_int,
636  ZA_DATAGRID,
637  AA_DATAGRID,
638  PART_TYPE,
639  za_sca,
640  aa_sca,
641  verbosity);
642  //
643  // Absorption vector:
644  //
645  abs_vecTransform(abs_vec_spt[i_se_flat],
646  abs_vec_data_int,
647  ZA_DATAGRID,
648  AA_DATAGRID,
649  PART_TYPE,
650  za_sca,
651  aa_sca,
652  verbosity);
653  }
654 
655  i_se_flat++;
656  }
657  }
658 }
659 
660 /* Workspace method: Doxygen documentation will be auto-generated */
661 void opt_prop_sptFromScat_data( // Output and Input:
662  ArrayOfPropagationMatrix& ext_mat_spt,
663  ArrayOfStokesVector& abs_vec_spt,
664  // Input:
665  const ArrayOfArrayOfSingleScatteringData& scat_data,
666  const Index& scat_data_checked,
667  const Vector& za_grid,
668  const Vector& aa_grid,
669  const Index& za_index, // propagation directions
670  const Index& aa_index,
671  const Index& f_index,
672  const Numeric& rtp_temperature,
673  const Tensor4& pnd_field,
674  const Index& scat_p_index,
675  const Index& scat_lat_index,
676  const Index& scat_lon_index,
677  const Verbosity& verbosity) {
678  if (scat_data_checked != 1)
679  throw runtime_error(
680  "The scattering data must be flagged to have "
681  "passed a consistency check (scat_data_checked=1).");
682 
683  const Index N_ss = scat_data.nelem();
684  const Index stokes_dim = ext_mat_spt[0].StokesDimensions();
685  const Numeric za_sca = za_grid[za_index];
686  const Numeric aa_sca = aa_grid[aa_index];
687 
688  if (stokes_dim > 4 || stokes_dim < 1) {
689  throw runtime_error(
690  "The dimension of the stokes vector \n"
691  "must be 1,2,3 or 4");
692  }
693 
694  DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data);)
695  assert(ext_mat_spt.nelem() == N_se_total);
696  assert(abs_vec_spt.nelem() == N_se_total);
697 
698  // Extinction matrix and absorption vector data at a single frequency and
699  // temperature. Dimensions: [za_inc, aa_inc, matrix/vector element]
700  Tensor3 ext_mat_data_int;
701  Tensor3 abs_vec_data_int;
702 
703  // Initialisation
704  for (auto& pm : ext_mat_spt) pm.SetZero();
705  for (auto& sv : abs_vec_spt) sv.SetZero();
706 
707  Index this_f_index;
708 
709  Index i_se_flat = 0;
710  // Loop over the included scattering species
711  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
712  const Index N_se = scat_data[i_ss].nelem();
713 
714  // Loop over the included scattering elements
715  for (Index i_se = 0; i_se < N_se; i_se++) {
716  // If the particle number density at a specific point in the
717  // atmosphere for the i_se scattering element is zero, we don't need
718  // to do the transformation
719 
720  if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
721  PND_LIMIT) {
722  // First we have to transform the data from the coordinate system
723  // used in the database (depending on the kind of ptype) to the
724  // laboratory coordinate system.
725 
726  // Resize the variables for the interpolated data (1freq, 1T):
727  ext_mat_data_int.resize(
728  EXT_MAT_DATA.npages(), EXT_MAT_DATA.nrows(), EXT_MAT_DATA.ncols());
729  abs_vec_data_int.resize(
730  ABS_VEC_DATA.npages(), ABS_VEC_DATA.nrows(), ABS_VEC_DATA.ncols());
731 
732  // Gridpositions and interpolation weights;
733  GridPos t_gp;
734  Vector itw;
735  if (EXT_MAT_DATA.nbooks() > 1 || ABS_VEC_DATA.nbooks() > 1) {
736  ostringstream os;
737  os << "In opt_prop_sptFromScat_data.\n"
738  << "The temperature grid of the scattering data does not\n"
739  << "cover the atmospheric temperature at cloud location.\n"
740  << "The data should include the value T = " << rtp_temperature
741  << " K.";
742  chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
743 
744  gridpos(t_gp, T_DATAGRID, rtp_temperature);
745 
746  // Interpolation weights:
747  itw.resize(2);
748  interpweights(itw, t_gp);
749  }
750 
751  // Frequency extraction and temperature interpolation
752 
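// The checked scat_data may already be reduced to a single frequency per
// element or may still hold the full frequency grid; in the former case the
// only entry (index 0) is used, otherwise the entry at f_index.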
753  if (EXT_MAT_DATA.nshelves() == 1)
754  this_f_index = 0;
755  else
756  this_f_index = f_index;
757 
758  if (EXT_MAT_DATA.nbooks() > 1) {
759  // Interpolation of extinction matrix:
760  for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
761  i_za_sca++) {
762  for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
763  i_aa_sca++) {
764  for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
765  ext_mat_data_int(i_za_sca, i_aa_sca, i) = interp(
766  itw,
767  EXT_MAT_DATA(this_f_index, joker, i_za_sca, i_aa_sca, i),
768  t_gp);
769  }
770  }
771  }
772  } else {
773  ext_mat_data_int = EXT_MAT_DATA(this_f_index, 0, joker, joker, joker);
774  /*
775  for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
776  i_za_sca++)
777  {
778  for(Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
779  i_aa_sca++)
780  {
781  for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++)
782  {
783  ext_mat_data_int(i_za_sca, i_aa_sca, i) =
784  EXT_MAT_DATA(this_f_index, 0,
785  i_za_sca, i_aa_sca, i);
786  }
787  }
788  } */
789  }
790 
791  if (ABS_VEC_DATA.nshelves() == 1)
792  this_f_index = 0;
793  else
794  this_f_index = f_index;
795 
796  if (ABS_VEC_DATA.nbooks() > 1) {
797  // Interpolation of absorption vector:
798  for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
799  i_za_sca++) {
800  for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
801  i_aa_sca++) {
802  for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
803  abs_vec_data_int(i_za_sca, i_aa_sca, i) = interp(
804  itw,
805  ABS_VEC_DATA(this_f_index, joker, i_za_sca, i_aa_sca, i),
806  t_gp);
807  }
808  }
809  }
810  } else {
811  abs_vec_data_int = ABS_VEC_DATA(this_f_index, 0, joker, joker, joker);
812  /*
813  for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
814  i_za_sca++)
815  {
816  for(Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
817  i_aa_sca++)
818  {
819  for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++)
820  {
821  abs_vec_data_int(i_za_sca, i_aa_sca, i) =
822  ABS_VEC_DATA(this_f_index, 0,
823  i_za_sca, i_aa_sca, i);
824  }
825  }
826  } */
827  }
828 
829  //
830  // Do the transformation into the laboratory coordinate system.
831  //
832  // Extinction matrix:
833  ext_matTransform(ext_mat_spt[i_se_flat],
834  ext_mat_data_int,
835  ZA_DATAGRID,
836  AA_DATAGRID,
837  PART_TYPE,
838  za_sca,
839  aa_sca,
840  verbosity);
841  // Absorption vector:
842  abs_vecTransform(abs_vec_spt[i_se_flat],
843  abs_vec_data_int,
844  ZA_DATAGRID,
845  AA_DATAGRID,
846  PART_TYPE,
847  za_sca,
848  aa_sca,
849  verbosity);
850  }
851  i_se_flat++;
852  }
853  }
854 }
855 
856 /* Workspace method: Doxygen documentation will be auto-generated */
857 void opt_prop_bulkCalc( // Output and Input:
858  PropagationMatrix& ext_mat,
859  StokesVector& abs_vec,
860  // Input:
861  const ArrayOfPropagationMatrix& ext_mat_spt,
862  const ArrayOfStokesVector& abs_vec_spt,
863  const Tensor4& pnd_field,
864  const Index& scat_p_index,
865  const Index& scat_lat_index,
866  const Index& scat_lon_index,
867  const Verbosity&) {
868  Index N_se = abs_vec_spt.nelem();
869  //assert( ext_mat_spt.npages()==N_se )
870  if (ext_mat_spt.nelem() not_eq N_se) {
871  ostringstream os;
872  os << "Number of scattering elements in *abs_vec_spt* and *ext_mat_spt*\n"
873  << "does not agree.";
874  throw runtime_error(os.str());
875  }
876 
877  Index stokes_dim = abs_vec_spt[0].StokesDimensions();
878  //assert( ext_mat_spt.ncols()==stokes_dim && ext_mat_spt.nrows()==stokes_dim )
879  if (ext_mat_spt[0].StokesDimensions() not_eq stokes_dim) {
880  ostringstream os;
881  os << "*stokes_dim* of *abs_vec_spt* and *ext_mat_spt* does not agree.";
882  throw runtime_error(os.str());
883  }
884  if (stokes_dim > 4 || stokes_dim < 1) {
885  ostringstream os;
886  os << "The dimension of stokes vector can only be 1, 2, 3, or 4.";
887  throw runtime_error(os.str());
888  }
889 
890  ext_mat = PropagationMatrix(1, stokes_dim);
891  ext_mat.SetZero(); // Initialize to zero!
892  abs_vec = StokesVector(1, stokes_dim);
893  abs_vec.SetZero(); // Initialize to zero!
894 
895  PropagationMatrix ext_mat_part(1, stokes_dim);
896  ext_mat_part.SetZero();
897  StokesVector abs_vec_part(1, stokes_dim);
898  abs_vec_part.SetZero();
899 
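// The bulk properties are pnd-weighted sums over all scattering elements:
// ext_mat += sum_l pnd(l) * ext_mat_spt(l), and analogously for abs_vec.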
900  // this is the loop over the different scattering elements
901  for (Index l = 0; l < N_se; l++) {
902  abs_vec_part.MultiplyAndAdd(
903  pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index),
904  abs_vec_spt[l]);
905  ext_mat_part.MultiplyAndAdd(
906  pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index),
907  ext_mat_spt[l]);
908  }
909 
910  // Add the absorption due to the scattering elements.
911  abs_vec += abs_vec_part;
912  // Add the extinction due to the scattering elements to *ext_mat*.
913  ext_mat += ext_mat_part;
914 }
915 
916 /* Workspace method: Doxygen documentation will be auto-generated */
917 void ext_matAddGas(PropagationMatrix& ext_mat,
918  const ArrayOfPropagationMatrix& propmat_clearsky,
919  const Verbosity&) {
920  // Number of Stokes parameters:
921  const Index stokes_dim = ext_mat.StokesDimensions();
922 
923  // The second dimension of ext_mat must also match the number of
924  // Stokes parameters:
925  if (stokes_dim != propmat_clearsky[0].StokesDimensions())
926  throw runtime_error(
927  "Col dimension of propmat_clearsky "
928  "inconsistent with col dimension in ext_mat.");
929 
930  // Number of frequencies:
931  const Index f_dim = ext_mat.NumberOfFrequencies();
932 
933  // This must be consistent with the second dimension of
934  // propmat_clearsky. Check this:
935  if (f_dim != propmat_clearsky[0].NumberOfFrequencies())
936  throw runtime_error(
937  "Frequency dimension of ext_mat and propmat_clearsky\n"
938  "are inconsistent in ext_matAddGas.");
939 
940  for (auto& pm : propmat_clearsky) ext_mat += pm;
941 }
942 
943 /* Workspace method: Doxygen documentation will be auto-generated */
944 void abs_vecAddGas(StokesVector& abs_vec,
945  const ArrayOfPropagationMatrix& propmat_clearsky,
946  const Verbosity&) {
947  // Number of frequencies:
948  const Index f_dim = abs_vec.NumberOfFrequencies();
949  const Index stokes_dim = abs_vec.StokesDimensions();
950 
951  // This must be consistent with the second dimension of
952  // propmat_clearsky. Check this:
953  if (f_dim != propmat_clearsky[0].NumberOfFrequencies())
954  throw runtime_error(
955  "Frequency dimension of abs_vec and propmat_clearsky\n"
956  "are inconsistent in abs_vecAddGas.");
957  if (stokes_dim != propmat_clearsky[0].StokesDimensions())
958  throw runtime_error(
959  "Stokes dimension of abs_vec and propmat_clearsky\n"
960  "are inconsistent in abs_vecAddGas.");
961 
962  // Loop all frequencies. Of course this includes the special case
963  // that there is only one frequency.
964  for (auto& pm : propmat_clearsky)
965  abs_vec += pm; // Defined to only add to the first element.
966 
967  // We don't have to do anything about higher elements of abs_vec,
968  // since scalar gas absorption only influences the first element.
969 }
970 
971 /* Workspace method: Doxygen documentation will be auto-generated */
972 /*
973 void ext_matAddGasZeeman( Tensor3& ext_mat,
974  const Tensor3& ext_mat_zee,
975  const Verbosity&)
976 {
977  // Number of Stokes parameters:
978  const Index stokes_dim = ext_mat.ncols();
979 
980  // The second dimension of ext_mat must also match the number of
981  // Stokes parameters:
982  if ( stokes_dim != ext_mat.nrows() )
983  throw runtime_error("Row dimension of ext_mat inconsistent with "
984  "column dimension.");
985 
986  for ( Index i=0; i<stokes_dim; ++i )
987  {
988  for ( Index j=0; j<stokes_dim; ++j )
989  {
990  // Add the zeeman extinction to extinction matrix.
991  ext_mat(joker,i,j) += ext_mat_zee(joker, i, j);
992  }
993 
994  }
995 }
996 */
997 
998 /* Workspace method: Doxygen documentation will be auto-generated */
999 /*
1000 void abs_vecAddGasZeeman( Matrix& abs_vec,
1001  const Matrix& abs_vec_zee,
1002  const Verbosity&)
1003 {
1004  // Number of Stokes parameters:
1005  const Index stokes_dim = abs_vec_zee.ncols();
1006  // that there is only one frequency.
1007  for ( Index j=0; j<stokes_dim; ++j )
1008  {
1009  abs_vec(joker,j) += abs_vec_zee(joker,j);
1010  }
1011 }
1012 */
1013 
1014 /* Workspace method: Doxygen documentation will be auto-generated */
1015 void pha_matCalc(Tensor4& pha_mat,
1016  const Tensor5& pha_mat_spt,
1017  const Tensor4& pnd_field,
1018  const Index& atmosphere_dim,
1019  const Index& scat_p_index,
1020  const Index& scat_lat_index,
1021  const Index& scat_lon_index,
1022  const Verbosity&) {
1023  Index N_se = pha_mat_spt.nshelves();
1024  Index Nza = pha_mat_spt.nbooks();
1025  Index Naa = pha_mat_spt.npages();
1026  Index stokes_dim = pha_mat_spt.nrows();
1027 
1028  pha_mat.resize(Nza, Naa, stokes_dim, stokes_dim);
1029 
1030  // Initialisation
1031  pha_mat = 0.0;
1032 
1033  Index ilat = 0;
1034  Index ilon = 0;
1035  if (atmosphere_dim > 1) ilat = scat_lat_index;
1036  if (atmosphere_dim > 2) ilon = scat_lon_index;
1037 
1038  if (atmosphere_dim == 1) {
1039  // For 1D atmospheres, we additionally integrate the phase matrix over the
1040  // azimuth, because there is no azimuth dependency of the incoming
1041  // field.
1042 
1043  Numeric grid_step_size_azimuth = 360. / (Numeric)(Naa - 1) * DEG2RAD;
1044 
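// The azimuth integration below uses the trapezoidal rule on the equidistant
// azimuth grid: each pair of adjacent samples contributes
// (Z(aa) + Z(aa+1))/2 * grid_step_size_azimuth to the integrated phase matrix.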
1045  // this is a loop over the different scattering elements
1046  for (Index pt_index = 0; pt_index < N_se; ++pt_index)
1047  // these are loops over zenith angle and azimuth angle
1048  for (Index za_index = 0; za_index < Nza; ++za_index)
1049  for (Index aa_index = 0; aa_index < Naa - 1; ++aa_index)
1050  // now the last two loops over the stokes dimension.
1051  for (Index stokes_index_1 = 0; stokes_index_1 < stokes_dim;
1052  ++stokes_index_1)
1053  for (Index stokes_index_2 = 0; stokes_index_2 < stokes_dim;
1054  ++stokes_index_2)
1055  //summation of the product of pnd_field and
1056  //pha_mat_spt.
1057  pha_mat(za_index, 0, stokes_index_1, stokes_index_2) +=
1058  ((pha_mat_spt(pt_index,
1059  za_index,
1060  aa_index,
1061  stokes_index_1,
1062  stokes_index_2) +
1063  pha_mat_spt(pt_index,
1064  za_index,
1065  aa_index + 1,
1066  stokes_index_1,
1067  stokes_index_2)) /
1068  2 * grid_step_size_azimuth *
1069  pnd_field(pt_index, scat_p_index, ilat, ilon));
1070  } else {
1071  // this is a loop over the different scattering elements
1072  for (Index pt_index = 0; pt_index < N_se; ++pt_index)
1073  // these are loops over zenith angle and azimuth angle
1074  for (Index za_index = 0; za_index < Nza; ++za_index)
1075  for (Index aa_index = 0; aa_index < Naa; ++aa_index)
1076  // now the last two loops over the stokes dimension.
1077  for (Index stokes_index_1 = 0; stokes_index_1 < stokes_dim;
1078  ++stokes_index_1)
1079  for (Index stokes_index_2 = 0; stokes_index_2 < stokes_dim;
1080  ++stokes_index_2)
1081  //summation of the product of pnd_field and
1082  //pha_mat_spt.
1083  pha_mat(za_index, aa_index, stokes_index_1, stokes_index_2) +=
1084  (pha_mat_spt(pt_index,
1085  za_index,
1086  aa_index,
1087  stokes_index_1,
1088  stokes_index_2) *
1089  pnd_field(pt_index, scat_p_index, ilat, ilon));
1090  }
1091 }
1092 
1093 /* Workspace method: Doxygen documentation will be auto-generated */
1094 void scat_dataCheck( //Input:
1095  const ArrayOfArrayOfSingleScatteringData& scat_data,
1096  const String& check_type,
1097  const Numeric& threshold,
1098  const Verbosity& verbosity) {
1099  CREATE_OUT0;
1100  CREATE_OUT1;
1101  CREATE_OUT2;
1102  //CREATE_OUT3;
1103 
1104  // FIXME:
1105  // so far, this works for both scat_data and scat_data_raw. Needs to be
1106  // adapted, though, once we have WSM that can create Z/K/a with different
1107  // f/T dimensions than scat_data_single.f/T_grid.
1108 
1109  const Index N_ss = scat_data.nelem();
1110 
1111  // 1) any negative values in Z11, K11, or a1? K11>=a1?
1112  // 2) scat_data containing any NaN?
1113  // 3) sca_mat norm sufficiently good (int(Z11)~=K11-a1?)
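// Check (3) relies on energy conservation: the 4*pi solid-angle integral of
// Z11 must equal the scattering cross section Csca = K11 - a1; deviations are
// expressed as an absolute albedo error and compared against *threshold*.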
1114 
1115  // Loop over the included scattering species
1116  out2 << " checking for negative values in Z11, K11, and a1, and for K11<a1\n";
1117  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1118  const Index N_se = scat_data[i_ss].nelem();
1119 
1120  // Loop over the included scattering elements
1121  for (Index i_se = 0; i_se < N_se; i_se++) {
1122  for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1123  for (Index zai = 0; zai < ABS_VEC_DATA.npages(); zai++)
1124  for (Index aai = 0; aai < ABS_VEC_DATA.nrows(); aai++) {
1125  for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1126  if (EXT_MAT_DATA(f, t, zai, aai, 0) < 0 ||
1127  ABS_VEC_DATA(f, t, zai, aai, 0) < 0) {
1128  ostringstream os;
1129  os << "Scatt. species #" << i_ss << " element #" << i_se
1130  << " contains negative K11 or a1 at f#" << f << ", T#" << t
1131  << ", za#" << zai << ", aa#" << aai << "\n";
1132  throw runtime_error(os.str());
1133  }
1134  if (EXT_MAT_DATA(f, t, zai, aai, 0) <
1135  ABS_VEC_DATA(f, t, zai, aai, 0)) {
1136  ostringstream os;
1137  os << "Scatt. species #" << i_ss << " element #" << i_se
1138  << " has K11<a1 at f#" << f << ", T#" << t << ", za#" << zai
1139  << ", aa#" << aai << "\n";
1140  throw runtime_error(os.str());
1141  }
1142  }
1143  // Since we allow pha_mat to have only a single T entry (while
1144  // T_grid, ext_mat, abs_vec have more), we need a separate T loop
1145  // for pha_mat.
1146  Index nTpha = PHA_MAT_DATA.nvitrines();
1147  for (Index t = 0; t < nTpha; t++) {
1148  for (Index zas = 0; zas < PHA_MAT_DATA.nshelves(); zas++)
1149  for (Index aas = 0; aas < PHA_MAT_DATA.nbooks(); aas++)
1150  if (PHA_MAT_DATA(f, t, zas, aas, zai, aai, 0) < 0) {
1151  ostringstream os;
1152  os << "Scatt. species #" << i_ss << " element #" << i_se
1153  << " contains negative Z11 at f#" << f << ", T#" << t
1154  << " (of " << nTpha << "), za_sca#" << zas << ", aa_sca#"
1155  << aas << ", za_inc#" << zai << ", aa_inc#" << aai
1156  << "\n";
1157  throw runtime_error(os.str());
1158  }
1159  }
1160  }
1161  }
1162  }
1163  }
1164 
1165  // Loop over the included scattering species
1166  out2 << " checking for NaN anywhere in Z, K, and a\n";
1167  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1168  const Index N_se = scat_data[i_ss].nelem();
1169 
1170  // Loop over the included scattering elements
1171  for (Index i_se = 0; i_se < N_se; i_se++) {
1172  for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1173  for (Index zai = 0; zai < ABS_VEC_DATA.npages(); zai++)
1174  for (Index aai = 0; aai < ABS_VEC_DATA.nrows(); aai++) {
1175  for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1176  for (Index st = 0; st < ABS_VEC_DATA.ncols(); st++)
1177  if (std::isnan(ABS_VEC_DATA(f, t, zai, aai, st))) {
1178  ostringstream os;
1179  os << "Scatt. species #" << i_ss << " element #" << i_se
1180  << " contains NaN in abs_vec at f#" << f << ", T#" << t
1181  << ", za#" << zai << ", aa#" << aai << ", stokes #" << st
1182  << "\n";
1183  throw runtime_error(os.str());
1184  }
1185  for (Index st = 0; st < EXT_MAT_DATA.ncols(); st++)
1186  if (std::isnan(EXT_MAT_DATA(f, t, zai, aai, st))) {
1187  ostringstream os;
1188  os << "Scatt. species #" << i_ss << " element #" << i_se
1189  << " contains NaN in ext_mat at f#" << f << ", T#" << t
1190  << ", za#" << zai << ", aa#" << aai << ", stokes #" << st
1191  << "\n";
1192  throw runtime_error(os.str());
1193  }
1194  }
1195  Index nTpha = PHA_MAT_DATA.nvitrines();
1196  for (Index t = 0; t < nTpha; t++) {
1197  for (Index zas = 0; zas < PHA_MAT_DATA.nshelves(); zas++)
1198  for (Index aas = 0; aas < PHA_MAT_DATA.nbooks(); aas++)
1199  for (Index st = 0; st < PHA_MAT_DATA.ncols(); st++)
1200  if (std::isnan(
1201  PHA_MAT_DATA(f, t, zas, aas, zai, aai, st))) {
1202  ostringstream os;
1203  os << "Scatt. species #" << i_ss << " element #" << i_se
1204  << " contains NaN in pha_mat at f#" << f << ", T#" << t
1205  << " (of " << nTpha << "), za_sca#" << zas
1206  << ", aa_sca#" << aas << ", za_inc#" << zai
1207  << ", aa_inc#" << aai << ", stokes #" << st
1208  << "\n";
1209  throw runtime_error(os.str());
1210  }
1211  }
1212  }
1213  }
1214  }
1215  }
1216 
1217  if (check_type.toupper() == "ALL") {
1218  // Loop over the included scattering species
1219  out2 << " checking normalization of scattering matrix\n";
1220  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1221  const Index N_se = scat_data[i_ss].nelem();
1222 
1223  // Loop over the included scattering elements
1224  for (Index i_se = 0; i_se < N_se; i_se++)
1225  if (T_DATAGRID.nelem() == PHA_MAT_DATA.nvitrines()) switch (PART_TYPE) {
1226  case PTYPE_TOTAL_RND: {
1227  for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1228  for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1229  Numeric Csca = AngIntegrate_trapezoid(
1230  PHA_MAT_DATA(f, t, joker, 0, 0, 0, 0), ZA_DATAGRID);
1231  Numeric Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1232  //Numeric Cabs = Cext_data - Csca;
1233  Numeric Cabs_data = ABS_VEC_DATA(f, t, 0, 0, 0);
1234  Numeric Csca_data = Cext_data - Cabs_data;
1235 
1236  /*
1237  out3 << " Coefficients in database: "
1238  << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1239  << " Csca: " << Csca_data << "\n"
1240  << " Calculated coefficients: "
1241  << "Cabs calc: " << Cabs
1242  << " Csca calc: " << Csca << "\n"
1243  << " Deviations "
1244  << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1245  << "% Csca: " << 1e2*Csca/Csca_data-1e2
1246  << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1247  */
1248 
1249  //if (abs(Csca/Csca_data-1.)*Csca_data/Cext_data > threshold)
1250  // below equivalent to the above
1251  // (it's actually the (absolute) albedo deviation!)
1252  if (abs(Csca - Csca_data) / Cext_data > threshold) {
1253  ostringstream os;
1254  os << " Deviations in scat_data too large:\n"
1255  << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1256  << " at nominal (actual) albedo of "
1257  << Csca_data / Cext_data << " (" << Csca / Cext_data
1258  << ").\n"
1259  << " Check entry for scattering element " << i_se
1260  << " of scattering species " << i_ss << " at " << f
1261  << ".frequency and " << t << ".temperature!\n";
1262  throw runtime_error(os.str());
1263  }
1264  }
1265  }
1266  break;
1267  }
1268 
1269  case PTYPE_AZIMUTH_RND: {
1270  for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1271  for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1272  for (Index iza = 0; iza < ABS_VEC_DATA.npages(); iza++) {
1273  Numeric Csca =
1274  AngIntegrate_trapezoid(
1275  PHA_MAT_DATA(f, t, joker, joker, iza, 0, 0),
1276  ZA_DATAGRID,
1277  AA_DATAGRID);
1278  Numeric Cext_data = EXT_MAT_DATA(f, t, iza, 0, 0);
1279  //Numeric Cabs = Cext_data - Csca;
1280  Numeric Cabs_data = ABS_VEC_DATA(f, t, iza, 0, 0);
1281  Numeric Csca_data = Cext_data - Cabs_data;
1282 
1283  /*
1284  out3 << " Coefficients in database: "
1285  << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1286  << " Csca: " << Csca_data << "\n"
1287  << " Calculated coefficients: "
1288  << "Cabs calc: " << Cabs
1289  << " Csca calc: " << Csca << "\n"
1290  << " Deviations "
1291  << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1292  << "% Csca: " << 1e2*Csca/Csca_data-1e2
1293  << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1294  */
1295 
1296  //if (abs(Csca/Csca_data-1.)*Csca_data/Cext_data > threshold)
1297  // below equivalent to the above
1298  // (it's actually the (absolute) albedo deviation!)
1299  if (abs(Csca - Csca_data) / Cext_data > threshold) {
1300  ostringstream os;
1301  os << " Deviations in scat_data too large:\n"
1302  << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1303  << " at nominal (actual) albedo of "
1304  << Csca_data / Cext_data << " (" << Csca / Cext_data
1305  << ").\n"
1306  << " Check entry for scattering element " << i_se
1307  << " of scattering species " << i_ss << " at " << f
1308  << ". frequency, " << t << ". temperature, and " << iza
1309  << ". incident polar angle!\n";
1310  throw runtime_error(os.str());
1311  }
1312  }
1313  }
1314  }
1315  break;
1316  }
1317 
1318  default: {
1319  out0
1320  << " WARNING:\n"
1321  << " scat_data consistency check not implemented (yet?!) for\n"
1322  << " ptype " << PART_TYPE << "!\n";
1323  }
1324  }
1325  else
1326  out2 << " WARNING:\n"
1327  << " scat_data norm check can not be performed for pha_mat-only"
1328  << " T-reduced scattering elements\n"
1329  << " as found in scatt element #" << i_se
1330  << " of scatt species #" << i_ss << "!\n";
1331  }
1332  } else if (check_type.toupper() == "SANE") {
1333  out1 << " WARNING:\n"
1334  << " Normalization check on pha_mat switched off.\n"
1335  << " Scattering solution might be wrong.\n";
1336  } else {
1337  ostringstream os;
1338  os << "Invalid value for argument *check_type*: '" << check_type << "'.\n";
1339  os << "Valid values are 'all' or 'sane'.";
1340  throw runtime_error(os.str());
1341  }
1342 }
1343 
1344 /* Workspace method: Doxygen documentation will be auto-generated */
1345 void DoitScatteringDataPrepare(
1346  Workspace& ws, //Output:
1347  ArrayOfTensor7& pha_mat_sptDOITOpt,
1348  ArrayOfArrayOfSingleScatteringData& scat_data_mono,
1349  Tensor7& pha_mat_doit,
1350  //Output and Input:
1351  Vector& aa_grid,
1352  //Input:
1353  const Index& doit_za_grid_size,
1354  const ArrayOfArrayOfSingleScatteringData& scat_data,
1355  const Index& scat_data_checked,
1356  const Index& f_index,
1357  const Index& atmosphere_dim,
1358  const Index& stokes_dim,
1359  const Tensor3& t_field,
1360  const ArrayOfIndex& cloudbox_limits,
1361  const Tensor4& pnd_field,
1362  const Agenda& pha_mat_spt_agenda,
1363  const Verbosity& verbosity) {
1364  if (scat_data_checked != 1)
1365  throw runtime_error(
1366  "The scattering data must be flagged to have "
1367  "passed a consistency check (scat_data_checked=1).");
1368 
1369  // Number of azimuth angles.
1370  const Index Naa = aa_grid.nelem();
1371  Vector grid_stepsize(2);
1372  grid_stepsize[0] = 180. / (Numeric)(doit_za_grid_size - 1);
1373  //grid_stepsize[1] = 360./(Numeric)(Naa - 1);
1374 
1375  // Initialize variable *pha_mat_spt*
1376  Tensor5 pha_mat_spt_local(pnd_field.nbooks(),
1377  doit_za_grid_size,
1378  aa_grid.nelem(),
1379  stokes_dim,
1380  stokes_dim,
1381  0.);
1382  Tensor4 pha_mat_local(doit_za_grid_size, Naa, stokes_dim, stokes_dim, 0.);
1383  Tensor6 pha_mat_local_out(cloudbox_limits[1] - cloudbox_limits[0] + 1,
1384  doit_za_grid_size,
1385  doit_za_grid_size,
1386  Naa,
1387  stokes_dim,
1388  stokes_dim,
1389  0.);
1390 
1391  // Extract the monochromatic scattering data at the frequency given by f_index
1392  scat_data_monoExtract(scat_data_mono, scat_data, f_index, verbosity);
1393 
1394  // For 1D calculation the scat_aa dimension is not required:
1395  Index N_aa_sca;
1396  if (atmosphere_dim == 1)
1397  N_aa_sca = 1;
1398  else
1399  N_aa_sca = aa_grid.nelem();
1400 
1401  Vector za_grid;
1402  nlinspace(za_grid, 0, 180, doit_za_grid_size);
1403 
1404  assert(scat_data.nelem() == scat_data_mono.nelem());
1405 
1406  const Index N_ss = scat_data.nelem();
1407  // FIXME: We need this still as a workspace variable because pha_mat_spt_agenda
1408  // contains a WS method that requires it as input
1409  pha_mat_sptDOITOpt.resize(TotalNumberOfElements(scat_data));
1410 
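// For each scattering element, pha_mat_sptDOITOpt stores the lab-frame phase
// matrix precomputed on the DOIT angular grids, with dimensions
// [T, za_sca, aa_sca, za_inc, aa_inc, stokes, stokes].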
1411  Index i_se_flat = 0;
1412  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1413  const Index N_se = scat_data[i_ss].nelem();
1414 
1415  for (Index i_se = 0; i_se < N_se; i_se++) {
1416  Index N_T = scat_data_mono[i_ss][i_se].T_grid.nelem();
1417  pha_mat_sptDOITOpt[i_se_flat].resize(N_T,
1418  doit_za_grid_size,
1419  N_aa_sca,
1420  doit_za_grid_size,
1421  aa_grid.nelem(),
1422  stokes_dim,
1423  stokes_dim);
1424 
1425  // Initialize:
1426  pha_mat_sptDOITOpt[i_se_flat] = 0.;
1427 
1428  // Calculate the lab-frame phase matrix for all combinations of incoming
1429  // and scattered directions and all temperatures of this element.
1430  for (Index t_idx = 0; t_idx < N_T; t_idx++) {
1431  // These are the scattered directions as called in *scat_field_calc*
1432  for (Index za_sca_idx = 0; za_sca_idx < doit_za_grid_size;
1433  za_sca_idx++) {
1434  for (Index aa_sca_idx = 0; aa_sca_idx < N_aa_sca; aa_sca_idx++) {
1435  // Integration is performed over all incoming directions
1436  for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
1437  za_inc_idx++) {
1438  for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
1439  aa_inc_idx++) {
1440  pha_matTransform(
1441  pha_mat_sptDOITOpt[i_se_flat](t_idx,
1442  za_sca_idx,
1443  aa_sca_idx,
1444  za_inc_idx,
1445  aa_inc_idx,
1446  joker,
1447  joker),
1448  scat_data_mono[i_ss][i_se].pha_mat_data(
1449  0, t_idx, joker, joker, joker, joker, joker),
1450  scat_data_mono[i_ss][i_se].za_grid,
1451  scat_data_mono[i_ss][i_se].aa_grid,
1452  scat_data_mono[i_ss][i_se].ptype,
1453  za_sca_idx,
1454  aa_sca_idx,
1455  za_inc_idx,
1456  aa_inc_idx,
1457  za_grid,
1458  aa_grid,
1459  verbosity);
1460  }
1461  }
1462  }
1463  }
1464  }
1465 
1466  i_se_flat++;
1467  }
1468  }
1469  // Interpolate phase matrix to current grid
1470  pha_mat_doit.resize(cloudbox_limits[1] - cloudbox_limits[0] + 1,
1471  doit_za_grid_size,
1472  N_aa_sca,
1473  doit_za_grid_size,
1474  Naa,
1475  stokes_dim,
1476  stokes_dim);
1477  pha_mat_doit = 0;
1478 
1479  if (atmosphere_dim == 1) {
1480  Index aa_index_local = 0;
1481 
1482  // Get pha_mat at the grid positions
1483  // Since atmosphere_dim = 1, there is no loop over lat and lon grids
1484  for (Index p_index = 0; p_index <= cloudbox_limits[1] - cloudbox_limits[0];
1485  p_index++) {
1486  Numeric rtp_temperature_local =
1487  t_field(p_index + cloudbox_limits[0], 0, 0);
1488  // There is only a loop over the zenith angle grid; no azimuth angle grid.
1489  for (Index za_index_local = 0;
1490  za_index_local < doit_za_grid_size;
1491  za_index_local++) {
1492  // Dummy index
1493  Index index_zero = 0;
1494 
1495  // Calculate the phase matrix of individual scattering elements
1496  pha_mat_spt_agendaExecute(ws,
1497  pha_mat_spt_local,
1498  za_index_local,
1499  index_zero,
1500  index_zero,
1501  p_index,
1502  aa_index_local,
1503  rtp_temperature_local,
1504  pha_mat_spt_agenda);
1505 
1506  // Sum over all scattering elements
1507  pha_matCalc(pha_mat_local,
1508  pha_mat_spt_local,
1509  pnd_field,
1510  atmosphere_dim,
1511  p_index,
1512  0,
1513  0,
1514  verbosity);
1515  pha_mat_doit(
1516  p_index, za_index_local, 0, joker, joker, joker, joker) =
1517  pha_mat_local;
1518  }
1519  }
1520 
1521  // Set the incoming azimuth grid to a single zero value, because there is
1522  // no azimuth dependency for 1D atmospheres.
1523  aa_grid.resize(1);
1524  aa_grid = 0;
1525  }
1526 }
1527 
1528 /* Workspace method: Doxygen documentation will be auto-generated */
1529 void scat_dataCalc(ArrayOfArrayOfSingleScatteringData& scat_data,
1530  const ArrayOfArrayOfSingleScatteringData& scat_data_raw,
1531  const Vector& f_grid,
1532  const Index& interp_order,
1533  const Verbosity&)
1534 // FIXME: when we allow K, a, Z to be on different f and T grids, their use in
1535 // the scatt solvers needs to be reviewed again and adapted to this!
1536 {
1537  //Extrapolation factor:
1538  //const Numeric extpolfac = 0.5;
1539 
1540  Index nf = f_grid.nelem();
1541 
1542  // Check whether the single scattering data contains the right frequencies:
1543  // The check was changed to allow extrapolation at the boundaries of the
1544  // frequency grid.
1545  const String which_interpolation = "scat_data_raw.f_grid to f_grid";
1546  for (Index i_ss = 0; i_ss < scat_data_raw.nelem(); i_ss++) {
1547  for (Index i_se = 0; i_se < scat_data_raw[i_ss].nelem(); i_se++) {
1548  // Check for the special case that ssd.f_grid and f_grid both have only one
1549  // element. If identical, that's fine. If not, throw an error.
1550  if (scat_data_raw[i_ss][i_se].f_grid.nelem() == 1 && nf == 1)
1551  if (!is_same_within_epsilon(scat_data_raw[i_ss][i_se].f_grid[0],
1552  f_grid[0],
1553  2 * DBL_EPSILON)) {
1554  ostringstream os;
1555  os << "There is a problem with the grids for the following "
1556  << "interpolation:\n"
1557  << which_interpolation << "\n"
1558  << "If original grid has only 1 element, the new grid must also have\n"
1559  << "only a single element and hold the same value as the original grid.";
1560  throw runtime_error(os.str());
1561  }
1562 
1563  // check with extrapolation
1564  chk_interpolation_grids(which_interpolation,
1565  scat_data_raw[i_ss][i_se].f_grid,
1566  f_grid,
1567  interp_order);
1568  }
1569  }
1570 
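// The frequency interpolation below is a polynomial interpolation of order
// *interp_order* (via gridpos_poly), applied independently to pha_mat,
// ext_mat and abs_vec at every temperature / angle / element combination.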
1571  //Initialise scat_data
1572  scat_data.resize(scat_data_raw.nelem());
1573 
1574  // Loop over the included scattering species
1575  for (Index i_ss = 0; i_ss < scat_data_raw.nelem(); i_ss++) {
1576  const Index N_se = scat_data_raw[i_ss].nelem();
1577 
1578  //Initialise scat_data
1579  scat_data[i_ss].resize(N_se);
1580 
1581  // Loop over the included scattering elements
1582  for (Index i_se = 0; i_se < N_se; i_se++) {
1583  //Stuff that doesn't need interpolating
1584  PART_TYPE = scat_data_raw[i_ss][i_se].ptype;
1585  F_DATAGRID = f_grid;
1586  T_DATAGRID = scat_data_raw[i_ss][i_se].T_grid;
1587  ZA_DATAGRID = scat_data_raw[i_ss][i_se].za_grid;
1588  AA_DATAGRID = scat_data_raw[i_ss][i_se].aa_grid;
1589 
1590  //Sizing SSD data containers
1591  PHA_MAT_DATA.resize(nf,
1592  scat_data_raw[i_ss][i_se].pha_mat_data.nvitrines(),
1593  scat_data_raw[i_ss][i_se].pha_mat_data.nshelves(),
1594  scat_data_raw[i_ss][i_se].pha_mat_data.nbooks(),
1595  scat_data_raw[i_ss][i_se].pha_mat_data.npages(),
1596  scat_data_raw[i_ss][i_se].pha_mat_data.nrows(),
1597  scat_data_raw[i_ss][i_se].pha_mat_data.ncols());
1598  EXT_MAT_DATA.resize(nf,
1599  scat_data_raw[i_ss][i_se].ext_mat_data.nbooks(),
1600  scat_data_raw[i_ss][i_se].ext_mat_data.npages(),
1601  scat_data_raw[i_ss][i_se].ext_mat_data.nrows(),
1602  scat_data_raw[i_ss][i_se].ext_mat_data.ncols());
1603  ABS_VEC_DATA.resize(nf,
1604  scat_data_raw[i_ss][i_se].abs_vec_data.nbooks(),
1605  scat_data_raw[i_ss][i_se].abs_vec_data.npages(),
1606  scat_data_raw[i_ss][i_se].abs_vec_data.nrows(),
1607  scat_data_raw[i_ss][i_se].abs_vec_data.ncols());
1608 
1609  const bool single_se_fgrid =
1610  (scat_data_raw[i_ss][i_se].f_grid.nelem() == 1);
1611  if (!single_se_fgrid) {
1612  // Gridpositions:
1613  ArrayOfGridPosPoly freq_gp(nf);
1615  gridpos_poly(
1616  freq_gp, scat_data_raw[i_ss][i_se].f_grid, f_grid, interp_order);
1617 
1618  // Interpolation weights:
1619  Matrix itw(nf, interp_order + 1);
1620  interpweights(itw, freq_gp);
1621 
1622  //Phase matrix data
1623  for (Index t_index = 0;
1624  t_index < scat_data_raw[i_ss][i_se].pha_mat_data.nvitrines();
1625  t_index++) {
1626  for (Index i_za_sca = 0;
1627  i_za_sca < scat_data_raw[i_ss][i_se].pha_mat_data.nshelves();
1628  i_za_sca++) {
1629  for (Index i_aa_sca = 0;
1630  i_aa_sca < scat_data_raw[i_ss][i_se].pha_mat_data.nbooks();
1631  i_aa_sca++) {
1632  for (Index i_za_inc = 0;
1633  i_za_inc < scat_data_raw[i_ss][i_se].pha_mat_data.npages();
1634  i_za_inc++) {
1635  for (Index i_aa_inc = 0;
1636  i_aa_inc < scat_data_raw[i_ss][i_se].pha_mat_data.nrows();
1637  i_aa_inc++) {
1638  for (Index i = 0;
1639  i < scat_data_raw[i_ss][i_se].pha_mat_data.ncols();
1640  i++) {
1641  interp(scat_data[i_ss][i_se].pha_mat_data(joker,
1642  t_index,
1643  i_za_sca,
1644  i_aa_sca,
1645  i_za_inc,
1646  i_aa_inc,
1647  i),
1648  itw,
1649  scat_data_raw[i_ss][i_se].pha_mat_data(joker,
1650  t_index,
1651  i_za_sca,
1652  i_aa_sca,
1653  i_za_inc,
1654  i_aa_inc,
1655  i),
1656  freq_gp);
1657  }
1658  }
1659  }
1660  }
1661  }
1662  }
1663 
1664  //Extinction matrix data
1665  for (Index t_index = 0;
1666  t_index < scat_data_raw[i_ss][i_se].ext_mat_data.nbooks();
1667  t_index++) {
1668  for (Index i_za_sca = 0;
1669  i_za_sca < scat_data_raw[i_ss][i_se].ext_mat_data.npages();
1670  i_za_sca++) {
1671  for (Index i_aa_sca = 0;
1672  i_aa_sca < scat_data_raw[i_ss][i_se].ext_mat_data.nrows();
1673  i_aa_sca++) {
1674  for (Index i = 0;
1675  i < scat_data_raw[i_ss][i_se].ext_mat_data.ncols();
1676  i++) {
1677  interp(scat_data[i_ss][i_se].ext_mat_data(
1678  joker, t_index, i_za_sca, i_aa_sca, i),
1679  itw,
1680  scat_data_raw[i_ss][i_se].ext_mat_data(
1681  joker, t_index, i_za_sca, i_aa_sca, i),
1682  freq_gp);
1683  }
1684  }
1685  }
1686  }
1687 
1688  //Absorption vector data
1689  for (Index t_index = 0;
1690  t_index < scat_data_raw[i_ss][i_se].abs_vec_data.nbooks();
1691  t_index++) {
1692  for (Index i_za_sca = 0;
1693  i_za_sca < scat_data_raw[i_ss][i_se].abs_vec_data.npages();
1694  i_za_sca++) {
1695  for (Index i_aa_sca = 0;
1696  i_aa_sca < scat_data_raw[i_ss][i_se].abs_vec_data.nrows();
1697  i_aa_sca++) {
1698  for (Index i = 0;
1699  i < scat_data_raw[i_ss][i_se].abs_vec_data.ncols();
1700  i++) {
1701  interp(scat_data[i_ss][i_se].abs_vec_data(
1702  joker, t_index, i_za_sca, i_aa_sca, i),
1703  itw,
1704  scat_data_raw[i_ss][i_se].abs_vec_data(
1705  joker, t_index, i_za_sca, i_aa_sca, i),
1706  freq_gp);
1707  }
1708  }
1709  }
1710  }
1711  } else {
1712  assert(nf == 1);
1713  // we only have one f_grid value in the old and the new data (and they have
1714  // been confirmed to be the same), hence we only need to copy over/reassign
1715  // the data.
1716  scat_data[i_ss][i_se].pha_mat_data =
1717  scat_data_raw[i_ss][i_se].pha_mat_data;
1718  scat_data[i_ss][i_se].ext_mat_data =
1719  scat_data_raw[i_ss][i_se].ext_mat_data;
1720  scat_data[i_ss][i_se].abs_vec_data =
1721  scat_data_raw[i_ss][i_se].abs_vec_data;
1722  }
1723  }
1724  }
1725 }
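// ----------------------------------------------------------------------------
// Illustrative sketch (added note; not part of the original ARTS source): the
// frequency interpolation pattern used in the method above. gridpos_poly
// locates the new frequencies on the element's own f_grid, interpweights turns
// the grid positions into (interp_order+1) weights per new point, and interp
// applies the weights along the frequency dimension of one data slice. The
// helper name and its arguments (y_new, y_old, f_old, f_new) are hypothetical.
static void interp_to_new_fgrid(VectorView y_new,       // result on f_new
                                ConstVectorView y_old,  // data on f_old
                                ConstVectorView f_old,
                                ConstVectorView f_new,
                                const Index interp_order) {
  ArrayOfGridPosPoly gp(f_new.nelem());
  gridpos_poly(gp, f_old, f_new, interp_order);  // positions on the old grid
  Matrix itw(f_new.nelem(), interp_order + 1);
  interpweights(itw, gp);                        // polynomial weights
  interp(y_new, itw, y_old, gp);                 // weighted sums per new point
}
// ----------------------------------------------------------------------------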
1726 
1727 /* Workspace method: Doxygen documentation will be auto-generated */
1728 void scat_dataReduceT(ArrayOfArrayOfSingleScatteringData& scat_data,
1729  const Index& i_ss,
1730  const Numeric& T,
1731  const Index& interp_order,
1732  const Index& phamat_only,
1733  const Numeric& threshold,
1734  const Verbosity&) {
1735  // We act directly on the scat_data entries, modifying them individually.
1736  // That is, we don't need to resize these arrays; only the pha_mat (and,
1737  // if requested, also the ext_mat and abs_vec) Tensors are replaced (in the
1738  // latter case also T_grid!).
1739 
1740  // Check that species i_ss exists at all in scat_data
1741  const Index nss = scat_data.nelem();
1742  if (nss <= i_ss) {
1743  ostringstream os;
1744  os << "Can not T-reduce scattering species #" << i_ss << ".\n"
1745  << "*scat_data* contains only " << nss << " scattering species.";
1746  throw runtime_error(os.str());
1747  }
1748 
1749  // Loop over the included scattering elements
1750  for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
1751  // First of all, check the validity of the scattering element's ptype (so far
1752  // we only handle PTYPE_TOTAL_RND and PTYPE_AZIMUTH_RND).
1753  if (PART_TYPE != PTYPE_TOTAL_RND and PART_TYPE != PTYPE_AZIMUTH_RND) {
1754  ostringstream os;
1755  os << "Only ptypes " << PTYPE_TOTAL_RND << " and " << PTYPE_AZIMUTH_RND
1756  << " can be handled.\n"
1757  << "Scattering element #" << i_se << " has ptype " << PART_TYPE << ".";
1758  throw runtime_error(os.str());
1759  }
1760 
1761  // If ssd.T_grid already has only a single point, we do nothing.
1762  // This is not necessarily expected behaviour. BUT, it is in line with
1763  // previous use (that if nT==1, then assume ssd constant in T).
1764  Index nT = T_DATAGRID.nelem();
1765  if (nT > 1) {
1766  // Check that we do not have data that has already been T-reduced (in
1767  // pha_mat only; a complete ssd T-reduction should already have been
1768  // sorted out by the nT==1 case above).
1769  if (PHA_MAT_DATA.nvitrines() != nT) {
1770  ostringstream os;
1771  os << "Single scattering data of scat element #" << i_se
1772  << " of scat species #" << i_ss << "\n"
1773  << "seems to have undergone some temperature grid manipulation in\n"
1774  << "*pha_mat_data* already. That can not be done twice!";
1775  throw runtime_error(os.str());
1776  }
1777 
1778  // Check that ext_mat and abs_vec have the same temperature dimension as
1779  // T_grid. This should always be true; if not, it's a bug, not a user
1780  // mistake. Hence, use assert.
1781  assert(EXT_MAT_DATA.nbooks() == nT and ABS_VEC_DATA.nbooks() == nT);
1782 
1783  // Check that T_grid is consistent with requested interpolation order
1784  ostringstream ost;
1785  ost << "Scattering data temperature interpolation for\n"
1786  << "scat element #" << i_se << " of scat species #" << i_ss << ".";
1787  chk_interpolation_grids(ost.str(), T_DATAGRID, T, interp_order);
1788 
1789  // Gridpositions:
1790  GridPosPoly gp_T;
1791  gridpos_poly(gp_T, T_DATAGRID, T, interp_order);
1792  Vector itw(interp_order + 1);
1793  interpweights(itw, gp_T);
1794 
1795  //Sizing of temporary SSD data containers
1796  Tensor7 phamat_tmp(PHA_MAT_DATA.nlibraries(),
1797  1,
1798  PHA_MAT_DATA.nshelves(),
1799  PHA_MAT_DATA.nbooks(),
1800  PHA_MAT_DATA.npages(),
1801  PHA_MAT_DATA.nrows(),
1802  PHA_MAT_DATA.ncols(),
1803  0.);
1804  Tensor5 extmat_tmp(EXT_MAT_DATA.nshelves(),
1805  1,
1806  EXT_MAT_DATA.npages(),
1807  EXT_MAT_DATA.nrows(),
1808  EXT_MAT_DATA.ncols(),
1809  0.);
1810  Tensor5 absvec_tmp(ABS_VEC_DATA.nshelves(),
1811  1,
1812  ABS_VEC_DATA.npages(),
1813  ABS_VEC_DATA.nrows(),
1814  ABS_VEC_DATA.ncols(),
1815  0.);
1816 
1817  // a1) temp interpol of pha mat
1818  //We have to apply the interpolation separately for each of the pha_mat
1819  //entries, i.e. loop over all remaining size dimensions
1820  //We don't apply any transformation here, but interpolate the actual
1821  //stored ssd (i.e. not the 4x4matrices, but the 7-16 elements separately).
1822  for (Index i_f = 0; i_f < PHA_MAT_DATA.nlibraries(); i_f++)
1823  for (Index i_za1 = 0; i_za1 < PHA_MAT_DATA.nshelves(); i_za1++)
1824  for (Index i_aa1 = 0; i_aa1 < PHA_MAT_DATA.nbooks(); i_aa1++)
1825  for (Index i_za2 = 0; i_za2 < PHA_MAT_DATA.npages(); i_za2++)
1826  for (Index i_aa2 = 0; i_aa2 < PHA_MAT_DATA.nrows(); i_aa2++)
1827  for (Index i_st = 0; i_st < PHA_MAT_DATA.ncols(); i_st++)
1828  phamat_tmp(i_f, 0, i_za1, i_aa1, i_za2, i_aa2, i_st) =
1829  interp(itw,
1830  PHA_MAT_DATA(
1831  i_f, joker, i_za1, i_aa1, i_za2, i_aa2, i_st),
1832  gp_T);
1833 
1834  // a2) temp interpol of ext and abs.
1835  //We do that regardless of whether they should be reduced or not, because
1836  //we need them also for norm checking / renorming.
1837  for (Index i_f = 0; i_f < EXT_MAT_DATA.nshelves(); i_f++)
1838  for (Index i_za = 0; i_za < EXT_MAT_DATA.npages(); i_za++)
1839  for (Index i_aa = 0; i_aa < EXT_MAT_DATA.nrows(); i_aa++) {
1840  for (Index i_st = 0; i_st < EXT_MAT_DATA.ncols(); i_st++)
1841  extmat_tmp(i_f, 0, i_za, i_aa, i_st) =
1842  interp(itw, EXT_MAT_DATA(i_f, joker, i_za, i_aa, i_st), gp_T);
1843  for (Index i_st = 0; i_st < ABS_VEC_DATA.ncols(); i_st++)
1844  absvec_tmp(i_f, 0, i_za, i_aa, i_st) =
1845  interp(itw, ABS_VEC_DATA(i_f, joker, i_za, i_aa, i_st), gp_T);
1846  }
1847 
1848  // Norm & other consistency checks.
1849  // All done separately for PTYPE_TOTAL_RND and PTYPE_AZIMUTH_RND (the
1850  // latter needs to loop over scat_za_inc).
1851  //
1852  // b) calculate norm of T-reduced pha mat
1853  // c) check pha mat norm vs. sca xs from ext-abs at T_interpol
1854  // d) Ensure that T-reduced data is consistent/representative of all data.
1855  // and throw error/disallow reduction if sca xs varying too much.
1856  // d1) in case of pha_mat only reduction, the scat xs (pha_mat norm) needs
1857  // to be consistent with the sca xs from over the ext/abs T_grid. This is
1858  // essentially an energy conservation issue. That is, we should be as
1859  // strict here as with pha_mat norm deviations in general (however, should
1860  // we allow the norm at T_grid to deviate by a threshold from the
1861  // T_interpol norm (that should be a little looser) or from the ext-abs
1862  // derived expected norm?).
1863  // d2) in case of all-ssp reduction, the data should still be
1864  // representative. b)&c) ensure data consistency in itself, making this
1865  // rather an error on the SSP as such. Hence, we might be a little more
1866  // loose here.
1867  // d) the resulting check for d1) and d2) is the same (ext-abs sca xs at
1868  // T_interpol vs ext-abs sca xs at T_grid), but we use different
1869  // thresholds.
1870  //
1871  // FIXME?
1872  // Regarding b)&c) should we also calc norm of original-T pha mats? To get
1873  // a measure how strong they already deviate from expected norm (As we can
1874  // not be more exact here than what is already in the original data...).
1875  // On the other hand, a certain accuracy should be guaranteed from
1876  // scat_dataCheck already.
1877  // Hence, for now we skip that (but maybe added later when proves
1878  // necessary).
1879  //
1880  // FIXME?
1881  // Regarding d1), we could alternatively make sure here that norm at
1882  // T_interpol is good. And later on ignore any deviations between norm and
1883  // ext-abs sca xs and instead blindly renorm to expected norm (would that
1884  // be ok? correct norm here, doesn't imply correct norm at whatever scat
1885  // angle grids the user is applying. for that, we could in place also calc
1886  // the original-data norm. but that might be expensive (as we can't do
1887  // that from ext-abs sca xs, because we don't know to which T that refers.
1888  // that would go away if we'd actually store pha_mat normed to 1 or 4Pi.
1889  // but that's prob not going to happen. is it? Another option would be to
1890  // introduce an additional T_grid, eg T_grid_phamat.). which we actually
1891  // want to avoid :-/
1892 
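      // Added note (not part of the original source), the checks below in
      // formula form: with Csca the angular integral of the T-reduced Z11 and
      // Csca_data(T) = Cext_data(T) - Cabs_data(T) from the (non-reduced)
      // ext/abs data, step c) tests
      //   |Csca - Csca_data(T_interpol)| / Cext_data(T_interpol) > threshold
      // and step d) tests, for every T in T_grid,
      //   |Csca_data(T_interpol) - Csca_data(T)| / Cext_data(T) > this_threshold,
      // i.e. all deviations are measured as changes in the single scattering
      // albedo.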
1893  Numeric this_threshold;
1894  String errmsg;
1895  if (phamat_only) {
1896  this_threshold = threshold;
1897  errmsg =
1898  "T-reduced *pha_mat_data* norm (=sca xs) deviates too "
1899  "much from non-reduced *ext_mat_data* and *abs_vec_data*:";
1900  } else {
1901  this_threshold = 2 * threshold;
1902  errmsg =
1903  "T-reduced *scat_data* deviates too much from original "
1904  "*scat_data*:";
1905  }
1906 
1907  // The norm-check code is copied and slightly adapted from scat_dataCheck.
1908  // It might be better to make a function out of this and use it in both
1909  // places for consistency.
1910  //
1911  // FIXME: no checks on higher Stokes elements are done. Should there be?
1912  // Which?
1913  switch (PART_TYPE) {
1914  case PTYPE_TOTAL_RND: {
1915  for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1916  // b) calculate norm of T-reduced pha mat
1917  Numeric Csca = AngIntegrate_trapezoid(
1918  phamat_tmp(f, 0, joker, 0, 0, 0, 0), ZA_DATAGRID);
1919  Numeric Cext_data = extmat_tmp(f, 0, 0, 0, 0);
1920  //Numeric Cabs = Cext_data - Csca;
1921  Numeric Cabs_data = absvec_tmp(f, 0, 0, 0, 0);
1922  Numeric Csca_data = Cext_data - Cabs_data;
1923 
1924  /*
1925  cout << " Coefficients in data: "
1926  << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1927  << " Csca: " << Csca_data << "\n"
1928  << " Calculated coefficients: "
1929  << "Cabs calc: " << Cabs
1930  << " Csca calc: " << Csca << "\n"
1931  << " Deviations "
1932  << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1933  << "% Csca: " << 1e2*Csca/Csca_data-1e2
1934  << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1935  */
1936 
1937  // c) check pha mat norm vs. sca xs from ext-abs at T_interpol (as
1938  // albedo dev check)
1939  if (abs(Csca - Csca_data) / Cext_data > threshold) {
1940  ostringstream os;
1941  os << " Deviations in T-reduced scat_data too large:\n"
1942  << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1943  << " at nominal (actual) albedo of " << Csca_data / Cext_data
1944  << " (" << Csca / Cext_data << ").\n"
1945  << " Problem occurs for scattering element #" << i_se
1946  << " at " << f << ".frequency!\n";
1947  throw runtime_error(os.str());
1948  }
1949  Numeric norm_dev = (Csca - Csca_data) / Cext_data;
1950 
1951  // d) Ensure that T-reduced data is consistent/representative of all data.
1952  // below use theoretical (ext-abs derived) sca xs as reference.
1953  Csca = Csca_data;
1954  for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1955  Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1956  Csca_data = Cext_data - ABS_VEC_DATA(f, t, 0, 0, 0);
1957  Numeric xs_dev = (Csca - Csca_data) / Cext_data;
1958  if (abs(norm_dev + (Csca - Csca_data) / Cext_data) >
1959  this_threshold)
1960  cout << "Accumulated deviation (abs(" << norm_dev << "+"
1961  << xs_dev << ")=" << abs(norm_dev + xs_dev)
1962  << " exceeding threshold (" << this_threshold << ").\n";
1963  if (abs(Csca - Csca_data) / Cext_data > this_threshold) {
1964  ostringstream os;
1965  os << " " << errmsg << "\n"
1966  << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1967  << " at nominal (actual) albedo of " << Csca_data / Cext_data
1968  << " (" << Csca / Cext_data << ").\n"
1969  << " Problem occurs for scattering element #" << i_se
1970  << " at " << f << ".frequency and " << t
1971  << ".temperature!\n";
1972  throw runtime_error(os.str());
1973  }
1974  }
1975  }
1976  break;
1977  }
1978 
1979  case PTYPE_AZIMUTH_RND: {
1980  for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1981  for (Index iza = 0; iza < ABS_VEC_DATA.npages(); iza++) {
1982  // b) calculate norm of T-reduced pha mat
1983  Numeric Csca = 2 * AngIntegrate_trapezoid(
1984  phamat_tmp(f, 0, joker, joker, iza, 0, 0),
1985  ZA_DATAGRID,
1986  AA_DATAGRID);
1987  Numeric Cext_data = extmat_tmp(f, 0, iza, 0, 0);
1988  //Numeric Cabs = Cext_data - Csca;
1989  Numeric Cabs_data = absvec_tmp(f, 0, iza, 0, 0);
1990  Numeric Csca_data = Cext_data - Cabs_data;
1991 
1992  /*
1993  cout << " Coefficients in data: "
1994  << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1995  << " Csca: " << Csca_data << "\n"
1996  << " Calculated coefficients: "
1997  << "Cabs calc: " << Cabs
1998  << " Csca calc: " << Csca << "\n"
1999  << " Deviations "
2000  << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
2001  << "% Csca: " << 1e2*Csca/Csca_data-1e2
2002  << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
2003  */
2004 
2005  // c) check pha mat norm vs. sca xs from ext-abs at T_interpol (as
2006  // albedo dev check)
2007  if (abs(Csca - Csca_data) / Cext_data > threshold) {
2008  ostringstream os;
2009  os << " Deviations in T-reduced scat_data too large:\n"
2010  << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
2011  << " at nominal (actual) albedo of " << Csca_data / Cext_data
2012  << " (" << Csca / Cext_data << ").\n"
2013  << " Problem occurs for scattering element #" << i_se
2014  << " at " << f << ".frequency, and " << iza
2015  << ". incident polar angle!\n";
2016  throw runtime_error(os.str());
2017  }
2018 
2019  // d) Ensure that T-reduced data is consistent/representative of all data.
2020  // below use theoretical (ext-abs derived) sca xs as reference.
2021  Csca = Csca_data;
2022  for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
2023  Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
2024  Csca_data = Cext_data - ABS_VEC_DATA(f, t, 0, 0, 0);
2025  if (abs(Csca - Csca_data) / Cext_data > this_threshold) {
2026  ostringstream os;
2027  os << " " << errmsg << "\n"
2028  << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
2029  << " at nominal (actual) albedo of "
2030  << Csca_data / Cext_data << " (" << Csca / Cext_data
2031  << ").\n"
2032  << " Problem occurs for scattering element #" << i_se
2033  << " at " << f << ".frequency and " << t
2034  << ".temperature, and " << iza
2035  << ". incident polar angle!\n";
2036  throw runtime_error(os.str());
2037  }
2038  }
2039  }
2040  }
2041  break;
2042  }
2043 
2044  default: {
2045  // other ptype cases already excluded above. i.e. we shouldn't end up
2046  // here. If we do, that's a bug.
2047  assert(0);
2048  }
2049  }
2050 
2051  PHA_MAT_DATA = phamat_tmp;
2052  //We don't need to reset the scat element's grids!
2053  //Except for T_grid in the case that we reduce ALL three ssd variables.
2054  if (!phamat_only) {
2055  T_DATAGRID.resize(1);
2056  T_DATAGRID = T;
2057  EXT_MAT_DATA = extmat_tmp;
2058  ABS_VEC_DATA = absvec_tmp;
2059  }
2060  }
2061  }
2062 }
2063 
2064 /* Workspace method: Doxygen documentation will be auto-generated */
2065 void scat_data_monoCalc(ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2066  const ArrayOfArrayOfSingleScatteringData& scat_data,
2067  const Vector& f_grid,
2068  const Index& f_index,
2069  const Verbosity&) {
2070  //Extrapolation factor:
2071  //const Numeric extpolfac = 0.5;
2072 
2073  // Check whether the single scattering data contains the right frequencies.
2074  // The check was changed to allow extrapolation at the boundaries of the
2075  // frequency grid.
2076  for (Index h = 0; h < scat_data.nelem(); h++) {
2077  for (Index i = 0; i < scat_data[h].nelem(); i++) {
2078  // check with extrapolation
2079  chk_interpolation_grids("scat_data.f_grid to f_grid",
2080  scat_data[h][i].f_grid,
2081  f_grid[f_index]);
2082  }
2083  }
2084 
2085  //Initialise scat_data_mono
2086  scat_data_mono.resize(scat_data.nelem());
2087 
2088  // Loop over the included scattering species
2089  for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2090  const Index N_se = scat_data[i_ss].nelem();
2091 
2092  //Initialise scat_data_mono
2093  scat_data_mono[i_ss].resize(N_se);
2094 
2095  // Loop over the included scattering elements
2096  for (Index i_se = 0; i_se < N_se; i_se++) {
2097  // Gridpositions:
2098  GridPos freq_gp;
2099  gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
2100 
2101  // Interpolation weights:
2102  Vector itw(2);
2103  interpweights(itw, freq_gp);
2104 
2105  //Stuff that doesn't need interpolating
2106  scat_data_mono[i_ss][i_se].ptype = PART_TYPE;
2107  scat_data_mono[i_ss][i_se].f_grid.resize(1);
2108  scat_data_mono[i_ss][i_se].f_grid = f_grid[f_index];
2109  scat_data_mono[i_ss][i_se].T_grid = scat_data[i_ss][i_se].T_grid;
2110  scat_data_mono[i_ss][i_se].za_grid = ZA_DATAGRID;
2111  scat_data_mono[i_ss][i_se].aa_grid = AA_DATAGRID;
2112 
2113  //Phase matrix data
2114  scat_data_mono[i_ss][i_se].pha_mat_data.resize(1,
2115  PHA_MAT_DATA.nvitrines(),
2116  PHA_MAT_DATA.nshelves(),
2117  PHA_MAT_DATA.nbooks(),
2118  PHA_MAT_DATA.npages(),
2119  PHA_MAT_DATA.nrows(),
2120  PHA_MAT_DATA.ncols());
2121 
2122  for (Index t_index = 0; t_index < PHA_MAT_DATA.nvitrines(); t_index++) {
2123  for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
2124  i_za_sca++) {
2125  for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
2126  i_aa_sca++) {
2127  for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
2128  i_za_inc++) {
2129  for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
2130  i_aa_inc++) {
2131  for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++) {
2132  scat_data_mono[i_ss][i_se].pha_mat_data(
2133  0, t_index, i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
2134  interp(itw,
2135  PHA_MAT_DATA(joker,
2136  t_index,
2137  i_za_sca,
2138  i_aa_sca,
2139  i_za_inc,
2140  i_aa_inc,
2141  i),
2142  freq_gp);
2143  }
2144  }
2145  }
2146  }
2147  }
2148  //Extinction matrix data
2149  scat_data_mono[i_ss][i_se].ext_mat_data.resize(1,
2150  T_DATAGRID.nelem(),
2151  EXT_MAT_DATA.npages(),
2152  EXT_MAT_DATA.nrows(),
2153  EXT_MAT_DATA.ncols());
2154  for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages(); i_za_sca++) {
2155  for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
2156  i_aa_sca++) {
2157  //
2158  // Interpolation of extinction matrix:
2159  //
2160  for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
2161  scat_data_mono[i_ss][i_se].ext_mat_data(
2162  0, t_index, i_za_sca, i_aa_sca, i) =
2163  interp(itw,
2164  EXT_MAT_DATA(joker, t_index, i_za_sca, i_aa_sca, i),
2165  freq_gp);
2166  }
2167  }
2168  }
2169  //Absorption vector data
2170  scat_data_mono[i_ss][i_se].abs_vec_data.resize(1,
2171  T_DATAGRID.nelem(),
2172  ABS_VEC_DATA.npages(),
2173  ABS_VEC_DATA.nrows(),
2174  ABS_VEC_DATA.ncols());
2175  for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages(); i_za_sca++) {
2176  for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
2177  i_aa_sca++) {
2178  //
2179  // Interpolation of absorption vector:
2180  //
2181  for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
2182  scat_data_mono[i_ss][i_se].abs_vec_data(
2183  0, t_index, i_za_sca, i_aa_sca, i) =
2184  interp(itw,
2185  ABS_VEC_DATA(joker, t_index, i_za_sca, i_aa_sca, i),
2186  freq_gp);
2187  }
2188  }
2189  }
2190  }
2191  }
2192  }
2193 }
2194 
2195 /* Workspace method: Doxygen documentation will be auto-generated */
2196 void scat_data_monoExtract(ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2197  const ArrayOfArrayOfSingleScatteringData& scat_data,
2198  const Index& f_index,
2199  const Verbosity&) {
2200  //Initialise scat_data_mono
2201  scat_data_mono.resize(scat_data.nelem());
2202 
2203  // Loop over the included scattering species
2204  for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2205  const Index N_se = scat_data[i_ss].nelem();
2206 
2207  //Initialise scat_data_mono
2208  scat_data_mono[i_ss].resize(N_se);
2209 
2210  // Loop over the included scattering elements
2211  for (Index i_se = 0; i_se < N_se; i_se++) {
2212  Index nf = F_DATAGRID.nelem();
2213  if (nf == 1) {
2214  scat_data_mono[i_ss][i_se] = scat_data[i_ss][i_se];
2215  } else {
2216  //Stuff that doesn't need interpolating
2217  scat_data_mono[i_ss][i_se].ptype = PART_TYPE;
2218  scat_data_mono[i_ss][i_se].T_grid = T_DATAGRID;
2219  scat_data_mono[i_ss][i_se].za_grid = ZA_DATAGRID;
2220  scat_data_mono[i_ss][i_se].aa_grid = AA_DATAGRID;
2221 
2222  scat_data_mono[i_ss][i_se].f_grid.resize(1);
2223  scat_data_mono[i_ss][i_se].f_grid = F_DATAGRID[f_index];
2224 
2225  Index this_f_index;
2226 
2227  //Phase matrix data
2228  /*scat_data_mono[i_ss][i_se].pha_mat_data.resize(1,
2229  PHA_MAT_DATA.nvitrines(),
2230  PHA_MAT_DATA.nshelves(),
2231  PHA_MAT_DATA.nbooks(),
2232  PHA_MAT_DATA.npages(),
2233  PHA_MAT_DATA.nrows(),
2234  PHA_MAT_DATA.ncols());*/
2235  this_f_index = (PHA_MAT_DATA.nlibraries() == 1) ? 0 : f_index;
2236  scat_data_mono[i_ss][i_se].pha_mat_data = PHA_MAT_DATA(
2237  Range(this_f_index, 1), joker, joker, joker, joker, joker, joker);
2238 
2239  //Extinction matrix data
2240  /*scat_data_mono[i_ss][i_se].ext_mat_data.resize(1, T_DATAGRID.nelem(),
2241  EXT_MAT_DATA.npages(),
2242  EXT_MAT_DATA.nrows(),
2243  EXT_MAT_DATA.ncols());*/
2244  this_f_index = (EXT_MAT_DATA.nshelves() == 1) ? 0 : f_index;
2245  scat_data_mono[i_ss][i_se].ext_mat_data =
2246  EXT_MAT_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
2247 
2248  //Absorption vector data
2249  /*scat_data_mono[i_ss][i_se].abs_vec_data.resize(1, T_DATAGRID.nelem(),
2250  ABS_VEC_DATA.npages(),
2251  ABS_VEC_DATA.nrows(),
2252  ABS_VEC_DATA.ncols());*/
2253  this_f_index = (ABS_VEC_DATA.nshelves() == 1) ? 0 : f_index;
2254  scat_data_mono[i_ss][i_se].abs_vec_data =
2255  ABS_VEC_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
2256  }
2257  }
2258  }
2259 }
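// ----------------------------------------------------------------------------
// Added note (not part of the original ARTS source): scat_data_monoCalc and
// scat_data_monoExtract above both produce single-frequency scattering data,
// but in different ways. scat_data_monoCalc linearly interpolates scat_data in
// frequency to f_grid[f_index], whereas scat_data_monoExtract merely slices the
// data at f_index (or at index 0 for fields that already hold a single
// frequency), using Range(f, 1) so that the frequency dimension keeps length 1.
// The extract variant therefore assumes that the data's f_grid already
// corresponds, index by index, to the calculation's f_grid.
// ----------------------------------------------------------------------------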
2260 
2261 /* Workspace method: Doxygen documentation will be auto-generated */
2262 void opt_prop_sptFromMonoData( // Output and Input:
2263  ArrayOfPropagationMatrix& ext_mat_spt,
2264  ArrayOfStokesVector& abs_vec_spt,
2265  // Input:
2266  const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2267  const Vector& za_grid,
2268  const Vector& aa_grid,
2269  const Index& za_index, // propagation directions
2270  const Index& aa_index,
2271  const Numeric& rtp_temperature,
2272  const Tensor4& pnd_field,
2273  const Index& scat_p_index,
2274  const Index& scat_lat_index,
2275  const Index& scat_lon_index,
2276  const Verbosity& verbosity) {
2277  DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data_mono);)
2278  const Index stokes_dim = ext_mat_spt[0].StokesDimensions();
2279  const Numeric za_sca = za_grid[za_index];
2280  const Numeric aa_sca = aa_grid[aa_index];
2281 
2282  if (stokes_dim > 4 or stokes_dim < 1) {
2283  throw runtime_error(
2284  "The dimension of the stokes vector \n"
2285  "must be 1,2,3 or 4");
2286  }
2287 
2288  assert(ext_mat_spt.nelem() == N_se_total);
2289  assert(abs_vec_spt.nelem() == N_se_total);
2290 
2291  // Check that we do indeed have scat_data_mono here. We only check the first
2292  // scat element, assuming the other elements have been processed in the same
2293  // manner. That is safe against having scat_data here if it originated from the
2294  // scat_data_raw reading routines (ScatSpecies/Element*Add/Read), but it is not
2295  // safe against data read by ReadXML directly or scat_data(_raw) that has been
2296  // (partly) produced from scat_data_singleTmatrix. Checking for that would be
2297  // too costly here, though.
2298  // Also, we can't check here whether the data is at the correct frequency since
2299  // we don't know f_grid and f_index here (we could pass them in, though).
2300  if (scat_data_mono[0][0].f_grid.nelem() > 1) {
2301  ostringstream os;
2302  os << "Scattering data seems to be *scat_data* (several freq points),\n"
2303  << "but *scat_data_mono* (1 freq point only) is expected here.";
2304  throw runtime_error(os.str());
2305  }
2306 
2307  // Initialisation
2308  for (auto& pm : ext_mat_spt) pm.SetZero();
2309  for (auto& av : abs_vec_spt) av.SetZero();
2310 
2311  GridPos t_gp;
2312 
2313  Vector itw(2);
2314 
2315  Index i_se_flat = 0;
2316  // Loop over the included scattering species
2317  for (Index i_ss = 0; i_ss < scat_data_mono.nelem(); i_ss++) {
2318  // Loop over the included scattering elements
2319  for (Index i_se = 0; i_se < scat_data_mono[i_ss].nelem(); i_se++) {
2320  // If the particle number density at a specific point in the
2321  // atmosphere for the i_se scattering element is zero, we don't need
2322  // to do the transformation!
2323  if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
2324  PND_LIMIT) {
2325  // First we have to transform the data from the coordinate system
2326  // used in the database (depending on the kind of ptype) to the
2327  // laboratory coordinate system.
2328 
2329  //
2330  // Do the transformation into the laboratory coordinate system.
2331  //
2332  // Extinction matrix:
2333  //
2334  Index ext_npages = scat_data_mono[i_ss][i_se].ext_mat_data.npages();
2335  Index ext_nrows = scat_data_mono[i_ss][i_se].ext_mat_data.nrows();
2336  Index ext_ncols = scat_data_mono[i_ss][i_se].ext_mat_data.ncols();
2337  Index abs_npages = scat_data_mono[i_ss][i_se].abs_vec_data.npages();
2338  Index abs_nrows = scat_data_mono[i_ss][i_se].abs_vec_data.nrows();
2339  Index abs_ncols = scat_data_mono[i_ss][i_se].abs_vec_data.ncols();
2340 
2341  //Check that scattering data temperature range covers required temperature
2342  ConstVectorView t_grid = scat_data_mono[i_ss][i_se].T_grid;
2343 
2344  if (t_grid.nelem() > 1) {
2345  ostringstream os;
2346  os << "In opt_prop_sptFromMonoData.\n"
2347  << "The temperature grid of the scattering data does not\n"
2348  << "cover the atmospheric temperature at cloud location.\n"
2349  << "The data should include the value T = " << rtp_temperature
2350  << " K.";
2351  chk_interpolation_grids(os.str(), t_grid, rtp_temperature);
2352 
2353  //interpolate over temperature
2354  Tensor3 ext_mat_data1temp(ext_npages, ext_nrows, ext_ncols);
2355  gridpos(t_gp, t_grid, rtp_temperature);
2356  interpweights(itw, t_gp);
2357  for (Index i_p = 0; i_p < ext_npages; i_p++) {
2358  for (Index i_r = 0; i_r < ext_nrows; i_r++) {
2359  for (Index i_c = 0; i_c < ext_ncols; i_c++) {
2360  ext_mat_data1temp(i_p, i_r, i_c) =
2361  interp(itw,
2362  scat_data_mono[i_ss][i_se].ext_mat_data(
2363  0, joker, i_p, i_r, i_c),
2364  t_gp);
2365  }
2366  }
2367  }
2368  ext_matTransform(ext_mat_spt[i_se_flat],
2369  ext_mat_data1temp,
2370  scat_data_mono[i_ss][i_se].za_grid,
2371  scat_data_mono[i_ss][i_se].aa_grid,
2372  scat_data_mono[i_ss][i_se].ptype,
2373  za_sca,
2374  aa_sca,
2375  verbosity);
2376  } else {
2377  ext_matTransform(ext_mat_spt[i_se_flat],
2378  scat_data_mono[i_ss][i_se].ext_mat_data(
2379  0, 0, joker, joker, joker),
2380  scat_data_mono[i_ss][i_se].za_grid,
2381  scat_data_mono[i_ss][i_se].aa_grid,
2382  scat_data_mono[i_ss][i_se].ptype,
2383  za_sca,
2384  aa_sca,
2385  verbosity);
2386  }
2387  //
2388  // Absorption vector:
2389  //
2390 
2391  if (t_grid.nelem() > 1) {
2392  Tensor3 abs_vec_data1temp(abs_npages, abs_nrows, abs_ncols);
2393  //interpolate over temperature
2394  for (Index i_p = 0; i_p < abs_npages; i_p++) {
2395  for (Index i_r = 0; i_r < abs_nrows; i_r++) {
2396  for (Index i_c = 0; i_c < abs_ncols; i_c++) {
2397  abs_vec_data1temp(i_p, i_r, i_c) =
2398  interp(itw,
2399  scat_data_mono[i_ss][i_se].abs_vec_data(
2400  0, joker, i_p, i_r, i_c),
2401  t_gp);
2402  }
2403  }
2404  }
2405  abs_vecTransform(abs_vec_spt[i_se_flat],
2406  abs_vec_data1temp,
2407  scat_data_mono[i_ss][i_se].za_grid,
2408  scat_data_mono[i_ss][i_se].aa_grid,
2409  scat_data_mono[i_ss][i_se].ptype,
2410  za_sca,
2411  aa_sca,
2412  verbosity);
2413  } else {
2414  abs_vecTransform(abs_vec_spt[i_se_flat],
2415  scat_data_mono[i_ss][i_se].abs_vec_data(
2416  0, 0, joker, joker, joker),
2417  scat_data_mono[i_ss][i_se].za_grid,
2418  scat_data_mono[i_ss][i_se].aa_grid,
2419  scat_data_mono[i_ss][i_se].ptype,
2420  za_sca,
2421  aa_sca,
2422  verbosity);
2423  }
2424  }
2425 
2426  i_se_flat++;
2427  }
2428  }
2429 }
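// ----------------------------------------------------------------------------
// Illustrative sketch (added note; not part of the original ARTS source): the
// single-point, linear temperature interpolation pattern used in the method
// above. One GridPos and two weights suffice because the interpolation is
// linear. The helper name and its arguments (y_of_T, T_grid_in, T_out) are
// hypothetical.
static Numeric interp_at_single_T(ConstVectorView y_of_T,    // data on T_grid_in
                                  ConstVectorView T_grid_in,
                                  const Numeric T_out) {
  GridPos gp;
  gridpos(gp, T_grid_in, T_out);   // locate T_out within T_grid_in
  Vector itw(2);
  interpweights(itw, gp);          // the two linear weights
  return interp(itw, y_of_T, gp);  // weighted sum of the two neighbours
}
// ----------------------------------------------------------------------------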
2430 
2431 /* Workspace method: Doxygen documentation will be auto-generated */
2432 void pha_mat_sptFromMonoData( // Output:
2433  Tensor5& pha_mat_spt,
2434  // Input:
2435  const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2436  const Index& doit_za_grid_size,
2437  const Vector& aa_grid,
2438  const Index& za_index, // propagation directions
2439  const Index& aa_index,
2440  const Numeric& rtp_temperature,
2441  const Tensor4& pnd_field,
2442  const Index& scat_p_index,
2443  const Index& scat_lat_index,
2444  const Index& scat_lon_index,
2445  const Verbosity& verbosity) {
2446  Vector za_grid;
2447  nlinspace(za_grid, 0, 180, doit_za_grid_size);
2448 
2449  const Index N_se_total = TotalNumberOfElements(scat_data_mono);
2450  if (N_se_total != pnd_field.nbooks()) {
2451  ostringstream os;
2452  os << "Total number of scattering elements in *scat_data_mono* "
2453  << "inconsistent with size of pnd_field.";
2454  throw runtime_error(os.str());
2455  }
2456  // as pha_mat_spt is typically initialized from pnd_field, this theoretically
2457  // checks the same as the runtime_error above. Still, we keep it to be on the
2458  // safe side.
2459  assert(pha_mat_spt.nshelves() == N_se_total);
2460 
2461  const Index stokes_dim = pha_mat_spt.ncols();
2462  if (stokes_dim > 4 || stokes_dim < 1) {
2463  throw runtime_error(
2464  "The dimension of the stokes vector \n"
2465  "must be 1,2,3 or 4");
2466  }
2467 
2468  // Check that we do indeed have scat_data_mono here. We only check the first
2469  // scat element, assuming the other elements have been processed in the same
2470  // manner. That is safe against having scat_data here if it originated from the
2471  // scat_data_raw reading routines (ScatSpecies/Element*Add/Read), but it is not
2472  // safe against data read by ReadXML directly or scat_data(_raw) that has been
2473  // (partly) produced from scat_data_singleTmatrix. Checking for that would be
2474  // too costly here, though.
2475  // Also, we can't check here whether the data is at the correct frequency since
2476  // we don't know f_grid and f_index here (we could pass them in, though).
2477  if (scat_data_mono[0][0].f_grid.nelem() > 1) {
2478  ostringstream os;
2479  os << "Scattering data seems to be *scat_data* (several freq points),\n"
2480  << "but *scat_data_mono* (1 freq point only) is expected here.";
2481  throw runtime_error(os.str());
2482  }
2483 
2484  GridPos T_gp = {0, {0, 1}}, Tred_gp;
2485  Vector itw(2);
2486 
2487  // Initialisation
2488  pha_mat_spt = 0.;
2489 
2490  Index i_se_flat = 0;
2491  for (Index i_ss = 0; i_ss < scat_data_mono.nelem(); i_ss++) {
2492  for (Index i_se = 0; i_se < scat_data_mono[i_ss].nelem(); i_se++) {
2493  // If the particle number density at a specific point in the
2494  // atmosphere for scattering element i_se is zero, we don't need to
2495  // do the transformation!
2496  if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
2497  PND_LIMIT) {
2498  // Temporary phase matrix which includes all temperatures.
2499  Index nT = scat_data_mono[i_ss][i_se].pha_mat_data.nvitrines();
2500  Tensor3 pha_mat_spt_tmp(nT, pha_mat_spt.nrows(), pha_mat_spt.ncols());
2501 
2502  pha_mat_spt_tmp = 0.;
2503 
2504  Index ti = -1;
2505  if (nT == 1) // just 1 T_grid element
2506  {
2507  ti = 0;
2508  } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
2509  // pick one temperature'
2510  {
2511  if (rtp_temperature > -10.) // lowest T-point
2512  {
2513  ti = 0;
2514  } else if (rtp_temperature > -20.) // highest T-point
2515  {
2516  ti = nT - 1;
2517  } else // median T-point
2518  {
2519  ti = nT / 2;
2520  }
2521  } else {
2522  ostringstream os;
2523  os << "In pha_mat_sptFromMonoData.\n"
2524  << "The temperature grid of the scattering data does not\n"
2525  << "cover the atmospheric temperature at cloud location.\n"
2526  << "The data should include the value T = " << rtp_temperature
2527  << " K.";
2528  chk_interpolation_grids(
2529  os.str(), scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
2530 
2531  // Gridpositions:
2532  gridpos(T_gp, scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
2533  gridpos_copy(Tred_gp, T_gp);
2534  Tred_gp.idx = 0;
2535  // Interpolation weights:
2536  interpweights(itw, Tred_gp);
2537  }
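          // Summary note (added; not part of the original source): negative
          // rtp_temperature values act as a flag meaning "do not interpolate
          // in temperature, pick a single T_grid point instead":
          //    -10 < rtp_temperature <   0  -> lowest  T-point (index 0)
          //    -20 < rtp_temperature <= -10 -> highest T-point (index nT-1)
          //          rtp_temperature <= -20 -> median  T-point (index nT/2)
          // Non-negative values lead to the regular two-point interpolation
          // set up in the else branch above.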
2538 
2539  // Do the transformation into the laboratory coordinate system.
2540  for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
2541  za_inc_idx++) {
2542  for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
2543  aa_inc_idx++) {
2544  if (ti < 0) // Temperature interpolation
2545  {
2546  for (Index t_idx = 0; t_idx < 2; t_idx++) {
2547  pha_matTransform(
2548  pha_mat_spt_tmp(t_idx, joker, joker),
2549  scat_data_mono[i_ss][i_se].pha_mat_data(
2550  0, t_idx + T_gp.idx, joker, joker, joker, joker, joker),
2551  scat_data_mono[i_ss][i_se].za_grid,
2552  scat_data_mono[i_ss][i_se].aa_grid,
2553  scat_data_mono[i_ss][i_se].ptype,
2554  za_index,
2555  aa_index,
2556  za_inc_idx,
2557  aa_inc_idx,
2558  za_grid,
2559  aa_grid,
2560  verbosity);
2561  }
2562 
2563  for (Index i = 0; i < stokes_dim; i++) {
2564  for (Index j = 0; j < stokes_dim; j++) {
2565  pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j) =
2566  interp(itw, pha_mat_spt_tmp(joker, i, j), Tred_gp);
2567  }
2568  }
2569  } else // no temperature interpolation required
2570  {
2571  pha_matTransform(
2572  pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
2573  scat_data_mono[i_ss][i_se].pha_mat_data(
2574  0, ti, joker, joker, joker, joker, joker),
2575  scat_data_mono[i_ss][i_se].za_grid,
2576  scat_data_mono[i_ss][i_se].aa_grid,
2577  scat_data_mono[i_ss][i_se].ptype,
2578  za_index,
2579  aa_index,
2580  za_inc_idx,
2581  aa_inc_idx,
2582  za_grid,
2583  aa_grid,
2584  verbosity);
2585  }
2586  }
2587  }
2588  }
2589 
2590  i_se_flat++;
2591  }
2592  }
2593 }
2594 
2595 /* Workspace method: Doxygen documentation will be auto-generated */
2596 void pha_mat_sptFromScat_data( // Output:
2597  Tensor5& pha_mat_spt,
2598  // Input:
2599  const ArrayOfArrayOfSingleScatteringData& scat_data,
2600  const Index& scat_data_checked,
2601  const Vector& za_grid,
2602  const Vector& aa_grid,
2603  const Index& za_index, // propagation directions
2604  const Index& aa_index,
2605  const Index& f_index,
2606  const Numeric& rtp_temperature,
2607  const Tensor4& pnd_field,
2608  const Index& scat_p_index,
2609  const Index& scat_lat_index,
2610  const Index& scat_lon_index,
2611  const Verbosity& verbosity) {
2612  if (scat_data_checked != 1)
2613  throw runtime_error(
2614  "The scattering data must be flagged to have "
2615  "passed a consistency check (scat_data_checked=1).");
2616 
2617  const Index stokes_dim = pha_mat_spt.ncols();
2618  if (stokes_dim > 4 || stokes_dim < 1) {
2619  throw runtime_error(
2620  "The dimension of the stokes vector \n"
2621  "must be 1,2,3 or 4");
2622  }
2623 
2624  // Determine total number of scattering elements
2625  const Index N_se_total = TotalNumberOfElements(scat_data);
2626  if (N_se_total != pnd_field.nbooks()) {
2627  ostringstream os;
2628  os << "Total number of scattering elements in scat_data "
2629  << "inconsistent with size of pnd_field.";
2630  throw runtime_error(os.str());
2631  }
2632  // as pha_mat_spt is typically initialized from pnd_field, this theoretically
2633  // checks the same as the runtime_error above. Still, we keep it to be on the
2634  // safe side.
2635  assert(pha_mat_spt.nshelves() == N_se_total);
2636 
2637  const Index N_ss = scat_data.nelem();
2638 
2639  // Phase matrix in laboratory coordinate system. Dimensions:
2640  // [frequency, za_inc, aa_inc, stokes_dim, stokes_dim]
2641  Tensor5 pha_mat_data_int;
2642 
2643  Index this_f_index;
2644 
2645  Index i_se_flat = 0;
2646  // Loop over scattering species
2647  for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
2648  const Index N_se = scat_data[i_ss].nelem();
2649 
2650  // Loop over the included scattering elements
2651  for (Index i_se = 0; i_se < N_se; i_se++) {
2652  // If the particle number density at a specific point in the
2653  // atmosphere for the i_se scattering element is zero, we don't need
2654  // to do the transformation!
2655  if (abs(pnd_field(
2656  i_se_flat, scat_p_index, scat_lat_index, scat_lon_index)) >
2657  PND_LIMIT) {
2658  // First we have to transform the data from the coordinate system
2659  // used in the database (depending on the kind of ptype) to the
2660  // laboratory coordinate system.
2661 
2662  // Resize the variables for the interpolated data (1freq, 1T):
2663  pha_mat_data_int.resize(PHA_MAT_DATA.nshelves(),
2664  PHA_MAT_DATA.nbooks(),
2665  PHA_MAT_DATA.npages(),
2666  PHA_MAT_DATA.nrows(),
2667  PHA_MAT_DATA.ncols());
2668 
2669  // Frequency extraction and temperature interpolation
2670 
2671  // Gridpositions and interpolation weights;
2672  GridPos t_gp;
2673  Vector itw;
2674  Index this_T_index = -1;
2675  if (PHA_MAT_DATA.nvitrines() == 1) {
2676  this_T_index = 0;
2677  } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
2678  // pick one temperature'
2679  {
2680  if (rtp_temperature > -10.) // lowest T-point
2681  {
2682  this_T_index = 0;
2683  } else if (rtp_temperature > -20.) // highest T-point
2684  {
2685  this_T_index = PHA_MAT_DATA.nvitrines() - 1;
2686  } else // median T-point
2687  {
2688  this_T_index = PHA_MAT_DATA.nvitrines() / 2;
2689  }
2690  } else {
2691  ostringstream os;
2692  os << "In pha_mat_sptFromScat_data.\n"
2693  << "The temperature grid of the scattering data does not\n"
2694  << "cover the atmospheric temperature at cloud location.\n"
2695  << "The data should include the value T = " << rtp_temperature
2696  << " K.";
2697  chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
2698 
2699  gridpos(t_gp, T_DATAGRID, rtp_temperature);
2700 
2701  // Interpolation weights:
2702  itw.resize(2);
2703  interpweights(itw, t_gp);
2704  }
2705 
2706  if (PHA_MAT_DATA.nlibraries() == 1)
2707  this_f_index = 0;
2708  else
2709  this_f_index = f_index;
2710 
2711  if (this_T_index < 0) {
2712  // Interpolation of scattering matrix:
2713  for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
2714  i_za_sca++)
2715  for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
2716  i_aa_sca++)
2717  for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
2718  i_za_inc++)
2719  for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
2720  i_aa_inc++)
2721  for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
2722  pha_mat_data_int(
2723  i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
2724  interp(itw,
2725  PHA_MAT_DATA(this_f_index,
2726  joker,
2727  i_za_sca,
2728  i_aa_sca,
2729  i_za_inc,
2730  i_aa_inc,
2731  i),
2732  t_gp);
2733  } else {
2734  pha_mat_data_int = PHA_MAT_DATA(
2735  this_f_index, this_T_index, joker, joker, joker, joker, joker);
2736  /*
2737  for (Index i_za_sca = 0;
2738  i_za_sca < PHA_MAT_DATA.nshelves(); i_za_sca++)
2739  for (Index i_aa_sca = 0;
2740  i_aa_sca < PHA_MAT_DATA.nbooks(); i_aa_sca++)
2741  for (Index i_za_inc = 0;
2742  i_za_inc < PHA_MAT_DATA.npages(); i_za_inc++)
2743  for (Index i_aa_inc = 0;
2744  i_aa_inc < PHA_MAT_DATA.nrows(); i_aa_inc++)
2745  for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
2746  // Interpolation of phase matrix:
2747  pha_mat_data_int(i_za_sca, i_aa_sca,
2748  i_za_inc, i_aa_inc, i) =
2749  PHA_MAT_DATA(this_f_index, this_T_index,
2750  i_za_sca, i_aa_sca,
2751  */
2752  }
2753 
2754  // Do the transformation into the laboratory coordinate system.
2755  for (Index za_inc_idx = 0; za_inc_idx < za_grid.nelem();
2756  za_inc_idx++) {
2757  for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
2758  aa_inc_idx++) {
2759  pha_matTransform(
2760  pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
2761  pha_mat_data_int,
2762  ZA_DATAGRID,
2763  AA_DATAGRID,
2764  PART_TYPE,
2765  za_index,
2766  aa_index,
2767  za_inc_idx,
2768  aa_inc_idx,
2769  za_grid,
2770  aa_grid,
2771  verbosity);
2772  }
2773  }
2774  }
2775  i_se_flat++;
2776  }
2777  }
2778 }
2779 
2780 /* Workspace method: Doxygen documentation will be auto-generated */
2781 void ScatSpeciesMerge( //WS Output:
2782  Tensor4& pnd_field,
2783  ArrayOfArrayOfSingleScatteringData& scat_data,
2784  ArrayOfArrayOfScatteringMetaData& scat_meta,
2785  ArrayOfString& scat_species,
2786  Index& cloudbox_checked,
2787  //WS Input:
2788  const Index& atmosphere_dim,
2789  const Index& cloudbox_on,
2790  const ArrayOfIndex& cloudbox_limits,
2791  const Tensor3& t_field,
2792  const Tensor3& z_field,
2793  const Matrix& z_surface,
2794  const Verbosity& /*verbosity*/) {
2795  // FIXME:
2796  // so far, this works for both scat_data and scat_data_raw. Needs to be
2797  // adapted, though, once we have WSM that can create Z/K/a with different
2798  // f/T dimensions than scat_data_single.f/T_grid.
2799 
2800  // cloudbox variable state should be ok before entering here
2801  if (!cloudbox_checked)
2802  throw std::runtime_error(
2803  "You must call *cloudbox_checkedCalc* before this method.");
2804  // However, we modify cloudbox variables; hence force re-checking of the new
2805  // variables by resetting cloudbox_checked to False.
2806  cloudbox_checked = 0;
2807 
2808  if (atmosphere_dim != 1)
2809  throw std::runtime_error(
2810  "Merging scattering elements only works with a 1D atmosphere");
2811 
2812  // Cloudbox on/off?
2813  if (!cloudbox_on) {
2814  /* Must initialise pnd_field anyway; but empty */
2815  pnd_field.resize(0, 0, 0, 0);
2816  return;
2817  }
2818 
2819  // ------- setup new pnd_field and scat_data -------------------
2820  ArrayOfIndex limits(2);
2821  //pressure
2822  limits[0] = cloudbox_limits[0];
2823  limits[1] = cloudbox_limits[1] + 1;
2824 
2825  Tensor4 pnd_field_merged(
2826  limits[1] - limits[0], limits[1] - limits[0], 1, 1, 0.);
2827 
2828  ArrayOfArrayOfSingleScatteringData scat_data_merged;
2829  scat_data_merged.resize(1);
2830  scat_data_merged[0].resize(pnd_field_merged.nbooks());
2831  ArrayOfArrayOfScatteringMetaData scat_meta_merged;
2832  scat_meta_merged.resize(1);
2833  scat_meta_merged[0].resize(pnd_field_merged.nbooks());
2834  ArrayOfString scat_species_merged;
2835  scat_species_merged.resize(1);
2836  scat_species_merged[0] = "mergedfield-mergedpsd";
2837  for (Index sp = 0; sp < scat_data_merged[0].nelem(); sp++) {
2838  SingleScatteringData& this_part = scat_data_merged[0][sp];
2839  this_part.ptype = scat_data[0][0].ptype;
2840  this_part.description = "Merged scattering elements";
2841  this_part.f_grid = scat_data[0][0].f_grid;
2842  this_part.za_grid = scat_data[0][0].za_grid;
2843  this_part.aa_grid = scat_data[0][0].aa_grid;
2844  this_part.pha_mat_data.resize(scat_data[0][0].pha_mat_data.nlibraries(),
2845  1,
2846  scat_data[0][0].pha_mat_data.nshelves(),
2847  scat_data[0][0].pha_mat_data.nbooks(),
2848  scat_data[0][0].pha_mat_data.npages(),
2849  scat_data[0][0].pha_mat_data.nrows(),
2850  scat_data[0][0].pha_mat_data.ncols());
2851  this_part.ext_mat_data.resize(scat_data[0][0].ext_mat_data.nshelves(),
2852  1,
2853  scat_data[0][0].ext_mat_data.npages(),
2854  scat_data[0][0].ext_mat_data.nrows(),
2855  scat_data[0][0].ext_mat_data.ncols());
2856  this_part.abs_vec_data.resize(scat_data[0][0].abs_vec_data.nshelves(),
2857  1,
2858  scat_data[0][0].abs_vec_data.npages(),
2859  scat_data[0][0].abs_vec_data.nrows(),
2860  scat_data[0][0].abs_vec_data.ncols());
2861  this_part.pha_mat_data = 0.;
2862  this_part.ext_mat_data = 0.;
2863  this_part.abs_vec_data = 0.;
2864  this_part.T_grid.resize(1);
2865  this_part.T_grid[0] = t_field(sp, 0, 0);
2866 
2867  ScatteringMetaData& this_meta = scat_meta_merged[0][sp];
2868  ostringstream os;
2869  os << "Merged scattering element of cloudbox-level #" << sp;
2870  this_meta.description = os.str();
2871  this_meta.source = "ARTS internal";
2872  this_meta.refr_index = "Unknown";
2873  this_meta.mass = -1.;
2874  this_meta.diameter_max = -1.;
2875  this_meta.diameter_volume_equ = -1.;
2876  this_meta.diameter_area_equ_aerodynamical = -1.;
2877  }
2878 
2879  // Check that all scattering elements have same ptype and data dimensions
2880  SingleScatteringData& first_part = scat_data[0][0];
2881  for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2882  for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
2883  SingleScatteringData& orig_part = scat_data[i_ss][i_se];
2884 
2885  if (orig_part.ptype != first_part.ptype)
2886  throw std::runtime_error(
2887  "All scattering elements must have the same type");
2888 
2889  if (orig_part.f_grid.nelem() != first_part.f_grid.nelem())
2890  throw std::runtime_error(
2891  "All scattering elements must have the same f_grid");
2892 
2893  if (!is_size(orig_part.pha_mat_data(
2894  joker, 0, joker, joker, joker, joker, joker),
2895  first_part.pha_mat_data.nlibraries(),
2896  first_part.pha_mat_data.nshelves(),
2897  first_part.pha_mat_data.nbooks(),
2898  first_part.pha_mat_data.npages(),
2899  first_part.pha_mat_data.nrows(),
2900  first_part.pha_mat_data.ncols()))
2901  throw std::runtime_error(
2902  "All scattering elements must have the same pha_mat_data size"
2903  " (except for temperature).");
2904 
2905  if (!is_size(orig_part.ext_mat_data(joker, 0, joker, joker, joker),
2906  first_part.ext_mat_data.nshelves(),
2907  first_part.ext_mat_data.npages(),
2908  first_part.ext_mat_data.nrows(),
2909  first_part.ext_mat_data.ncols()))
2910  throw std::runtime_error(
2911  "All scattering elements must have the same ext_mat_data size"
2912  " (except for temperature).");
2913 
2914  if (!is_size(orig_part.abs_vec_data(joker, 0, joker, joker, joker),
2915  first_part.abs_vec_data.nshelves(),
2916  first_part.abs_vec_data.npages(),
2917  first_part.abs_vec_data.nrows(),
2918  first_part.abs_vec_data.ncols()))
2919  throw std::runtime_error(
2920  "All scattering elements must have the same abs_vec_data size"
2921  " (except for temperature).");
2922  }
2923  }
2924 
2925  //----- Start pnd_field_merged and scat_data_merged calculations -----
2926 
2927  GridPos T_gp;
2928  Vector itw(2);
2929 
2930  Index nlevels = pnd_field_merged.nbooks();
2931  // loop over pressure levels in cloudbox
2932  for (Index i_lv = 0; i_lv < nlevels - 1; i_lv++) {
2933  pnd_field_merged(i_lv, i_lv, 0, 0) = 1.;
2934 
2935  SingleScatteringData& this_part = scat_data_merged[0][i_lv];
2936  for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2937  for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
2938  SingleScatteringData& orig_part = scat_data[i_ss][i_se];
2939  const Index pnd_index = FlattenedIndex(scat_data, i_ss, i_se);
2940 
2941  // If the particle number density at a specific point in the
2942  // atmosphere for the i_se scattering element is zero, we don't
2943  // need to do the transformation!
2944  if (pnd_field(pnd_index, i_lv, 0, 0) > PND_LIMIT) //TRS
2945  {
2946  Numeric temperature = this_part.T_grid[0];
2947  if (orig_part.T_grid.nelem() > 1) {
2948  ostringstream os;
2949  os << "The temperature grid of the scattering data "
2950  << "does not cover the\n"
2951  << "atmospheric temperature at cloud location. "
2952  << "The data should\n"
2953  << "include the value T = " << temperature << " K.\n"
2954  << "Offending particle is scat_data[" << i_ss << "][" << i_se
2955  << "]:\n"
2956  << "Description: " << orig_part.description << "\n";
2957  chk_interpolation_grids(os.str(), orig_part.T_grid, temperature);
2958 
2959  // Gridpositions:
2960  gridpos(T_gp, orig_part.T_grid, temperature);
2961  // Interpolation weights:
2962  interpweights(itw, T_gp);
2963  }
2964 
2966 
2967  // Loop over frequencies
2968  for (Index i_f = 0; i_f < orig_part.pha_mat_data.nlibraries();
2969  i_f++) {
2970  // Loop over zenith angles
2971  for (Index i_za = 0; i_za < orig_part.ext_mat_data.npages();
2972  i_za++) {
2973  // Loop over azimuth angles
2974  for (Index i_aa = 0; i_aa < orig_part.ext_mat_data.nrows();
2975  i_aa++) {
2976  // Weighted sum of ext_mat_data and abs_vec_data
2977  if (orig_part.T_grid.nelem() == 1) {
2978  Vector v = orig_part.ext_mat_data(i_f, 0, i_za, i_aa, joker);
2979  v *= pnd_field(pnd_index, i_lv, 0, 0);
2980  this_part.ext_mat_data(i_f, 0, i_za, i_aa, joker) += v;
2981 
2982  v = orig_part.abs_vec_data(i_f, 0, i_za, i_aa, joker);
2983  v *= pnd_field(pnd_index, i_lv, 0, 0);
2984  this_part.abs_vec_data(i_f, 0, i_za, i_aa, joker) += v;
2985  } else {
2986  for (Index i = 0; i < orig_part.ext_mat_data.ncols(); i++) {
2987  // Temperature interpolation
2988  this_part.ext_mat_data(i_f, 0, i_za, i_aa, i) +=
2989  pnd_field(pnd_index, i_lv, 0, 0) *
2990  interp(
2991  itw,
2992  orig_part.ext_mat_data(i_f, joker, i_za, i_aa, i),
2993  T_gp);
2994  }
2995  for (Index i = 0; i < orig_part.abs_vec_data.ncols(); i++) {
2996  // Temperature interpolation
2997  this_part.abs_vec_data(i_f, 0, i_za, i_aa, i) +=
2998  pnd_field(pnd_index, i_lv, 0, 0) *
2999  interp(
3000  itw,
3001  orig_part.abs_vec_data(i_f, joker, i_za, i_aa, i),
3002  T_gp);
3003  }
3004  }
3005  }
3006  }
3007 
3009 
3010  // Loop over outgoing zenith angles
3011  for (Index i_za_out = 0;
3012  i_za_out < orig_part.pha_mat_data.nshelves();
3013  i_za_out++) {
3014  // Loop over outgoing azimuth angles
3015  for (Index i_aa_out = 0;
3016  i_aa_out < orig_part.pha_mat_data.nbooks();
3017  i_aa_out++) {
3018  // Loop over incoming zenith angles
3019  for (Index i_za_inc = 0;
3020  i_za_inc < orig_part.pha_mat_data.npages();
3021  i_za_inc++) {
3022  // Loop over incoming azimuth angles
3023  for (Index i_aa_inc = 0;
3024  i_aa_inc < orig_part.pha_mat_data.nrows();
3025  i_aa_inc++) {
3026  // Weighted sum of pha_mat_data
3027  if (orig_part.T_grid.nelem() == 1) {
3028  Vector v = orig_part.pha_mat_data(i_f,
3029  0,
3030  i_za_out,
3031  i_aa_out,
3032  i_za_inc,
3033  i_aa_inc,
3034  joker);
3035  v *= pnd_field(pnd_index, i_lv, 0, 0);
3036  this_part.pha_mat_data(i_f,
3037  0,
3038  i_za_out,
3039  i_aa_out,
3040  i_za_inc,
3041  i_aa_inc,
3042  joker) += v;
3043  } else {
3044  // Temperature interpolation
3045  for (Index i = 0; i < orig_part.pha_mat_data.ncols();
3046  i++) {
3047  this_part.pha_mat_data(i_f,
3048  0,
3049  i_za_out,
3050  i_aa_out,
3051  i_za_inc,
3052  i_aa_inc,
3053  i) +=
3054  pnd_field(pnd_index, i_lv, 0, 0) *
3055  interp(itw,
3056  orig_part.pha_mat_data(i_f,
3057  joker,
3058  i_za_out,
3059  i_aa_out,
3060  i_za_inc,
3061  i_aa_inc,
3062  i),
3063  T_gp);
3064  }
3065  }
3066  }
3067  }
3068  }
3069  }
3070  }
3071  }
3072  }
3073  }
3074  }
3075 
3076  // Set new pnd_field at lowest altitude to 0 if the cloudbox doesn't touch
3077  // the ground.
3078  // The consistency for the original pnd_field has already been ensured by
3079  // cloudbox_checkedCalc
3080  if (z_field(cloudbox_limits[0], 0, 0) > z_surface(0, 0))
3081  pnd_field_merged(0, 0, 0, 0) = 0.;
3082 
3083  pnd_field = pnd_field_merged;
3084  scat_data = scat_data_merged;
3085  scat_meta = scat_meta_merged;
3086  scat_species = scat_species_merged;
3087 }
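// ----------------------------------------------------------------------------
// Added note (not part of the original ARTS source): ScatSpeciesMerge above
// collapses all scattering elements into one merged element per cloudbox
// pressure level. For every level l and every optical quantity X in
// {ext_mat_data, abs_vec_data, pha_mat_data}, the merged data are the
// pnd-weighted sums
//
//   X_merged(l) = sum over elements i of  pnd_field(i, l) * X_i(T(l)),
//
// with X_i evaluated at the level temperature by linear interpolation in the
// element's T_grid (or taken as is if that grid holds a single point). The
// accompanying pnd_field_merged is 1 at the level a merged element belongs to
// and 0 everywhere else.
// ----------------------------------------------------------------------------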
3088 
3089 /* Workspace method: Doxygen documentation will be auto-generated */
3090 void ExtractFromMetaSingleScatSpecies(
3091  //WS Output:
3092  Vector& meta_param,
3093  //WS Input:
3094  const ArrayOfArrayOfScatteringMetaData& scat_meta,
3095  const String& meta_name,
3096  const Index& scat_species_index,
3097  const Verbosity& /*verbosity*/) {
3098  if (scat_species_index < 0) {
3099  ostringstream os;
3100  os << "scat_species_index can't be <0!";
3101  throw runtime_error(os.str());
3102  }
3103 
3104  const Index nss = scat_meta.nelem();
3105 
3106  // check that scat_meta actually has at least scat_species_index elements
3107  if (!(nss > scat_species_index)) {
3108  ostringstream os;
3109  os << "Can not extract data for scattering species #" << scat_species_index
3110  << "\n"
3111  << "because scat_meta has only " << nss << " elements.";
3112  throw runtime_error(os.str());
3113  }
3114 
3115  const Index nse = scat_meta[scat_species_index].nelem();
3116  meta_param.resize(nse);
3117 
3118  for (Index i = 0; i < nse; i++) {
3119  if (meta_name == "mass")
3120  meta_param[i] = scat_meta[scat_species_index][i].mass;
3121  else if (meta_name == "diameter_max")
3122  meta_param[i] = scat_meta[scat_species_index][i].diameter_max;
3123  else if (meta_name == "diameter_volume_equ")
3124  meta_param[i] = scat_meta[scat_species_index][i].diameter_volume_equ;
3125  else if (meta_name == "diameter_area_equ_aerodynamical")
3126  meta_param[i] =
3127  scat_meta[scat_species_index][i].diameter_area_equ_aerodynamical;
3128  else {
3129  ostringstream os;
3130  os << "Meta parameter \"" << meta_name << "\" is unknown.";
3131  throw runtime_error(os.str());
3132  }
3133  }
3134 }
3135 
3136 
Index npages() const
Returns the number of pages.
Definition: matpackV.cc:50
INDEX Index
The type to use for all integer numbers and indices.
Definition: matpack.h:39
void pha_mat_sptFromData(Tensor5 &pha_mat_spt, const ArrayOfArrayOfSingleScatteringData &scat_data, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Index &f_index, const Vector &f_grid, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: pha_mat_sptFromData.
void MultiplyAndAdd(const Numeric x, const PropagationMatrix &y)
Add a scaled version of the input.
Index nrows() const
Returns the number of rows.
Definition: matpackV.cc:53
void opt_prop_bulkCalc(PropagationMatrix &ext_mat, StokesVector &abs_vec, const ArrayOfPropagationMatrix &ext_mat_spt, const ArrayOfStokesVector &abs_vec_spt, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &)
WORKSPACE METHOD: opt_prop_bulkCalc.
The Agenda class.
Definition: agenda_class.h:44
Index nelem() const
Number of elements.
Definition: array.h:195
void resize(Index s, Index b, Index p, Index r, Index c)
Resize function.
Definition: matpackV.cc:1743
void scat_dataCheck(const ArrayOfArrayOfSingleScatteringData &scat_data, const String &check_type, const Numeric &threshold, const Verbosity &verbosity)
WORKSPACE METHOD: scat_dataCheck.
Declarations having to do with the four output streams.
void pha_mat_sptFromMonoData(Tensor5 &pha_mat_spt, const ArrayOfArrayOfSingleScatteringData &scat_data_mono, const Index &doit_za_grid_size, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: pha_mat_sptFromMonoData.
The Vector class.
Definition: matpackI.h:860
#define abs(x)
Numeric interp(ConstVectorView itw, ConstVectorView a, const GridPos &tc)
Red 1D Interpolate.
void scat_dataReduceT(ArrayOfArrayOfSingleScatteringData &scat_data, const Index &i_ss, const Numeric &T, const Index &interp_order, const Index &phamat_only, const Numeric &threshold, const Verbosity &)
WORKSPACE METHOD: scat_dataReduceT.
#define PND_LIMIT
#define ABS_VEC_DATA
The Tensor4 class.
Definition: matpackIV.h:421
The range class.
Definition: matpackI.h:160
Index nrows() const
Returns the number of rows.
Definition: matpackVII.cc:57
#define T_DATAGRID
#define ZA_DATAGRID
void opt_prop_sptFromData(ArrayOfPropagationMatrix &ext_mat_spt, ArrayOfStokesVector &abs_vec_spt, const ArrayOfArrayOfSingleScatteringData &scat_data, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Index &f_index, const Vector &f_grid, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: opt_prop_sptFromData.
This file contains basic functions to handle XML data files.
The Tensor7 class.
Definition: matpackVII.h:2382
Header file for interpolation.cc.
Index nbooks() const
Returns the number of books.
Definition: matpackV.cc:47
const Numeric RAD2DEG
Stokes vector is as Propagation matrix but only has 4 possible values.
Index nelem() const
Returns the number of elements.
Definition: matpackI.cc:51
Contains sorting routines.
void chk_interpolation_grids(const String &which_interpolation, ConstVectorView old_grid, ConstVectorView new_grid, const Index order, const Numeric &extpolfac, const bool islog)
Check interpolation grids.
Definition: check_input.cc:989
Structure to store a grid position.
Definition: interpolation.h:73
Numeric diameter_volume_equ
This file contains the definition of Array.
void opt_prop_sptFromMonoData(ArrayOfPropagationMatrix &ext_mat_spt, ArrayOfStokesVector &abs_vec_spt, const ArrayOfArrayOfSingleScatteringData &scat_data_mono, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: opt_prop_sptFromMonoData.
void pha_mat_spt_agendaExecute(Workspace &ws, Tensor5 &pha_mat_spt, const Index za_index, const Index scat_lat_index, const Index scat_lon_index, const Index scat_p_index, const Index aa_index, const Numeric rtp_temperature, const Agenda &input_agenda)
Definition: auto_md.cc:24645
The Tensor3 class.
Definition: matpackIII.h:339
Index TotalNumberOfElements(const Array< Array< base > > &aa)
Determine total number of elements in an ArrayOfArray.
Definition: array.h:343
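As a sketch with hypothetical data, the helper's result is simply the sum of the inner array sizes, e.g. the total number of scattering elements over all scattering species:
  // my_scat_data: ArrayOfArrayOfSingleScatteringData (one inner array per species).
  Index n_manual = 0;
  for (Index i_ss = 0; i_ss < my_scat_data.nelem(); i_ss++)
    n_manual += my_scat_data[i_ss].nelem();
  // n_manual equals:
  const Index n_total = TotalNumberOfElements(my_scat_data);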
The global header file for ARTS.
Index nshelves() const
Returns the number of shelves.
Definition: matpackV.cc:44
#define DEBUG_ONLY(...)
Definition: debug.h:36
void MultiplyAndAdd(const Numeric x, const PropagationMatrix &y)
Multiply input by scalar and add to this.
void ExtractFromMetaSingleScatSpecies(Vector &meta_param, const ArrayOfArrayOfScatteringMetaData &scat_meta, const String &meta_name, const Index &scat_species_index, const Verbosity &)
WORKSPACE METHOD: ExtractFromMetaSingleScatSpecies.
const Numeric DEG2RAD
bool is_same_within_epsilon(const Numeric &a, const Numeric &b, const Numeric &epsilon)
Check whether two numbers agree within a given epsilon.
Definition: logic.cc:351
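A minimal usage sketch, e.g. to verify that a requested frequency agrees with a stored one up to numerical noise (the variable names are illustrative only):
  if (!is_same_within_epsilon(f_request, f_stored, 1e-6))
    throw runtime_error("Frequency mismatch beyond tolerance.");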
_CS_string_type str() const
Definition: sstream.h:491
#define AA_DATAGRID
void toupper()
Convert to upper case.
Definition: mystring.h:74
The declarations of all the exception classes.
void abs_vecAddGas(StokesVector &abs_vec, const ArrayOfPropagationMatrix &propmat_clearsky, const Verbosity &)
WORKSPACE METHOD: abs_vecAddGas.
void nlinspace(Vector &x, const Numeric start, const Numeric stop, const Index n)
Create a vector of n equally spaced values between start and stop.
Definition: math_funcs.cc:231
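For example, an equidistant zenith-angle grid from 0 to 180 degrees with 19 points could be set up like this (a sketch only):
  Vector za_grid_new;
  nlinspace(za_grid_new, 0.0, 180.0, 19);   // 0, 10, 20, ..., 180 deg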
#define EXT_MAT_DATA
#define CREATE_OUT1
Definition: messages.h:205
void pha_mat_sptFromScat_data(Tensor5 &pha_mat_spt, const ArrayOfArrayOfSingleScatteringData &scat_data, const Index &scat_data_checked, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Index &f_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: pha_mat_sptFromScat_data.
void gridpos(ArrayOfGridPos &gp, ConstVectorView old_grid, ConstVectorView new_grid, const Numeric &extpolfac)
Set up a grid position Array.
#define PART_TYPE
const Joker joker
NUMERIC Numeric
The type to use for all floating point numbers.
Definition: matpack.h:33
Index StokesDimensions() const
The stokes dimension of the propagation matrix.
The Matrix class.
Definition: matpackI.h:1193
Index nlibraries() const
Returns the number of libraries.
Definition: matpackVII.cc:42
void gridpos_poly(ArrayOfGridPosPoly &gp, ConstVectorView old_grid, ConstVectorView new_grid, const Index order, const Numeric &extpolfac)
Set up a grid position Array for higher-order (polynomial) interpolation.
void gridpos_copy(GridPos &gp_new, const GridPos &gp_old)
Copy the contents of one GridPos to another.
void resize(Index p, Index r, Index c)
Resize function.
Definition: matpackIII.cc:664
Header file for logic.cc.
Numeric diameter_area_equ_aerodynamical
Index NumberOfFrequencies() const
The number of frequencies of the propagation matrix.
This can be used to make arrays out of anything.
Definition: array.h:40
void opt_prop_sptFromScat_data(ArrayOfPropagationMatrix &ext_mat_spt, ArrayOfStokesVector &abs_vec_spt, const ArrayOfArrayOfSingleScatteringData &scat_data, const Index &scat_data_checked, const Vector &za_grid, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Index &f_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &verbosity)
WORKSPACE METHOD: opt_prop_sptFromScat_data.
void ext_matTransform(PropagationMatrix &ext_mat_lab, ConstTensor3View ext_mat_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Numeric &za_sca, const Numeric &aa_sca, const Verbosity &verbosity)
Transformation of extinction matrix.
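A hypothetical call sketch, assuming ext_data already holds the extinction data of one scattering element interpolated to the current frequency and temperature (dimensions za x aa x matrix element), za_data/aa_data are the corresponding data grids, and the particles are totally randomly oriented; the constructor arguments and variable names are assumptions for illustration:
  PropagationMatrix ext_mat_lab(1, stokes_dim);   // one frequency
  ext_matTransform(ext_mat_lab, ext_data, za_data, aa_data,
                   PTYPE_TOTAL_RND,               // totally random orientation
                   za_sca, aa_sca, verbosity);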
void ScatSpeciesMerge(Tensor4 &pnd_field, ArrayOfArrayOfSingleScatteringData &scat_data, ArrayOfArrayOfScatteringMetaData &scat_meta, ArrayOfString &scat_species, Index &cloudbox_checked, const Index &atmosphere_dim, const Index &cloudbox_on, const ArrayOfIndex &cloudbox_limits, const Tensor3 &t_field, const Tensor3 &z_field, const Matrix &z_surface, const Verbosity &)
WORKSPACE METHOD: ScatSpeciesMerge.
void resize(Index n)
Resize function.
Definition: matpackI.cc:404
void pha_matCalc(Tensor4 &pha_mat, const Tensor5 &pha_mat_spt, const Tensor4 &pnd_field, const Index &atmosphere_dim, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &)
WORKSPACE METHOD: pha_matCalc.
void scat_data_monoExtract(ArrayOfArrayOfSingleScatteringData &scat_data_mono, const ArrayOfArrayOfSingleScatteringData &scat_data, const Index &f_index, const Verbosity &)
WORKSPACE METHOD: scat_data_monoExtract.
The Tensor6 class.
Definition: matpackVI.h:1088
void pha_matTransform(MatrixView pha_mat_lab, ConstTensor5View pha_mat_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Index &za_sca_idx, const Index &aa_sca_idx, const Index &za_inc_idx, const Index &aa_inc_idx, ConstVectorView za_grid, ConstVectorView aa_grid, const Verbosity &verbosity)
Transformation of phase matrix.
A constant view of a Vector.
Definition: matpackI.h:476
Index nshelves() const
Returns the number of shelves.
Definition: matpackVII.cc:48
Index npages() const
Returns the number of pages.
Definition: matpackVII.cc:54
Workspace class.
Definition: workspace_ng.h:40
Index nbooks() const
Returns the number of books.
Definition: matpackIV.cc:57
Index FlattenedIndex(const Array< Array< base > > &aa, Index outer, Index inner=0)
Determine the index of an element in a flattened version of the array.
Definition: array.h:354
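As a sketch, the flat index of scattering element i_se within species i_ss, as used for indexing pnd_field along its book dimension, might be obtained like this (variable names are hypothetical):
  // Map (species, element) to the flat scattering-element index.
  const Index i_se_flat = FlattenedIndex(my_scat_data, i_ss, i_se);
  // Particle number density of that element at the current grid point.
  const Numeric pnd = my_pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index);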
void scat_dataCalc(ArrayOfArrayOfSingleScatteringData &scat_data, const ArrayOfArrayOfSingleScatteringData &scat_data_raw, const Vector &f_grid, const Index &interp_order, const Verbosity &)
WORKSPACE METHOD: scat_dataCalc.
void SetZero()
Sets all data to zero.
Structure to store a grid position for higher order interpolation.
Index ncols() const
Returns the number of columns.
Definition: matpackV.cc:56
#define PHA_MAT_DATA
#define CREATE_OUT0
Definition: messages.h:204
bool is_size(ConstVectorView x, const Index &n)
Verifies that the size of x is n.
Definition: logic.cc:79
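A minimal sketch of a size check on a frequency grid (the names are placeholders):
  if (!is_size(f_grid_local, n_f))
    throw runtime_error("f_grid has unexpected length.");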
void DoitScatteringDataPrepare(Workspace &ws, ArrayOfTensor7 &pha_mat_sptDOITOpt, ArrayOfArrayOfSingleScatteringData &scat_data_mono, Tensor7 &pha_mat_doit, Vector &aa_grid, const Index &doit_za_grid_size, const ArrayOfArrayOfSingleScatteringData &scat_data, const Index &scat_data_checked, const Index &f_index, const Index &atmosphere_dim, const Index &stokes_dim, const Tensor3 &t_field, const ArrayOfIndex &cloudbox_limits, const Tensor4 &pnd_field, const Agenda &pha_mat_spt_agenda, const Verbosity &verbosity)
WORKSPACE METHOD: DoitScatteringDataPrepare.
void abs_vecTransform(StokesVector &abs_vec_lab, ConstTensor3View abs_vec_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Numeric &za_sca, const Numeric &aa_sca, const Verbosity &verbosity)
Transformation of absorption vector.
Index ncols() const
Returns the number of columns.
Definition: matpackVII.cc:60
void pha_mat_sptFromDataDOITOpt(Tensor5 &pha_mat_spt, const ArrayOfTensor7 &pha_mat_sptDOITOpt, const ArrayOfArrayOfSingleScatteringData &scat_data_mono, const Index &doit_za_grid_size, const Vector &aa_grid, const Index &za_index, const Index &aa_index, const Numeric &rtp_temperature, const Tensor4 &pnd_field, const Index &scat_p_index, const Index &scat_lat_index, const Index &scat_lon_index, const Verbosity &)
WORKSPACE METHOD: pha_mat_sptFromDataDOITOpt.
const Numeric PI
Index ncols() const
Returns the number of columns.
Definition: matpackIV.cc:66
void ext_matAddGas(PropagationMatrix &ext_mat, const ArrayOfPropagationMatrix &propmat_clearsky, const Verbosity &)
WORKSPACE METHOD: ext_matAddGas.
void interpweights(VectorView itw, const GridPos &tc)
Reduced ("red") 1D interpolation weights for a single grid position.
void resize(Index l, Index v, Index s, Index b, Index p, Index r, Index c)
Resize function.
Definition: matpackVII.cc:5484
#define CREATE_OUT2
Definition: messages.h:206
Scattering database structure and functions.
Index nbooks() const
Returns the number of books.
Definition: matpackVII.cc:51
#define F_DATAGRID
void scat_data_monoCalc(ArrayOfArrayOfSingleScatteringData &scat_data_mono, const ArrayOfArrayOfSingleScatteringData &scat_data, const Vector &f_grid, const Index &f_index, const Verbosity &)
WORKSPACE METHOD: scat_data_monoCalc.
void resize(Index b, Index p, Index r, Index c)
Resize function.
Definition: matpackIV.cc:1064
Numeric AngIntegrate_trapezoid(ConstMatrixView Integrand, ConstVectorView za_grid, ConstVectorView aa_grid)
Trapezoidal integration of a field over the solid angles defined by the zenith and azimuth grids.
Definition: math_funcs.cc:296
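A sketch of integrating a scalar field given on zenith/azimuth grids over the full solid angle; the grid and field names are placeholders, and the constant test field is only used to make the expected result obvious:
  Vector za_pts, aa_pts;
  nlinspace(za_pts, 0.0, 180.0, 19);
  nlinspace(aa_pts, 0.0, 360.0, 37);
  Matrix integrand(za_pts.nelem(), aa_pts.nelem(), 1.0);   // constant test field
  const Numeric omega = AngIntegrate_trapezoid(integrand, za_pts, aa_pts);
  // For a constant field of 1 on full grids, omega should be close to 4*PI.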
The Tensor5 class.
Definition: matpackV.h:506