diff --git a/modules/base/pymod/table.py b/modules/base/pymod/table.py index 2e5188ea15e3e3edc61072c60a6619fb2919fdeb..6eea1f01062c8bc8e744ba15f8f15f2926260102 100644 --- a/modules/base/pymod/table.py +++ b/modules/base/pymod/table.py @@ -134,7 +134,7 @@ class Table(object): tab=Table() If you want to add columns directly when creating the table, column names - and column types can be specified as follows + and *column types* can be specified as follows .. code-block:: python @@ -144,6 +144,17 @@ class Table(object): float and bool, respectively. There will be no data in the table and thus, the table will not contain any rows. + The following *column types* are supported: + + ======= ======== + name abbrev + ======= ======== + string s + float f + int i + bool b + ======= ======== + If you want to add data to the table in addition, use the following: .. code-block:: python @@ -354,7 +365,7 @@ class Table(object): def __str__(self): return self.ToString() - def _AddRowsFromDict(self, d, merge=False): + def _AddRowsFromDict(self, d, overwrite=False): # get column indices idxs = [self.GetColIndex(k) for k in d.keys()] @@ -377,11 +388,11 @@ class Table(object): new_row[idx] = self._Coerce(v, self.col_types[idx]) # partially overwrite existing row with new data - if merge: - merge_idx = self.GetColIndex(merge) + if overwrite: + overwrite_idx = self.GetColIndex(overwrite) added = False for i,r in enumerate(self.rows): - if r[merge_idx]==new_row[merge_idx]: + if r[overwrite_idx]==new_row[overwrite_idx]: for j,e in enumerate(self.rows[i]): if new_row[j]==None: new_row[j] = e @@ -389,22 +400,30 @@ class Table(object): added = True break - # if not merge or merge did not find appropriate row - if not merge or not added: + # if not overwrite or overwrite did not find appropriate row + if not overwrite or not added: self.rows.append(new_row) - def AddRow(self, data, merge=None): + def AddRow(self, data, overwrite=None): """ - Add a row to the table. 
*row* may either a dictionary in which case the keys - in the dictionary must match the column names. Columns not found in the dict - will be initialized to None. Alternatively, if data is a list-like object, - the row is initialized from the values in data. The number of items in data - must match the number of columns in the table. A :class:`ValuerError` is - raised otherwise. + Add a row to the table. + + *row* may either be a dictionary or a list-like object. + In the case of a dictionary the keys in the dictionary must match the column + names. Columns not found in the dict will be initialized to None. + Alternatively, if data is a list-like object, the row is initialized from + the values in data. The number of items in data must match the number of + columns in the table. A :class:`ValueError` is raised otherwise. The values + are added in the order specified in the list, thus, the order of the data + must match the columns. + + If *overwrite* is set and not None (must be set to an existing column name), + an existing row is overwritten if the value of column *overwrite* matches. + If no matching row is found, a new row is appended to the table. 
""" if type(data)==dict: - self._AddRowsFromDict(data, merge) + self._AddRowsFromDict(data, overwrite) else: if len(data)!=len(self.col_names): msg='data array must have %d elements, not %d' @@ -412,17 +431,17 @@ class Table(object): new_row = [self._Coerce(v, t) for v, t in zip(data, self.col_types)] # fully overwrite existing row with new data - if merge: - merge_idx = self.GetColIndex(merge) + if overwrite: + overwrite_idx = self.GetColIndex(overwrite) added = False for i,r in enumerate(self.rows): - if r[merge_idx]==new_row[merge_idx]: + if r[overwrite_idx]==new_row[overwrite_idx]: self.rows[i] = new_row added = True break - # if not merge or merge did not find appropriate row - if not merge or not added: + # if not overwrite or overwrite did not find appropriate row + if not overwrite or not added: self.rows.append(new_row) def RemoveCol(self, col): @@ -584,9 +603,13 @@ class Table(object): """ Load table from stream or file with given name. + By default, the file format is *ost* (see below) and is *not* automatically + determined (e.g. from file extension). Thus, it *format* must be specified + for reading other file formats. + The following file formats are understood: - ost + - ost This is an ost-specific, but still human readable file format. The file (stream) must start with header line of the form @@ -599,11 +622,11 @@ class Table(object): data items are automatically converted to the column format. Lines starting with a '#' and empty lines are ignored. - pickle + - pickle Deserializes the table from a pickled byte stream - csv + - csv Reads the table from comma separated values stream. Since there is no explicit type information in the csv file, the column types are guessed, @@ -657,15 +680,15 @@ class Table(object): """ Allows to conveniently iterate over a selection of columns, e.g. - .. code-block::python + .. 
code-block:: python tab=Table.Load('...') - for col1, col in tab.Zip('col1', 'col2'): + for col1, col2 in tab.Zip('col1', 'col2'): print col1, col2 is a shortcut for - .. code-block::python + .. code-block:: python tab=Table.Load('...') for col1, col2 in zip(tab['col1'], tab['col2']): @@ -820,8 +843,8 @@ class Table(object): histtype='stepfilled', align='mid', x_title=None, y_title=None, title=None, clear=True, save=False): """ - Create a histogram of the data in col for the range x_range, split into - num_bins bins and plot it using matplot lib + Create a histogram of the data in col for the range *x_range*, split into + *num_bins* bins and plot it using matplot lib. """ try: import matplotlib.pyplot as plt @@ -1158,17 +1181,18 @@ class Table(object): raise - def Save(self, stream, format='ost', sep=','): + def Save(self, stream_or_filename, format='ost', sep=','): """ - Save the table to stream or filename + Save the table to stream or filename. For supported file formats, see + :meth:`Load` """ format=format.lower() if format=='ost': - return self._SaveOST(stream) + return self._SaveOST(stream_or_filename) if format=='csv': - return self._SaveCSV(stream, sep=sep) + return self._SaveCSV(stream_or_filename, sep=sep) if format=='pickle': - return self._SavePickle(stream) + return self._SavePickle(stream_or_filename) raise ValueError('unknown format "%s"' % format) def _SavePickle(self, stream): @@ -1317,7 +1341,9 @@ class Table(object): style='-', title=None, x_title=None, y_title=None, clear=True, save=None): ''' - Plot an enrichment curve using matplotlib + Plot an enrichment curve using matplotlib. + + For more information about parameters, see :meth:`ComputeEnrichment` ''' try: @@ -1417,7 +1443,9 @@ class Table(object): class_dir='-', class_cutoff=2.0): ''' Computes the area under the curve of the enrichment using the trapezoidal - rule + rule. 
+ + For more information about parameters, see :meth:`ComputeEnrichment` ''' try: import numpy as np @@ -1439,23 +1467,27 @@ class Table(object): For this it is necessary, that the datapoints are classified into positive and negative points. This can be done in two ways: - - by using one 'bool' column (class_col) which contains True for positives + - by using one 'bool' column (*class_col*) which contains True for positives and False for negatives - - by using a non-bool column (class_col), a cutoff value (class_cutoff) - and the classification columns direction (class_dir). This will generate + - by using a non-bool column (*class_col*), a cutoff value (*class_cutoff*) + and the classification columns direction (*class_dir*). This will generate the classification on the fly - * if class_dir=='-': values in the classification column that are - less than or equal to class_cutoff will be counted - as positives - * if class_dir=='+': values in the classification column that are - larger than or equal to class_cutoff will be counted + - if *class_dir* =='-': values in the classification column that are + less than or equal to *class_cutoff* will be counted as positives + - if *class_dir* =='+': values in the classification column that are + larger than or equal to *class_cutoff* will be + counted as positives - During the calculation, the table will be sorted according to score_dir, + During the calculation, the table will be sorted according to *score_dir*, where a '-' values means smallest values first and therefore, the smaller the value, the better. + If *class_col* does not contain any positives (i.e. value is True (if column + is of type bool) or evaluated to True (if column is of type int or float + (depending on *class_dir* and *class_cutoff*))) the ROC is not defined and + the function will return *None*. 
''' ALLOWED_DIR = ['+','-'] @@ -1516,7 +1548,9 @@ class Table(object): class_dir='-', class_cutoff=2.0): ''' Computes the area under the curve of the receiver operating characteristics - using the trapezoidal rule + using the trapezoidal rule. + + For more information about parameters, see :meth:`ComputeROC` ''' try: import numpy as np @@ -1536,7 +1570,9 @@ class Table(object): style='-', title=None, x_title=None, y_title=None, clear=True, save=None): ''' - Plot an ROC curve using matplotlib + Plot an ROC curve using matplotlib. + + For more information about parameters, see :meth:`ComputeROC` ''' try: @@ -1612,6 +1648,43 @@ class Table(object): return False return True + def Extend(self, tab, overwrite=None): + """ + Append each row of *tab* to the current table. The data is appended based + on the column names, thus the order of the table columns is *not* relevant, + only the header names. + + If there is a column in *tab* that is not present in the current table, + it is added to the current table and filled with *None* for all the rows + present in the current table. + + If the type of any column in *tab* is not the same as in the current table + a *TypeError* is raised. + + If *overwrite* is set and not None (must be set to an existing column name), + an existing row is overwritten if the value of column *overwrite* matches. + If no matching row is found, a new row is appended to the table. 
+ """ + # add column to current table if it doesn't exist + for name,typ in zip(tab.col_names, tab.col_types): + if not name in self.col_names: + self.AddCol(name, typ) + + # check that column types are the same in current and new table + for name in self.col_names: + curr_type = self.col_types[self.GetColIndex(name)] + new_type = tab.col_types[tab.GetColIndex(name)] + if curr_type!=new_type: + raise TypeError('cannot extend table, column %s in new '%name +\ + 'table different type (%s) than in '%new_type +\ + 'current table (%s)'%curr_type) + + num_rows = len(tab.rows) + for i in range(0,num_rows): + row = tab.rows[i] + data = dict(zip(tab.col_names,row)) + self.AddRow(data, overwrite) + def Merge(table1, table2, by, only_matching=False): """ diff --git a/modules/base/tests/test_table.py b/modules/base/tests/test_table.py index 88a799dee114f5d5d2f9961d07707f74957faef5..ea823869c3e67cec719dce4b21794e005cb6f85e 100644 --- a/modules/base/tests/test_table.py +++ b/modules/base/tests/test_table.py @@ -62,9 +62,9 @@ class TestTable(unittest.TestCase): self.CompareColCount(tab, 3) self.CompareRowCount(tab, 0) self.CompareColTypes(tab, ['first','second', 'third'], 'sif') - tab.AddRow(['x',3, None], merge=None) - tab.AddRow(['foo',None, 2.2], merge=None) - tab.AddRow([None,9, 3.3], merge=None) + tab.AddRow(['x',3, None], overwrite=None) + tab.AddRow(['foo',None, 2.2], overwrite=None) + tab.AddRow([None,9, 3.3], overwrite=None) return tab def CompareRowCount(self, t, row_count): @@ -375,7 +375,7 @@ class TestTable(unittest.TestCase): tab = Table(['first'],'i') self.CompareColCount(tab, 1) self.CompareRowCount(tab, 0) - tab.AddRow([2], merge=None) + tab.AddRow([2], overwrite=None) self.CompareColCount(tab, 1) self.CompareRowCount(tab, 1) self.CompareColNames(tab, ['first']) @@ -394,7 +394,7 @@ class TestTable(unittest.TestCase): tab.AddCol('first', 'int') self.CompareColCount(tab, 1) self.CompareRowCount(tab, 0) - tab.AddRow([2], merge=None) + tab.AddRow([2], overwrite=None) 
self.CompareColCount(tab, 1) self.CompareRowCount(tab, 1) self.CompareColNames(tab, ['first']) @@ -414,7 +414,7 @@ class TestTable(unittest.TestCase): self.CompareColCount(tab, 2) self.CompareRowCount(tab, 0) self.CompareColTypes(tab, ['first','second'], 'si') - tab.AddRow(['x',3], merge=None) + tab.AddRow(['x',3], overwrite=None) self.CompareColCount(tab, 2) self.CompareRowCount(tab, 1) tab.AddCol('third', 'float', 3.141) @@ -441,9 +441,9 @@ class TestTable(unittest.TestCase): self.CompareColCount(tab, 3) self.CompareRowCount(tab, 0) self.CompareColTypes(tab, ['first','second', 'third'], 'sif') - tab.AddRow(['x',3, 1.0], merge=None) - tab.AddRow(['foo',6, 2.2], merge=None) - tab.AddRow(['bar',9, 3.3], merge=None) + tab.AddRow(['x',3, 1.0], overwrite=None) + tab.AddRow(['foo',6, 2.2], overwrite=None) + tab.AddRow(['bar',9, 3.3], overwrite=None) self.CompareColCount(tab, 3) self.CompareRowCount(tab, 3) self.CompareDataFromDict(tab, {'second': [3,6,9], 'first': ['x','foo','bar'], 'third': [1,2.2,3.3]}) @@ -466,9 +466,9 @@ class TestTable(unittest.TestCase): self.CompareColCount(tab, 3) self.CompareRowCount(tab, 0) self.CompareColTypes(tab, ['first','second', 'aaa'], 'sif') - tab.AddRow({'first':'x','second':3, 'aaa':1.0}, merge=None) - tab.AddRow({'aaa':2.2, 'second':6, 'first':'foo'}, merge=None) - tab.AddRow({'second':9, 'aaa':3.3, 'first':'bar'}, merge=None) + tab.AddRow({'first':'x','second':3, 'aaa':1.0}, overwrite=None) + tab.AddRow({'aaa':2.2, 'second':6, 'first':'foo'}, overwrite=None) + tab.AddRow({'second':9, 'aaa':3.3, 'first':'bar'}, overwrite=None) self.CompareColCount(tab, 3) self.CompareRowCount(tab, 3) self.CompareDataFromDict(tab, {'second': [3,6,9], 'first': ['x','foo','bar'], 'aaa': [1,2.2,3.3]}) @@ -490,9 +490,9 @@ class TestTable(unittest.TestCase): self.CompareColCount(tab, 1) self.CompareRowCount(tab, 0) self.CompareColTypes(tab, ['first'], 's') - tab.AddRow(['x'], merge=None) - tab.AddRow(['foo'], merge=None) - tab.AddRow(['bar'], merge=None) 
+ tab.AddRow(['x'], overwrite=None) + tab.AddRow(['foo'], overwrite=None) + tab.AddRow(['bar'], overwrite=None) tab.AddCol('second', 'int') tab.AddCol('third', 'float', 3.141) self.CompareColCount(tab, 3) @@ -501,9 +501,9 @@ class TestTable(unittest.TestCase): 'first': ['x','foo','bar'], 'third': [3.141, 3.141, 3.141]}) - def testAddRowFromDictWithMerge(self): + def testAddRowFromDictWithOverwrite(self): ''' - add rows from dictionary with merge (i.e. overwrite third row with additional data) + add rows from dictionary with overwrite (i.e. overwrite third row with additional data) x foo bar ------------------ @@ -522,14 +522,14 @@ class TestTable(unittest.TestCase): self.CompareDataFromDict(tab, {'x': ['row1', 'row2', 'row3'], 'foo': [True, None, False], 'bar': [1, 2, None]}) - tab.AddRow({'x':'row3', 'bar':3}, merge='x') + tab.AddRow({'x':'row3', 'bar':3}, overwrite='x') self.CompareDataFromDict(tab, {'x': ['row1', 'row2', 'row3'], 'foo': [True, None, False], 'bar': [1, 2, 3]}) - def testAddRowFromListWithMerge(self): + def testAddRowFromListWithOverwrite(self): ''' - add rows from list with merge (i.e. overwrite third row with additional data) + add rows from list with overwrite (i.e. 
overwrite third row with additional data) x foo bar ------------------ @@ -549,7 +549,7 @@ class TestTable(unittest.TestCase): self.CompareDataFromDict(tab, {'x': ['row1', 'row2', 'row3'], 'foo': [True, None, False], 'bar': [1, 2, None]}) - tab.AddRow(['row3', True, 3], merge='x') + tab.AddRow(['row3', True, 3], overwrite='x') self.CompareDataFromDict(tab, {'x': ['row1', 'row2', 'row3'], 'foo': [True, None, True], 'bar': [1, 2, 3]}) @@ -1288,6 +1288,101 @@ class TestTable(unittest.TestCase): tab.AddRow([None,8, 2]) self.assertAlmostEquals(tab.SpearmanCorrel('second','third'), -0.316227766) + def testExtend(self): + ''' + first second third + ---------------------- + x 3 NA + foo NA 2.200 + NA 9 3.300 + ''' + + # simple extend of the same table + tab = self.CreateTestTable() + self.CompareDataFromDict(tab, {'first': ['x','foo',None], + 'second': [3,None,9], + 'third': [None,2.2,3.3]}) + + tab.Extend(tab) + self.CompareDataFromDict(tab, {'first': ['x','foo',None,'x','foo',None], + 'second': [3,None,9,3,None,9], + 'third': [None,2.2,3.3,None,2.2,3.3]}) + + # simple extend of different tables with the same data + tab = self.CreateTestTable() + tab2 = self.CreateTestTable() + tab.Extend(tab2) + self.CompareDataFromDict(tab, {'first': ['x','foo',None,'x','foo',None], + 'second': [3,None,9,3,None,9], + 'third': [None,2.2,3.3,None,2.2,3.3]}) + self.CompareDataFromDict(tab2, {'first': ['x','foo',None], + 'second': [3,None,9], + 'third': [None,2.2,3.3]}) + + # add additional columns to current table + tab = self.CreateTestTable() + tab2 = self.CreateTestTable() + tab2.AddCol('foo','i',[1,2,3]) + tab.Extend(tab2) + self.CompareDataFromDict(tab, {'first': ['x','foo',None,'x','foo',None], + 'second': [3,None,9,3,None,9], + 'third': [None,2.2,3.3,None,2.2,3.3], + 'foo': [None,None,None,1,2,3]}) + + # different order of the data + tab = self.CreateTestTable() + tab2 = Table(['third','second','first'], + 'fis', + third=[None,2.2,3.3], + first=['x','foo',None], + second=[3, None, 
9]) + self.CompareDataFromDict(tab2, {'first': ['x','foo',None], + 'second': [3,None,9], + 'third': [None,2.2,3.3]}) + tab.Extend(tab2) + self.CompareDataFromDict(tab, {'first': ['x','foo',None,'x','foo',None], + 'second': [3,None,9,3,None,9], + 'third': [None,2.2,3.3,None,2.2,3.3]}) + + # with overwrite (additional column) + tab = self.CreateTestTable() + tab2 = self.CreateTestTable() + tab2.AddCol('foo','i',[1,2,3]) + tab.Extend(tab2, overwrite='first') + self.CompareDataFromDict(tab, {'first': ['x','foo',None], + 'second': [3,None,9], + 'third': [None,2.2,3.3], + 'foo': [1,2,3]}) + + # with overwrite (no matching value) + tab = self.CreateTestTable() + tab2 = Table(['third','second','first'], + 'fis', + third=[None,2.2,3.3], + first=['a','bar','bla'], + second=[3, None, 9]) + tab.Extend(tab2, overwrite='first') + self.CompareDataFromDict(tab, {'first': ['x','foo',None,'a','bar','bla'], + 'second': [3,None,9,3,None,9], + 'third': [None,2.2,3.3,None,2.2,3.3]}) + + # with overwrite (with matching values) + tab = self.CreateTestTable() + tab2 = Table(['third','second','first'], + 'fis', + third=[None,2.2,3.4], + first=['a','bar','bla'], + second=[3, None, 9]) + tab.Extend(tab2, overwrite='third') + self.CompareDataFromDict(tab, {'first': ['a','bar',None,'bla'], + 'second': [3,None,9,9], + 'third': [None,2.2,3.3,3.4]}) + + # cannot extend if types are different + tab = Table('aaa','s',a=['a','b']) + tab2 = Table('aaa','i',a=[1,2]) + self.assertRaises(TypeError, tab.Extend, tab2) + if __name__ == "__main__": from ost import testutils testutils.RunTests() diff --git a/modules/io/src/img/map_io_spi_handler.cc b/modules/io/src/img/map_io_spi_handler.cc index d22897f4ee3976555418d3fdd8c0f49f49a14aa7..6f5e1798b4674ed037884719036eeeda30323f6e 100644 --- a/modules/io/src/img/map_io_spi_handler.cc +++ b/modules/io/src/img/map_io_spi_handler.cc @@ -198,9 +198,6 @@ void prep_header(spider_header& header, const img::Size& size, const geom::Vec3& header.fNcol = ncol; 
header.fLenbyt = ncol*4.0; // record length in bytesS header.fLabrec = ceil(1024.0 / header.fLenbyt); // nr label records in file header - if (fmod(1024,header.fLenbyt) != 0.0) { - header.fLabrec += 1.0; - } header.fLabbyt = header.fLabrec * header.fLenbyt; header.fIangle = 0.0; // flag indicating that tilt angles have been filled header.fScale = spatial_sampling; // scale @@ -389,6 +386,10 @@ void real_filler(std::istream& in, const spider_header& header, img::ImageHandle template <typename B > void real_dumper(std::ostream& f, const spider_header& header, const img::ImageHandle& mh,const img::alg::Normalizer& norm, bool swap_flag) { + int padding = header.fLabbyt-f.tellp(); + char* buffer=new char[padding]; + f.write(buffer,padding); + delete[] buffer; int slice_size=static_cast<int>(header.fNcol) * static_cast<int>(header.fNrow); boost::scoped_array<B> rawp(new B[slice_size]); diff --git a/modules/io/src/img/map_io_tiff_handler.cc b/modules/io/src/img/map_io_tiff_handler.cc index b1050724393b5ddf5bf3677af351125f61ef2d4b..0f418bc896c6f0563a5a3a0d38d1e5aecfe6495e 100644 --- a/modules/io/src/img/map_io_tiff_handler.cc +++ b/modules/io/src/img/map_io_tiff_handler.cc @@ -311,7 +311,7 @@ void MapIOTiffHandler::do_export(const img::MapHandle& image,TIFF* tfile,TIF& fo img::Point ori=image.GetSpatialOrigin(); geom::Vec3 sampling=image.GetPixelSampling(); float xreso=sampling[0]/Units::cm,yreso=sampling[1]/Units::cm; - float xpos=xreso*ori[0],ypos=yreso*ori[1]; + float xpos=std::max<Real>(0.0,xreso*ori[0]),ypos=std::max<Real>(0.0,yreso*ori[1]); //tiff file format only allows positivie origins, negative origins are lost here TIFFSetField(tfile,TIFFTAG_IMAGEWIDTH,width); TIFFSetField(tfile,TIFFTAG_IMAGELENGTH,height); TIFFSetField(tfile,TIFFTAG_SAMPLESPERPIXEL,spp); diff --git a/modules/io/tests/test_io_img.cc b/modules/io/tests/test_io_img.cc index d9116842a780e8870192ba941b06703f04f417fa..890a7a9c29a22beb57b4c7c0aa0ffb04273f8777 100644 --- 
a/modules/io/tests/test_io_img.cc +++ b/modules/io/tests/test_io_img.cc @@ -47,7 +47,7 @@ BOOST_AUTO_TEST_CASE(test_io_img) { //float tests boost::test_tools::close_at_tolerance<Real> close_test(::boost::test_tools::percent_tolerance(0.001)); - ost::img::ImageHandle testimage=ost::img::CreateImage(ost::img::Extent(ost::img::Point(0,0),ost::img::Point(3,3))); + ost::img::ImageHandle testimage=ost::img::CreateImage(ost::img::Extent(ost::img::Point(0,0),ost::img::Point(4,3))); int counter=0; for (img::ExtentIterator i(testimage.GetExtent()); !i.AtEnd(); ++i, ++counter) { testimage.SetReal(i, counter); @@ -57,7 +57,6 @@ BOOST_AUTO_TEST_CASE(test_io_img) std::map<String,ImageFormatBase*> float_formats; float_formats["DX"]=new DX; float_formats["Situs"]=new Situs; - float_formats["DAT (float)"]=new DAT(false,OST_FLOAT_FORMAT); float_formats["CCP4 (float)"]=new MRC; float_formats["MRC (float)"]=new MRC(false,MRC_OLD_FORMAT); float_formats["SPIDER"]= new Spider; @@ -84,7 +83,6 @@ BOOST_AUTO_TEST_CASE(test_io_img) //int 16 formats std::map<String,ImageFormatBase*> int_formats; int_formats["IPL (16 bit)"]=new IPL(true,OST_BIT16_FORMAT); - int_formats["DAT (16 bit)"]=new DAT(true,OST_BIT16_FORMAT); int_formats["TIF (16 bit)"]=new TIF; int_formats["JPK (16 bit)"]=new JPK; // int_formats["DF3"]=new DF3(true); @@ -137,7 +135,6 @@ BOOST_AUTO_TEST_CASE(test_io_img) //byte formats std::map<String,ImageFormatBase*> byte_formats; - byte_formats["DAT (byte)"]=new DAT(true,OST_BIT8_FORMAT); byte_formats["PNG"]=new PNG; byte_formats["JPK (byte)"]= new JPK(true,OST_BIT8_FORMAT); byte_formats["TIF (byte)"]= new TIF(true,OST_BIT8_FORMAT); @@ -161,4 +158,70 @@ BOOST_AUTO_TEST_CASE(test_io_img) } } +BOOST_AUTO_TEST_CASE(test_io_img_dat) +{ + // test for the dat file format using a square image (non square images not supported by dat) + //float test + boost::test_tools::close_at_tolerance<Real> close_test(::boost::test_tools::percent_tolerance(0.001)); + ost::img::ImageHandle 
testimage=ost::img::CreateImage(ost::img::Extent(ost::img::Point(0,0),ost::img::Point(3,3))); + int counter=0; + for (img::ExtentIterator i(testimage.GetExtent()); !i.AtEnd(); ++i, ++counter) { + testimage.SetReal(i, counter); + } + testimage+=5.01; //if all values are > 0.0 we can use close_at_tolerance + const String fname("temp_img.tmp"); + ost::io::SaveImage(testimage,fname,DAT(false,OST_FLOAT_FORMAT)); + ost::img::ImageHandle loadedimage=ost::io::LoadImage(fname,DAT(false,OST_FLOAT_FORMAT)); + bool failed=false; + ost::img::ExtentIterator eit(testimage.GetExtent()); + for(;!eit.AtEnd();++eit) { + if( ! close_test(testimage.GetReal(eit),loadedimage.GetReal(eit))){ + failed=true; + break; + } + } + if(failed){ + BOOST_ERROR("Image IO failed for plugin DAT (float) at point " << ost::img::Point(eit)<< ". The values are: " << testimage.GetReal(eit)<< ","<< loadedimage.GetReal(eit) ); + } + //int 16 format + ost::io::SaveImage(testimage,fname,DAT(true,OST_BIT16_FORMAT)); + loadedimage=ost::io::LoadImage(fname,DAT(true,OST_BIT16_FORMAT)); + ost::img::alg::Normalizer norm=ost::img::alg::CreateLinearRangeNormalizer(testimage,0.0,65535.0); + ost::img::ImageHandle scaled_image=testimage.Apply(norm); + failed=false; + eit=ost::img::ExtentIterator(testimage.GetExtent()); + for(;!eit.AtEnd();++eit) { + if( static_cast<int>(scaled_image.GetReal(eit))!=static_cast<int>(loadedimage.GetReal(eit))){ + failed=true; + break; + } + } + if(failed){ + BOOST_ERROR("Image IO failed for plugin DAT (int16) at point " + << ost::img::Point(eit)<< ". 
Should be " + << static_cast<int>(scaled_image.GetReal(eit)) << ", but " + << static_cast<int>(loadedimage.GetReal(eit)) << " found."); + } + + //byte format + ost::io::SaveImage(testimage,fname,DAT(true,OST_BIT8_FORMAT)); + loadedimage=ost::io::LoadImage(fname,DAT(true,OST_BIT8_FORMAT)); + norm=ost::img::alg::CreateLinearRangeNormalizer(testimage,0.0,255.0); + scaled_image=testimage.Apply(norm); + failed=false; + eit=ost::img::ExtentIterator(testimage.GetExtent()); + for(;!eit.AtEnd();++eit) { + if( static_cast<int>(scaled_image.GetReal(eit))!=static_cast<int>(loadedimage.GetReal(eit))){ + failed=true; + break; + } + } + if(failed){ + BOOST_ERROR("Image IO failed for plugin DAT (int8) at point " + << ost::img::Point(eit)<< ". Should be " + << static_cast<int>(scaled_image.GetReal(eit)) << ", but " + << static_cast<int>(loadedimage.GetReal(eit)) << " found."); + } +} + BOOST_AUTO_TEST_SUITE_END()