
pandas to_parquet fails on large datasets

I'm trying to save a very large dataset with pandas to_parquet, and it fails once the data exceeds a certain size, with both the 'pyarrow' and 'fastparquet' engines. I reproduced the errors with the code below and would be happy to hear ideas on how to overcome the issue:

Using Pyarrow:

import numpy as np
import pandas as pd
from time import time

tmp_file = '/tmp/test.parquet'  # placeholder output path

low = 3
high = 8
for n in np.logspace(low, high, high - low + 1):
    t0 = time()
    df = pd.DataFrame.from_records(
        [(f'ind_{x}', ''.join(['x'] * 50)) for x in range(int(n))],
        columns=['a', 'b']).set_index('a')
    df.to_parquet(tmp_file, engine='pyarrow', compression='gzip')
    pd.read_parquet(tmp_file, engine='pyarrow')
    print(f'10^{np.log10(int(n))} read-write took {time()-t0} seconds')

10^3.0 read-write took 0.012851715087890625 seconds
10^4.0 read-write took 0.05722832679748535 seconds
10^5.0 read-write took 0.46846866607666016 seconds
10^6.0 read-write took 4.4494054317474365 seconds
10^7.0 read-write took 43.0602171421051 seconds
---------------------------------------------------------------------------
ArrowIOError                              Traceback (most recent call last)
<ipython-input-51-cad917a26b91> in <module>()
      5     df = pd.DataFrame.from_records([(f'ind_{x}', ''.join(['x']*50)) for x in range(int(n))], columns=['a', 'b']).set_index('a')
      6     df.to_parquet(tmp_file, engine='pyarrow', compression='gzip')
----> 7     pd.read_parquet(tmp_file, engine='pyarrow')
      8     print(f'10^{np.log10(int(n))} read-write took {time()-t0} seconds')

~/.conda/envs/anaconda3/lib/python3.6/site-packages/pandas/io/parquet.py in read_parquet(path, engine, columns, **kwargs)
    255 
    256     impl = get_engine(engine)
--> 257     return impl.read(path, columns=columns, **kwargs)

~/.conda/envs/anaconda3/lib/python3.6/site-packages/pandas/io/parquet.py in read(self, path, columns, **kwargs)
    128         kwargs['use_pandas_metadata'] = True
    129         return self.api.parquet.read_table(path, columns=columns,
--> 130                                            **kwargs).to_pandas()
    131 
    132     def _validate_write_lt_070(self, df):

~/.conda/envs/anaconda3/lib/python3.6/site-packages/pyarrow/parquet.py in read_table(source, columns, nthreads, metadata, use_pandas_metadata)
    939     pf = ParquetFile(source, metadata=metadata)
    940     return pf.read(columns=columns, nthreads=nthreads,
--> 941                    use_pandas_metadata=use_pandas_metadata)
    942 
    943 

~/.conda/envs/anaconda3/lib/python3.6/site-packages/pyarrow/parquet.py in read(self, columns, nthreads, use_pandas_metadata)
    148             columns, use_pandas_metadata=use_pandas_metadata)
    149         return self.reader.read_all(column_indices=column_indices,
--> 150                                     nthreads=nthreads)
    151 
    152     def scan_contents(self, columns=None, batch_size=65536):

_parquet.pyx in pyarrow._parquet.ParquetReader.read_all()

error.pxi in pyarrow.lib.check_status()
ArrowIOError: Arrow error: Invalid: BinaryArray cannot contain more than 2147483646 bytes, have 2147483650
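
For reference, this ArrowIOError seems to come from Arrow's ~2 GB cap on a single BinaryArray: at 10^8 rows the 50-character 'b' column alone is roughly 5 GB of string data. A minimal sketch of a pyarrow-side idea I have not verified, assuming the file contains more than one row group (which may require passing row_group_size when writing), is to read it back one row group at a time:

import pyarrow.parquet as pq

# unverified sketch: read one row group at a time so no single
# BinaryArray has to hold the entire string column
pf = pq.ParquetFile(tmp_file)
for i in range(pf.num_row_groups):
    chunk = pf.read_row_group(i).to_pandas()
    print(len(chunk))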

Using fastparquet:

low = 3
high = 8
for n in np.logspace(low, high, high-low+1):
    t0 = time()
    df = pd.DataFrame.from_records([(f'ind_{x}', ''.join(['x']*50)) for x in range(int(n))], columns=['a', 'b']).set_index('a')
    df.to_parquet(tmp_file, engine='fastparquet', compression='gzip')
    pd.read_parquet(tmp_file, engine='fastparquet')
    print(f'10^{np.log10(int(n))} read-write took {time()-t0} seconds')

10^3.0 read-write took 0.17770028114318848 seconds
10^4.0 read-write took 0.06351733207702637 seconds
10^5.0 read-write took 0.46896958351135254 seconds
10^6.0 read-write took 5.464379549026489 seconds
10^7.0 read-write took 50.26520347595215 seconds
---------------------------------------------------------------------------
OverflowError                             Traceback (most recent call last)
<ipython-input-49-234a889ae790> in <module>()
      4     t0 = time()
      5     df = pd.DataFrame.from_records([(f'ind_{x}', ''.join(['x']*50)) for x in range(int(n))], columns=['a', 'b']).set_index('a')
----> 6     df.to_parquet(tmp_file, engine='fastparquet', compression='gzip')
      7     pd.read_parquet(tmp_file, engine='fastparquet')
      8     print(f'10^{np.log10(int(n))} read-write took {time()-t0} seconds')

~/.conda/envs/anaconda3/lib/python3.6/site-packages/pandas/core/frame.py in to_parquet(self, fname, engine, compression, **kwargs)
   1647         from pandas.io.parquet import to_parquet
   1648         to_parquet(self, fname, engine,
-> 1649                    compression=compression, **kwargs)
   1650 
   1651     @Substitution(header='Write out the column names. If a list of strings '

~/.conda/envs/anaconda3/lib/python3.6/site-packages/pandas/io/parquet.py in to_parquet(df, path, engine, compression, **kwargs)
    225     """
    226     impl = get_engine(engine)
--> 227     return impl.write(df, path, compression=compression, **kwargs)
    228 
    229 

~/.conda/envs/anaconda3/lib/python3.6/site-packages/pandas/io/parquet.py in write(self, df, path, compression, **kwargs)
    198         with catch_warnings(record=True):
    199             self.api.write(path, df,
--> 200                            compression=compression, **kwargs)
    201 
    202     def read(self, path, columns=None, **kwargs):

~/.conda/envs/anaconda3/lib/python3.6/site-packages/fastparquet/writer.py in write(filename, data, row_group_offsets, compression, file_scheme, open_with, mkdirs, has_nulls, write_index, partition_on, fixed_text, append, object_encoding, times)
    846     if file_scheme == 'simple':
    847         write_simple(filename, data, fmd, row_group_offsets,
--> 848                      compression, open_with, has_nulls, append)
    849     elif file_scheme in ['hive', 'drill']:
    850         if append:

~/.conda/envs/anaconda3/lib/python3.6/site-packages/fastparquet/writer.py in write_simple(fn, data, fmd, row_group_offsets, compression, open_with, has_nulls, append)
    715                    else None)
    716             rg = make_row_group(f, data[start:end], fmd.schema,
--> 717                                 compression=compression)
    718             if rg is not None:
    719                 fmd.row_groups.append(rg)

~/.conda/envs/anaconda3/lib/python3.6/site-packages/fastparquet/writer.py in make_row_group(f, data, schema, compression)
    612                 comp = compression
    613             chunk = write_column(f, data[column.name], column,
--> 614                                  compression=comp)
    615             rg.columns.append(chunk)
    616     rg.total_byte_size = sum([c.meta_data.total_uncompressed_size for c in

~/.conda/envs/anaconda3/lib/python3.6/site-packages/fastparquet/writer.py in write_column(f, data, selement, compression)
    545                                    data_page_header=dph, crc=None)
    546 
--> 547     write_thrift(f, ph)
    548     f.write(bdata)
    549 

~/.conda/envs/anaconda3/lib/python3.6/site-packages/fastparquet/thrift_structures.py in write_thrift(fobj, thrift)
     49     pout = TCompactProtocol(fobj)
     50     try:
---> 51         thrift.write(pout)
     52         fail = False
     53     except TProtocolException as e:

~/.conda/envs/anaconda3/lib/python3.6/site-packages/fastparquet/parquet_thrift/parquet/ttypes.py in write(self, oprot)
   1028     def write(self, oprot):
   1029         if oprot._fast_encode is not None and self.thrift_spec is not None:
-> 1030             oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
   1031             return
   1032         oprot.writeStructBegin('PageHeader')

OverflowError: int out of range
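
The fastparquet write signature in the traceback above shows a row_group_offsets parameter. In case the overflow comes from a single over-large page, a minimal, unverified sketch would be to write directly with fastparquet.write and smaller row groups (the 1,000,000-row figure is just a guess):

from fastparquet import write

# unverified sketch: smaller row groups may keep each page's sizes
# within the 32-bit range that overflows above
write(tmp_file, df, row_group_offsets=1000000, compression='gzip')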
asked Jun 10 '18 by kenissur


1 Answer

It seems you managed to write the data with pyarrow but not to read it back, while with fastparquet the write itself failed, so you never reached the read. I suggest writing the data with pyarrow and reading it back in chunks with fastparquet, iterating over the row groups:

from fastparquet import ParquetFile

# write once with pyarrow, which handled the 10^8-row write fine
df.to_parquet(tmp_file, engine='pyarrow', compression='gzip')

# read back with fastparquet, one row group at a time
pf = ParquetFile(tmp_file)
for df in pf.iter_row_groups():
    print(df.head(n=10))
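
One caveat: for the row-group iteration to actually bound memory, the pyarrow-written file needs more than one row group. pandas forwards extra keywords such as row_group_size to pyarrow.parquet.write_table, so the write could look like the sketch below (the 1,000,000 rows per group is only an illustrative choice, not something I have benchmarked):

df.to_parquet(tmp_file, engine='pyarrow', compression='gzip',
              row_group_size=1000000)  # several smaller row groups
pf = ParquetFile(tmp_file)
for chunk in pf.iter_row_groups():     # each chunk is a pandas DataFrame
    print(chunk.head(n=10))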
answered Sep 22 '22 by Liana Ziskind