1 file changed, +5 −5 lines changed
201  201        }
202  202      ],
203  203      "source": [
204       -    "print(f'X array: {os.path.getsize(\"./data/X_on_disk.npy\"):12} bytes ({bytes2GB(os.path.getsize(\"./data/X_on_disk.npy\")):3.2f} GB)')\n",
205       -    "print(f'y array: {os.path.getsize(\"./data/y_on_disk.npy\"):12} bytes ({bytes2GB(os.path.getsize(\"./data/y_on_disk.npy\")):3.2f} GB)')"
     204  +    "print(f'X array: {os.path.getsize(\"./data/X_on_disk.npy\"):12} bytes ({bytes2str(os.path.getsize(\"./data/X_on_disk.npy\"))})')\n",
     205  +    "print(f'y array: {os.path.getsize(\"./data/y_on_disk.npy\"):12} bytes ({bytes2str(os.path.getsize(\"./data/y_on_disk.npy\"))})')\n"
206  206      ]
207  207     },
208  208     {
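The change above swaps the fixed-GB `bytes2GB(...):3.2f` formatting for a `bytes2str(...)` call, so the printed size no longer carries a hard-coded `GB` suffix. The helper below is a hypothetical stand-in for `bytes2str` (its real definition presumably lives elsewhere in the notebook or an imported library); it is assumed to pick an adaptive unit for a raw byte count, which is what the new f-strings rely on.

```python
# Hypothetical stand-in for the notebook's bytes2str helper (an assumption,
# not the notebook's actual definition): convert a raw byte count into a
# human-readable string with an adaptive binary unit.
def bytes2str(size):
    for unit in ("B", "KB", "MB", "GB"):
        if size < 1024:
            return f"{size:3.2f} {unit}"
        size /= 1024
    return f"{size:3.2f} TB"

print(bytes2str(123_456_789))  # -> "117.74 MB"

# Usage mirroring the updated cell (the ./data/*.npy paths are the notebook's):
# print(f"X array: {os.path.getsize('./data/X_on_disk.npy'):12} bytes "
#       f"({bytes2str(os.path.getsize('./data/X_on_disk.npy'))})")
```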
299  299        }
300  300      ],
301  301      "source": [
302       -    "print(f'X array on disk: {sys.getsizeof(X_on_disk):12} bytes ({bytes2GB(sys.getsizeof(X_on_disk)):3.3f} GB)')\n",
303       -    "print(f'y array on disk: {sys.getsizeof(y_on_disk):12} bytes ({bytes2GB(sys.getsizeof(y_on_disk)):3.3f} GB)')"
     302  +    "print(f'X array on disk: {sys.getsizeof(X_on_disk):12} bytes ({bytes2str(sys.getsizeof(X_on_disk))})')\n",
     303  +    "print(f'y array on disk: {sys.getsizeof(y_on_disk):12} bytes ({bytes2str(sys.getsizeof(y_on_disk))})')"
304  304      ]
305  305     },
306  306     {
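This second cell applies the same formatting change to `sys.getsizeof`, which the notebook uses to show how little RAM the memory-mapped arrays occupy. The sketch below, with assumed shapes, dtype, and file name, illustrates why those numbers stay tiny: a `np.memmap` does not own its data buffer, so `sys.getsizeof` should count only the small array wrapper, while an ordinary in-memory array also counts its data.

```python
# Minimal sketch (assumed shapes, dtype, and file name) contrasting sys.getsizeof
# for an in-memory ndarray vs. a np.memmap backed by a file on disk.
import sys
import numpy as np

shape, dtype = (10_000, 100), np.float32

X_in_memory = np.zeros(shape, dtype=dtype)                   # data held in RAM
X_on_disk = np.memmap("X_demo.dat", dtype=dtype, mode="w+",  # hypothetical file name
                      shape=shape)                           # data held on disk

# The in-memory array should report roughly its raw data size (~4 MB here);
# the memmap should report only the small ndarray wrapper, because its buffer
# is the memory-mapped file rather than data the array owns.
print(f"in-memory array: {sys.getsizeof(X_in_memory):12} bytes")
print(f"memmap array:    {sys.getsizeof(X_on_disk):12} bytes")
```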
12251225 " Let's now check performance of the combined process: slicing plus conversion to a tensor. Based on what we've seen there are 3 options: \n " ,
12261226 " \n " ,
12271227 " - slice np.array in memory + conversion to tensor\n " ,
1228- " - slice np.memamap on disk + conversion to tensor\n " ,
1228+ " - slice np.memmap on disk + conversion to tensor\n " ,
12291229 " - slice itemified np.memmap + converion to tensor"
12301230 ]
12311231 },
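The markdown cell above lists three slicing-plus-conversion options. Below is a rough timing sketch for the first two; the third, slicing an "itemified" np.memmap, depends on a notebook-specific transform and is not reproduced here. The tensor library is assumed to be PyTorch, and the shapes, dtype, batch size, and file name are illustrative assumptions rather than the notebook's actual values.

```python
# Rough timing sketch: slice an in-memory array vs. a file-backed memmap,
# then wrap the resulting batch in a torch tensor.
import timeit
import numpy as np
import torch

shape = (10_000, 50, 10)
X_in_memory = np.random.rand(*shape).astype(np.float32)

# Write the same data to a memory-mapped file so both sources are comparable.
X_on_disk = np.memmap("X_bench.dat", dtype=np.float32, mode="w+", shape=shape)
X_on_disk[:] = X_in_memory
X_on_disk.flush()

idx = np.random.choice(shape[0], 64, replace=False)  # a random mini-batch of rows

def slice_memory_to_tensor():
    # Option 1: fancy-index the in-memory array (makes a copy), wrap it in a tensor.
    return torch.from_numpy(X_in_memory[idx])

def slice_memmap_to_tensor():
    # Option 2: fancy-index the memmap; the selected rows are read from disk
    # into a regular in-memory copy, which is then wrapped in a tensor.
    return torch.from_numpy(X_on_disk[idx])

for fn in (slice_memory_to_tensor, slice_memmap_to_tensor):
    per_call = timeit.timeit(fn, number=100) / 100
    print(f"{fn.__name__:25}: {per_call * 1e3:.3f} ms per batch")
```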