# Initialize data.
# NOTE: sizes are element counts, so they must be ints — 1e6 is a float
# and would make chunk offsets floats too.
data_size = 10**6
server_count = 1000
# Floor division keeps chunk_size an int (valid as a count/offset).
chunk_size = data_size // server_count
data = zbigarray(data_size)

# Process data in parallel on each server (Map Reduce, Batch, etc).
# range() is required: iterating a bare int raises TypeError.
for server in range(server_count):
    # Each server handles one contiguous chunk: [server*chunk_size, +chunk_size).
    data.activate().process(server * chunk_size, chunk_size)
# Numpy: plain in-RAM array.
np.ndarray(shape=(2, 2), dtype="float")
# Out-of-core data: shape dimensions must be ints — 1e18 is a float and
# is rejected as an array dimension.
ZBigArray(shape=(10**18, 2), dtype="float")
# Full out-of-core (both dimensions huge, integer-valued).
ZBigArray(shape=(10**9, 2 * 10**9), dtype="float")