out-of-memory checkpointing
goal: results should not (never? or only in a weak, small cache?) be stored in an in-memory memo table, so that memo table is absent from this implementation: all memo queries go to the sqlite3 database instead.

this drives some blurring between in-memory caching and disk-based checkpointing: the previous disk-based checkpointing model relied on repopulating the in-memory memo table cache...

i hit some thread problems when using one sqlite3 connection across threads, and the docs are unclear about what i can and cannot do, so i made this open the sqlite3 database on every access. that probably costs quite a lot of performance, but it's probably enough for basic validation of the idea.
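As an aside on the threading note above, here is a minimal sketch contrasting the per-access pattern this commit adopts with sqlite3's documented check_same_thread escape hatch. The table and database path match the new module below; the functions themselves are hypothetical illustrations, not committed code.

import sqlite3
import threading

DB_PATH = "checkpoint.sqlite3"  # matches the path used by the new memoizer

def lookup(key: str):
    # pattern used in this commit: open, query, close on every access,
    # so no connection object is ever shared between threads
    connection = sqlite3.connect(DB_PATH)
    try:
        return connection.execute(
            "SELECT result FROM checkpoints WHERE key = ?", (key,)
        ).fetchone()
    finally:
        connection.close()

# the long-lived alternative: one connection opened with
# check_same_thread=False, with callers serialized by a lock
_shared = sqlite3.connect(DB_PATH, check_same_thread=False)
_lock = threading.Lock()

def lookup_shared(key: str):
    with _lock:
        return _shared.execute(
            "SELECT result FROM checkpoints WHERE key = ?", (key,)
        ).fetchone()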
benclifford committed Aug 22, 2024
1 parent 9ff13d7 commit 23ff9ce
Showing 4 changed files with 168 additions and 12 deletions.
20 changes: 9 additions & 11 deletions parsl/dataflow/memoization.py
@@ -6,7 +6,7 @@
import pickle
import threading
from functools import lru_cache, singledispatch
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
+from typing import TYPE_CHECKING, Dict, List, Optional, Sequence

import typeguard

@@ -164,13 +164,13 @@ def start(self, *, dfk: DataFlowKernel, memoize: bool = True, checkpoint_files:
    def close(self) -> None:
        raise NotImplementedError

-    def update_memo(self, task: TaskRecord, r: Future[Any]) -> None:
+    def update_memo(self, task: TaskRecord, r: Future) -> None:
        raise NotImplementedError

-    def checkpoint(self, tasks: Sequence[TaskRecord]) -> str:
+    def checkpoint(self, tasks: Sequence[TaskRecord]) -> None:
        raise NotImplementedError

-    def check_memo(self, task: TaskRecord) -> Optional[Future[Any]]:
+    def check_memo(self, task: TaskRecord) -> Optional[Future]:
        raise NotImplementedError


@@ -242,7 +242,7 @@ def start(self, *, dfk: DataFlowKernel, memoize: bool = True, checkpoint_files:
    def close(self) -> None:
        pass  # nothing to close but more should move here

-    def check_memo(self, task: TaskRecord) -> Optional[Future[Any]]:
+    def check_memo(self, task: TaskRecord) -> Optional[Future]:
        """Create a hash of the task and its inputs and check the lookup table for this hash.

        If present, the results are returned.

@@ -277,7 +277,7 @@ def check_memo(self, task: TaskRecord) -> Optional[Future[Any]]:
        assert isinstance(result, Future) or result is None
        return result

-    def hash_lookup(self, hashsum: str) -> Future[Any]:
+    def hash_lookup(self, hashsum: str) -> Future:
        """Lookup a hash in the memoization table.

        Args:

@@ -291,7 +291,7 @@ def hash_lookup(self, hashsum: str) -> Future[Any]:
        """
        return self.memo_lookup_table[hashsum]

-    def update_memo(self, task: TaskRecord, r: Future[Any]) -> None:
+    def update_memo(self, task: TaskRecord, r: Future) -> None:
        """Updates the memoization lookup table with the result from a task.

        Args:

@@ -316,7 +316,7 @@ def update_memo(self, task: TaskRecord, r: Future[Any]) -> None:
        logger.debug(f"Storing app cache entry {task['hashsum']} with result from task {task_id}")
        self.memo_lookup_table[task['hashsum']] = r

-    def _load_checkpoints(self, checkpointDirs: Sequence[str]) -> Dict[str, Future[Any]]:
+    def _load_checkpoints(self, checkpointDirs: Sequence[str]) -> Dict[str, Future]:
        """Load a checkpoint file into a lookup table.

        The data being loaded from the pickle file mostly contains input

@@ -388,7 +388,7 @@ def load_checkpoints(self, checkpointDirs: Optional[Sequence[str]]) -> Dict[str,
        else:
            return {}

-    def checkpoint(self, tasks: Sequence[TaskRecord]) -> str:
+    def checkpoint(self, tasks: Sequence[TaskRecord]) -> None:
        """Checkpoint the dfk incrementally to a checkpoint file.

        When called, every task that has been completed yet not

@@ -457,8 +457,6 @@ def checkpoint(self, tasks: Sequence[TaskRecord]) -> str:
        else:
            logger.info("Done checkpointing {} tasks".format(count))

-        return checkpoint_dir
-
    def filter_for_checkpoint(self, app_fu: AppFuture) -> bool:
        """Overridable method to decide if an entry should be checkpointed"""
        return app_fu.exception() is None
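Read together, the retyped signatures above describe the plugin surface a Memoizer implementation is now expected to provide. A sketch of that abstract interface, reconstructed from the hunks in this file rather than copied verbatim (TaskRecord and DataFlowKernel are the parsl types, quoted here so the sketch stands alone):

from concurrent.futures import Future
from typing import Optional, Sequence

class Memoizer:
    def start(self, *, dfk: "DataFlowKernel", memoize: bool = True,
              checkpoint_files: Sequence[str], run_dir: str) -> None:
        raise NotImplementedError

    def close(self) -> None:
        raise NotImplementedError

    def update_memo(self, task: "TaskRecord", r: Future) -> None:
        raise NotImplementedError

    def checkpoint(self, tasks: Sequence["TaskRecord"]) -> None:
        raise NotImplementedError

    def check_memo(self, task: "TaskRecord") -> Optional[Future]:
        raise NotImplementedError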
112 changes: 112 additions & 0 deletions parsl/dataflow/memosql.py
@@ -0,0 +1,112 @@
import logging
import pickle
import sqlite3
from concurrent.futures import Future
from pathlib import Path
from typing import Optional, Sequence

from parsl.dataflow.dflow import DataFlowKernel
from parsl.dataflow.memoization import Memoizer, make_hash
from parsl.dataflow.taskrecord import TaskRecord

logger = logging.getLogger(__name__)


class SQLiteMemoizer(Memoizer):
    """Memoize out of memory into an sqlite3 database.
    TODO: probably going to need some kind of shutdown now, to close
    the sqlite3 connection.
    which might also be useful for driving final checkpoints in the
    original impl?
    """

    def start(self, *, dfk: DataFlowKernel, memoize: bool = True, checkpoint_files: Sequence[str], run_dir: str) -> None:
        """TODO: run_dir is the per-workflow run dir, but we need a broader checkpoint context... one level up
        by default... get_all_checkpoints uses "runinfo/" as a relative path for that by default so replicating
        that choice would do here. likewise I think for monitoring."""

        self.db_path = Path(dfk.config.run_dir) / "checkpoint.sqlite3"
        logger.debug("starting with db_path %r", self.db_path)

        # TODO: api wart... turning memoization on or off should not be part of the plugin API
        self.memoize = memoize

        connection = sqlite3.connect(self.db_path)
        cursor = connection.cursor()

        cursor.execute("CREATE TABLE IF NOT EXISTS checkpoints(key, result)")
        # probably want some index on key because that's what we're doing all the access via.

        connection.commit()
        connection.close()
        logger.debug("checkpoint table created")

    def close(self):
        pass

    def checkpoint(self, tasks: Sequence[TaskRecord]) -> None:
        """All the behaviour for this memoizer is in check_memo and update_memo.
        """
        logger.debug("Explicit checkpoint call is a no-op with this memoizer")

    def check_memo(self, task: TaskRecord) -> Optional[Future]:
        """TODO: document this: check_memo is required to set the task hashsum,
        if that's how we're going to key checkpoints in update_memo. (that's not
        a requirement though: other equalities are available."""
        task_id = task['id']
        hashsum = make_hash(task)
        logger.debug("Task {} has memoization hash {}".format(task_id, hashsum))
        task['hashsum'] = hashsum

        connection = sqlite3.connect(self.db_path)
        cursor = connection.cursor()
        cursor.execute("SELECT result FROM checkpoints WHERE key = ?", (hashsum, ))
        r = cursor.fetchone()

        if r is None:
            connection.close()
            return None
        else:
            data = pickle.loads(r[0])
            connection.close()

            memo_fu: Future = Future()

            if data['exception'] is None:
                memo_fu.set_result(data['result'])
            else:
                assert data['result'] is None
                memo_fu.set_exception(data['exception'])

            return memo_fu

    def update_memo(self, task: TaskRecord, r: Future) -> None:
        logger.debug("updating memo")

        if not self.memoize or not task['memoize'] or 'hashsum' not in task:
            logger.debug("preconditions for memo not satisfied")
            return

        if not isinstance(task['hashsum'], str):
            logger.error(f"Attempting to update app cache entry but hashsum is not a string key: {task['hashsum']}")
            return

        app_fu = task['app_fu']
        hashsum = task['hashsum']

        # this comes from the original concatenation-based checkpoint code:
        if app_fu.exception() is None:
            t = {'hash': hashsum, 'exception': None, 'result': app_fu.result()}
        else:
            t = {'hash': hashsum, 'exception': app_fu.exception(), 'result': None}

        value = pickle.dumps(t)

        connection = sqlite3.connect(self.db_path)
        cursor = connection.cursor()

        cursor.execute("INSERT INTO checkpoints VALUES(?, ?)", (hashsum, value))

        connection.commit()
        connection.close()
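The in-code TODO about indexing the key column suggests an easy follow-up. A hedged sketch of what start() could add (the index name is invented and none of this is in the commit):

import sqlite3

def init_checkpoint_db(db_path) -> None:
    # same schema as SQLiteMemoizer.start(), plus an index so that
    # check_memo's SELECT ... WHERE key = ? becomes an index lookup
    # rather than a full scan of the checkpoints table
    connection = sqlite3.connect(db_path)
    cursor = connection.cursor()
    cursor.execute("CREATE TABLE IF NOT EXISTS checkpoints(key, result)")
    cursor.execute("CREATE INDEX IF NOT EXISTS checkpoints_key ON checkpoints(key)")
    connection.commit()
    connection.close()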
4 changes: 3 additions & 1 deletion parsl/tests/configs/htex_local_alternate.py
@@ -23,6 +23,7 @@
from parsl.data_provider.ftp import FTPInTaskStaging
from parsl.data_provider.http import HTTPInTaskStaging
from parsl.data_provider.zip import ZipFileStaging
+from parsl.dataflow.memosql import SQLiteMemoizer
from parsl.executors import HighThroughputExecutor
from parsl.launchers import SingleNodeLauncher

@@ -66,7 +67,8 @@ def fresh_config():
            monitoring_debug=False,
            resource_monitoring_interval=1,
        ),
-        usage_tracking=True
+        usage_tracking=True,
+        memoizer=SQLiteMemoizer()
    )
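For trying the branch outside the test suite, wiring the memoizer into a workflow would presumably look like this minimal sketch (the memoizer= Config parameter only exists on this branch, per the diff above):

import parsl
from parsl.config import Config
from parsl.dataflow.memosql import SQLiteMemoizer
from parsl.executors.threads import ThreadPoolExecutor

config = Config(
    executors=[ThreadPoolExecutor()],
    memoizer=SQLiteMemoizer(),  # memo lookups hit <run_dir>/checkpoint.sqlite3
)
parsl.load(config)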
44 changes: 44 additions & 0 deletions parsl/tests/test_checkpointing/test_python_checkpoint_2_sqlite.py
@@ -0,0 +1,44 @@
import contextlib
import os

import pytest

import parsl
from parsl import python_app
from parsl.dataflow.memosql import SQLiteMemoizer
from parsl.tests.configs.local_threads_checkpoint import fresh_config


@contextlib.contextmanager
def parsl_configured(run_dir, **kw):
    c = fresh_config()
    c.memoizer = SQLiteMemoizer()
    c.run_dir = run_dir
    for config_attr, config_val in kw.items():
        setattr(c, config_attr, config_val)
    dfk = parsl.load(c)
    for ex in dfk.executors.values():
        ex.working_dir = run_dir
    yield dfk

    parsl.dfk().cleanup()


@python_app(cache=True)
def uuid_app():
    import uuid
    return uuid.uuid4()


@pytest.mark.local
def test_loading_checkpoint(tmpd_cwd):
    """Load memoization table from previous checkpoint
    """
    with parsl_configured(tmpd_cwd, checkpoint_mode="task_exit"):
        checkpoint_files = [os.path.join(parsl.dfk().run_dir, "checkpoint")]
        result = uuid_app().result()

    with parsl_configured(tmpd_cwd, checkpoint_files=checkpoint_files):
        relaunched = uuid_app().result()

    assert result == relaunched, "Expected following call to uuid_app to return cached uuid"
