From 9dc5ed40faf3ea4d97edc559001cb2df3ff5a5fb Mon Sep 17 00:00:00 2001 From: Michal Mielewczyk Date: Tue, 10 Sep 2024 16:39:13 +0200 Subject: [PATCH] Introduce optimistic fast path for engine_rd Signed-off-by: Avi Halaf Signed-off-by: Robert Baldyga Signed-off-by: Michal Mielewczyk --- src/concurrency/ocf_cache_line_concurrency.c | 2 +- src/engine/engine_rd.c | 43 ++++++++++++++++++++ src/engine/engine_rd.h | 2 + src/ocf_core.c | 34 ++++++++++++++++ 4 files changed, 80 insertions(+), 1 deletion(-)
diff --git a/src/concurrency/ocf_cache_line_concurrency.c b/src/concurrency/ocf_cache_line_concurrency.c index c802a0a4..e9244fb3 100644 --- a/src/concurrency/ocf_cache_line_concurrency.c +++ b/src/concurrency/ocf_cache_line_concurrency.c @@ -33,7 +33,7 @@ static ocf_cache_line_t ocf_cl_lock_line_get_entry( return req->map[index].coll_idx; } -static int ocf_cl_lock_line_fast(struct ocf_alock *alock, +int ocf_cl_lock_line_fast(struct ocf_alock *alock, struct ocf_request *req, int rw) { int32_t i;
diff --git a/src/engine/engine_rd.c b/src/engine/engine_rd.c index 3cf67c75..5ae6e2ed 100644 --- a/src/engine/engine_rd.c +++ b/src/engine/engine_rd.c @@ -258,3 +258,46 @@ int ocf_read_generic(struct ocf_request *req) return 0; } + +int ocf_read_generic_try_fast(struct ocf_request *req) +{ + struct ocf_alock *c = ocf_cache_line_concurrency(req->cache); + + /* Calculate hashes for hash-bucket locking */ + ocf_req_hash(req); + + /* Read-lock hash buckets associated with request target core & LBAs + * (core lines) to assure that cache mapping for these core lines does + * not change during traversation */ + ocf_hb_req_prot_lock_rd(req); + + /* check CL status */ + ocf_engine_lookup(req); + + if (ocf_engine_is_mapped(req) && ocf_engine_is_hit(req) && + ocf_cl_lock_line_fast(c, req, OCF_READ) == OCF_LOCK_ACQUIRED) { + + OCF_DEBUG_RQ(req, "Submit read generic fast"); + + ocf_req_get(req); + ocf_engine_set_hot(req); + ocf_hb_req_prot_unlock_rd(req); + + if (ocf_engine_needs_repart(req)) { + ocf_hb_req_prot_lock_wr(req); + ocf_user_part_move(req); + ocf_hb_req_prot_unlock_wr(req); + } + + ocf_read_generic_submit_hit(req); + + /* Update statistics */ + ocf_engine_update_request_stats(req); + ocf_engine_update_block_stats(req); + return OCF_FAST_PATH_YES; + } else { + ocf_hb_req_prot_unlock_rd(req); + OCF_DEBUG_RQ(req, "Failed to read generic fast"); + return OCF_FAST_PATH_NO; + } +}
diff --git a/src/engine/engine_rd.h b/src/engine/engine_rd.h index 8cb4007a..4f72c29c 100644 --- a/src/engine/engine_rd.h +++ b/src/engine/engine_rd.h @@ -1,5 +1,6 @@ /* * Copyright(c) 2012-2021 Intel Corporation + * Copyright(c) 2024 Huawei Technologies * SPDX-License-Identifier: BSD-3-Clause */ @@ -9,5 +10,6 @@ int ocf_read_generic(struct ocf_request *req); void ocf_read_generic_submit_hit(struct ocf_request *req); +int ocf_read_generic_try_fast(struct ocf_request *req); #endif /* ENGINE_RD_H_ */
diff --git a/src/ocf_core.c b/src/ocf_core.c index ee88efa7..b2e22b46 100644 --- a/src/ocf_core.c +++ b/src/ocf_core.c @@ -10,9 +10,12 @@ #include "ocf_io_priv.h" #include "metadata/metadata.h" #include "engine/cache_engine.h" +#include "engine/engine_rd.h" #include "utils/utils_user_part.h" #include "ocf_request.h" +#define MAX_FAST_PATH_CACHE_LINES (64) + struct ocf_core_volume { ocf_core_t core; }; @@ -218,6 +221,29 @@ static void ocf_req_complete(struct ocf_request *req, int error) ocf_io_put(&req->ioi.io); } +static inline int _ocf_core_submit_io_fast_rd_generic(struct ocf_io *io, + struct ocf_request *req) +{ + int res; + + switch (req->cache_mode) { + case ocf_req_cache_mode_wt: + case ocf_req_cache_mode_wa: + case ocf_req_cache_mode_wi: + case ocf_req_cache_mode_wb: + case ocf_req_cache_mode_wo: + res = ocf_read_generic_try_fast(req); + break; + default: + break; + } + + if (res == OCF_FAST_PATH_NO) + ocf_req_clear_map(req); + + return res; +} + static inline ocf_req_cache_mode_t _ocf_core_req_resolve_fast_mode( ocf_cache_t cache, struct ocf_request *req) { @@ -248,6 +274,14 @@ static int ocf_core_submit_io_fast(struct ocf_io *io, struct ocf_request *req, if (req->cache_mode == ocf_req_cache_mode_pt) return OCF_FAST_PATH_NO; + /* If a read request isn't too big for a lookup in submission context, check it is a read-hit and if cache line lock could be acquired without waiting. If so, submit immediately */ + if (req->rw == OCF_READ) { + if (req->core_line_count <= MAX_FAST_PATH_CACHE_LINES) + return _ocf_core_submit_io_fast_rd_generic(io, req); + } + resolved_mode = _ocf_core_req_resolve_fast_mode(cache, req); if (resolved_mode == ocf_req_cache_mode_max) return OCF_FAST_PATH_NO;