From 290c87555d6764a4515414104e465326be80d960 Mon Sep 17 00:00:00 2001 From: UIMSolutions Date: Thu, 2 Jan 2025 10:25:59 +0100 Subject: [PATCH] Add uim.caches.classes.engines package with initial modules and README --- caches/README.md | 5 + caches/uim/caches/classes/caches/cache.d | 409 ++++++++++++++++++- caches/uim/caches/classes/engines/README.md | 5 + caches/uim/caches/classes/engines/apcu.dx | 147 +++++++ caches/uim/caches/classes/engines/array_.dx | 126 ++++++ caches/uim/caches/classes/engines/engine.d | 189 +++++++++ caches/uim/caches/classes/engines/file.d | 345 ++++++++++++++++ caches/uim/caches/classes/engines/file.dx | 345 ++++++++++++++++ caches/uim/caches/classes/engines/memory.d | 345 ++++++++++++++++ caches/uim/caches/classes/engines/null_.d | 24 +- caches/uim/caches/classes/engines/package.d | 15 +- caches/uim/caches/classes/engines/redis.dx | 278 +++++++++++++ caches/uim/caches/classes/engines/registry.d | 2 +- caches/uim/caches/helpers/cache.d | 16 + caches/uim/caches/helpers/engine.d | 16 + caches/uim/caches/helpers/package.d | 4 + caches/uim/caches/tests/package.d | 4 +- 17 files changed, 2267 insertions(+), 8 deletions(-) create mode 100644 caches/README.md create mode 100644 caches/uim/caches/classes/engines/README.md create mode 100644 caches/uim/caches/classes/engines/apcu.dx create mode 100644 caches/uim/caches/classes/engines/array_.dx create mode 100644 caches/uim/caches/classes/engines/file.d create mode 100644 caches/uim/caches/classes/engines/file.dx create mode 100644 caches/uim/caches/classes/engines/memory.d create mode 100644 caches/uim/caches/classes/engines/redis.dx create mode 100644 caches/uim/caches/helpers/cache.d create mode 100644 caches/uim/caches/helpers/engine.d diff --git a/caches/README.md b/caches/README.md new file mode 100644 index 0000000000..db81057ebf --- /dev/null +++ b/caches/README.md @@ -0,0 +1,5 @@ +# Library 📚 uim-caches + 
+[![D](https://github.com/UIMSolutions/uim/actions/workflows/uim-caches.yml/badge.svg)](https://github.com/UIMSolutions/uim/actions/workflows/uim-caches.yml) [![License](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) + +This library enables the use of caching. Caching can be used to speed up reading of expensive or slow resources by maintaining a second copy of the required data in a faster or tighter storage system. For example, the results of queries or remote web service accesses that do not change frequently can be stored in a cache. Once the data is in the cache, reading data from the cache is much more performant than accessing the remote resource. diff --git a/caches/uim/caches/classes/caches/cache.d b/caches/uim/caches/classes/caches/cache.d index ede4d3d42a..354975cc58 100644 --- a/caches/uim/caches/classes/caches/cache.d +++ b/caches/uim/caches/classes/caches/cache.d @@ -43,4 +43,411 @@ import uim.caches; */ class DCache : UIMObject, ICache { mixin(CacheThis!()); -} \ No newline at end of file + + /* + override bool initialize(Json[string] initData = null) { + if (!super.initialize(initData)) { + return false; + } + + // An array mapping URL schemes to fully qualified caching engine class names. + _dsnClassMap = [ + /* "array": ArrayCacheEngine.classname, + "apcu": ApcuCacheEngine.classname, + "file": FileCacheEngine.classname, * / + "memcached": MemoryCacheEngine.classname, + "memory": MemoryCacheEngine.classname, + "null": NullCacheEngine.classname, + /* "redis": RedisCacheEngine.classname, * / + ]; + + return true; + } + + // An array mapping URL schemes to fully qualified caching engine class names. + protected static STRINGAA _dsnClassMap; + + // #region enable + // Flag for tracking whether caching is enabled. + protected static bool _enabled = true; + // Re-enable caching. + static void enable() { + _enabled = true; + } + + // Disable caching. 
+ static void disable() { + _enabled = false; + } + + // Check whether caching is enabled. + static bool enabled() { + return _enabled; + } + // #endregion enable + + // Group to Config mapping + protected static Json _groups = null; + + // Cache Registry used for creating and using cache adapters. + protected static DCacheRegistry _registry; + + // Returns the Cache Registry instance used for creating and using cache adapters. + static DCacheRegistry getRegistry() { + return _registry ? _registry : new DCacheRegistry(); + } + + /** + * Sets the Cache Registry instance used for creating and using cache adapters. + * Also allows for injecting of a new registry instance. + * / + static void setRegistry(DCacheRegistry cacheRegistry) { + _registry = cacheRegistry; + } + + // Finds and builds the instance of the required engine class. + protected /* static * / void _buildEngine(string configName) { + auto myRegistry = getRegistry(); + + // TODO + /* if (configuration.isEmpty(configName~".classname")) { + throw new DInvalidArgumentException( + "The `%s` cache configuration does not exist." 
+ .format(configName) + ); + } * / + + auto configData = configuration.get(configName); + // TODO + /* try { + myRegistry.load(configName, configData); + } catch (RuntimeException exception) { + if (!hasKey("fallback", configData)) { + myRegistry.set(configName, new DNullEngine()); + trigger_error(exception.message(), ERRORS.USER_WARNING); + + return; + } + if (!configuration.hasKey("fallback")) { + throw exception; + } + if (configuration.getString("fallback") == configName) { + throw new DInvalidArgumentException( + "`%s` cache configuration cannot fallback to it" + .format(configName), 0, exception); + } + auto myfallbackEngine = pool(configuration.get("fallback")).clone; + assert(cast(DCacheEngine)myfallbackEngine); + + configuration + .merge("groups", Json.emptyArray) + .merge("prefix", ""); + + myfallbackEngine.configuration.set("groups", mynewConfig["groups"], false); + if (mynewConfig["prefix"]) { + myfallbackEngine.configuration.set("prefix", mynewConfig["prefix"], false); + } + myRegistry.set(configName, myfallbackEngine); + } */ + /* if (cast(DCacheEngine)configuration.get("classname")) { + configData = configuration.get("classname").configuration.data; + } */ + /* if (!configuration.isEmpty("groups")) { + (cast(DArrayData)configuration.get("groups")).values.each!((groupName) { + _groups[groupName).concat( configName; + _groups.set(groupName, _groups[groupName].unique); + _groups[groupName].sort; + }); + } * / + } + + // Get a SimpleCacheEngine object for the named cache pool. + /* static ICache&ICacheEngine pool(string configName) { + if (!_enabled) { + return new DNullEngine(); + } + myRegistry = getRegistry(); + + if (myRegistry.{configName} !is null) { + return myRegistry.{configName}; + } + _buildEngine(configName); + + return myRegistry.{configName}; + } */ + + /** + * Write data for key into cache. 
+ * + * ### Usage: + * + * Writing to the active cache config: + * + * ``` + * Cache.write("cached_data", mydata); + * ``` + * + * Writing to a specific cache config: + * + * ``` + * Cache.write("cached_data", mydata, "long_term"); + * ``` + * / + static bool write(string key, Json dataToCache, string configName = "default") { + // TODO + /* if (isResource(dataToCache)) { + return false; + } */ + /* auto mybackend = pool(configName); + auto wasSuccessful = mybackend.set(key, dataToCache); + if (!wasSuccessful && dataToCache != "") { + throw new DCacheWriteException( + "%s cache was unable to write '%s' to %s cache" + .format( + configName, + key, + get_class(mybackend) + )); + } + return wasSuccessful; * / + return false; + } + + /** + * Write data for many keys into cache. + * + * ### Usage: + * + * Writing to the active cache config: + * + * ``` + * Cache.writeMany(["cached_data_1": 'data 1", "cached_data_2": 'data 2"]); + * ``` + * + * Writing to a specific cache config: + * + * ``` + * Cache.writeMany(["cached_data_1": 'data 1", "cached_data_2": 'data 2"], "long_term"); + * ``` + */ + /* static bool writeMany(Json[data] dataToStore, string configName = "default") { + return pool(configName).set(dataToStore); + } */ + + /** + * Read a key from the cache. + * + * ### Usage: + * + * Reading from the active cache configuration. + * + * ``` + * Cache.read("_data"); + * ``` + * + * Reading from a specific cache configuration. + * + * ``` + * Cache.read("_data", "long_term"); + * ``` + */ + /* static Json read(string key, string configName = "default") { + return pool(configName).get(key); + } */ + + /** + * Read multiple keys from the cache. + * + * ### Usage: + * + * Reading multiple keys from the active cache configuration. + * + * ``` + * Cache.readMany(["_data_1", "_data_2]); + * ``` + * + * Reading from a specific cache configuration. 
+ * + * ``` + * Cache.readMany(["_data_1", "_data_2], "long_term"); + * ``` + * / + static Json readMany(string[] keysToFetch, string configName = "default") { + return Json(null); + // TODO return pool(configName).items(keysToFetch); + } + + // Increment a number under the key and return incremented value. + /* static int|false increment(string key, int incValue = 1, string configName = "default") { + if (incValue < 0) { + throw new DInvalidArgumentException("Offset cannot be less than `0`."); + } + return pool(configName).increment(key, incValue); + } */ + + // Decrement a number under the key and return decremented value. + /* static long decrement(string itemKey, int decValue = 1, string configName = "default") { + if (decValue < 0) { + throw new DInvalidArgumentException("Offset cannot be less than `0`."); + } + return pool(configName).decrement(itemKey, decValue); + } */ + + /** + * Delete a key from the cache. + * + * ### Usage: + * + * Deleting from the active cache configuration. + * + * ``` + * Cache.removeKey("_data"); + * ``` + * + * Deleting from a specific cache configuration. + * + * ``` + * Cache.removeKey("_data", "long_term"); + * ``` + */ + /* static bool removeKey(string key, string configName = "default") { + return pool(configName).removeKey(key); + } */ + + /** + * Delete many keys from the cache. + * + * ### Usage: + * + * Deleting multiple keys from the active cache configuration. + * + * ``` + * Cache.deleteMany(["_data_1", "_data_2"]); + * ``` + * + * Deleting from a specific cache configuration. + * + * ``` + * Cache.deleteMany(["_data_1", "_data_2], "long_term"); + * ``` + * / + static bool deleteMany(string[] someKeys, string configName = "default") { + // TODO return pool(configName).removeKey(someKeys); + return false; + } + + /** + * Delete all keys from the cache. 
+ * Params: + * returns True if the cache was successfully cleared, false otherwise + * / + static bool clear(string configName = "default") { + // TODO return pool(configName).clear(); + return false; + } + + /** + * Delete all keys from the cache from all configurations. + * + * Status code. For each configuration, it reports the status of the operation + * / + static bool[string] clearAll() { + bool[string] mystatus; + + // TODO configured().each!(configName => mystatus[configName] = clear(configName)); + + return mystatus; + } + + // Delete all keys from the cache belonging to the same group. + static bool clearGroup(string groupName, string configName = "default") { + // TODO return pool(configName).clearGroup(groupName); + return false; + } + + /** + * Retrieve group names to config mapping. + * + * ``` + * Cache.config("daily", ["duration": '1 day", "groups": ["posts"]]); + * Cache.config("weekly", ["duration": '1 week", "groups": ["posts", "archive"]]); + * configDatas = Cache.groupConfigs("posts"); + * ``` + * + * configDatas will equal to `["posts": ["daily", "weekly"]]` + * Calling this method will load all the configured engines. + * Params: + * string groupName Group name or null to retrieve all group mappings + */ + /* static Json[string] groupConfigs(string groupName = null) { + configured() + .each!(configName => pool(configName)); + + if (groupName.isNull) { + return _groups; + } + if (_groups.hasKey(groupName)) { + return [groupName: _groups[groupName]]; + } + throw new DInvalidArgumentException("Invalid cache group `%s`.".format(groupName)); + } */ + + /** + * Provides the ability to easily do read-through caching. + * + * If the key is not set, the default callback is run to get the default value. + * The results will then be stored into the cache config + * at key. 
+ * + * Examples: + * + * Using a Closure to provide data, assume `this` is a Table object: + * + * ``` + * results = Cache.remember("all_articles", auto () { + * return _find("all").toJString(); + * }); + * ``` + * Params: + * string aKey The cache key to read/store data at. + * the cache key is empty. + */ + /* static Json remember(string aKey, IClosure callbackWhenEmpty, string configName = "default") { + auto myexisting = read(aKey, configName); + if (myexisting) { + return myexisting; + } + + Json results = callbackWhenEmpty(); + write(aKey, results, configName); + + return results; + } */ + + /** + * Write data for key into a cache engine if it doesn`t exist already. + * + * ### Usage: + * + * Writing to the active cache config: + * + * ``` + * Cache.add("cached_data", mydata); + * ``` + * + * Writing to a specific cache config: + * + * ``` + * Cache.add("cached_data", mydata, "long_term"); + * ``` + * / + static bool add(string key, Json dataToCache, string configName = "default") { + // TODO + /* if (isResource(dataToCache)) { + return false; + } + return pool(configName).add(key, dataToCache); * / + return false; + } + */ +} diff --git a/caches/uim/caches/classes/engines/README.md b/caches/uim/caches/classes/engines/README.md new file mode 100644 index 0000000000..874afa0401 --- /dev/null +++ b/caches/uim/caches/classes/engines/README.md @@ -0,0 +1,5 @@ +# Package 📦 uim.caches.classes.engines + +## Packages + +## Modules diff --git a/caches/uim/caches/classes/engines/apcu.dx b/caches/uim/caches/classes/engines/apcu.dx new file mode 100644 index 0000000000..b0f9fba93c --- /dev/null +++ b/caches/uim/caches/classes/engines/apcu.dx @@ -0,0 +1,147 @@ +module uim.caches.classes.engines.apcu; + +import uim.caches; + +@safe: + +// APCu storage engine for cache +class DApcuCacheEngine : DCacheEngine { + mixin(CacheEngineThis!("Apcu")); + + // Contains the compiled group names (prefixed with the global configuration prefix) + protected string[] _compiledGroupNames; 
+ + override bool initialize(Json[string] initData = null) { + if (!super.initialize(initData)) { + return false; + } + + /* if (!extension_loaded("apcu")) { + throw new UIMException("The `apcu` extension must be enabled to use ApcuEngine."); + } */ + + return true; + } + + // Write data for key into cache + + override bool set(string itemKey, Json dataToCache, long timeToLive = 0) { + return false; + // TODO + /* auto aKey = internalKey(itemKey); + auto myDuration = duration(timeToLive); + + return apcu_store(aKey, dataToCache, myDuration); */ + } + + // Read a key from the cache + override Json get(string itemKey, Json defaultValue = Json(null)) { + auto internKey = internalcorrectKey(key); + // TODO /* auto myValue = apcu_fetch(internalKey(internKey), mysuccess); + + // return mysuccess ? myValue : defaultValue; + return Json(null); + } + + // Increments the value of an integer cached key + override long increment(string key, int incValue = 1) { + auto internKey = internalcorrectKey(key); + // TODO return apcu_inc(internKey, incValue); */ + return 0; + } + + // Decrements the value of an integer cached key + override long decrement(string key, int decValue = 1) { + auto internKey = internalKey(itemKey); + // TODO return apcu_dec(internKey, myoffset); */ + return 0; + } + + // Delete a key from the cache + override bool removeKey(string key) { + auto internKey = internalcorrectKey(key); + + // TODO return apcu_removeKey(internKey); + return false; + } + + // Delete all keys from the cache. This will clear every cache config using APC. 
+ /* override bool clear() { + if (class_hasKey(APCUIterator.classname, false)) { + auto myIterator = new APCUIterator( + "/^" ~ preg_quote(configuration.getString("prefix"), "/").correctUrl, + APC_ITER_NONE + ); + apcu_removeKey(myiterator); + + return true; + } + + auto mycache = apcu_cache_info(); // Raises warning by itself already + mycache["cache_list"] + .filter!(key => aKey["info"].startsWith(configuration.get("prefix"))) + .each!(key => apcu_removeKey(aKey["info"])); + } + return true; + } */ + + /** + * Write data for key into cache if it doesn`t exist already. + * If it already exists, it fails and returns false. + */ + /* override bool add(string itemKey, Json dataToCache) { + auto internKey = internalKey(itemKey); + Json duration = configuration.get("duration"); + + return apcu_add(internKey, dataToCache, duration); + } */ + + /** + * Returns the `group value` for each of the configured groups + * If the group initial value was not found, then it initializes the group accordingly. + */ + override string[] groups() { + if (_compiledGroupNames.isEmpty) { + configuration.get("groups").getStringArray + .map!(group => configuration.getString("prefix") ~ group).array; + } + auto mysuccess = false; + // TODO + /* auto mygroups = apcu_fetch(_compiledGroupNames, mysuccess); + if (mysuccess && count(mygroups) != count(configuration.get("groups"))) { + _compiledGroupNames.each!((groupname) { + if (!mygroups.hasKey(groupname)) { + auto myvalue = 1; + if (apcu_store(groupname, myvalue) == false) { + warning( + "Failed to store key `%s` with value `%s` into APCu cache." 
+ .format(groupname, myvalue) + ); + } + mygroups[mygroup] = myvalue; + } + }); + //TODO ksort(mygroups); + } */ + string[] results = null; + // TOD auto groupValues = mygroups.values; + //TODO + /* foreach (index : mygroup; configuration.get("groups")) { + results ~= mygroup ~ groupValues[index]; + } */ + return results; + } + + /* + * Increments the group value to simulate deletion of all keys under a group + * old values will remain in storage until they expire. + */ + override bool clearGroup(string groupName) { + bool isSuccess = false; + // TODO apcu_inc(configuration.getString("prefix") ~ groupName, 1, isSuccess); + + return isSuccess; + } +} + +mixin(CacheEngineCalls!("Apcu")); diff --git a/caches/uim/caches/classes/engines/array_.dx b/caches/uim/caches/classes/engines/array_.dx new file mode 100644 index 0000000000..28c742ce52 --- /dev/null +++ b/caches/uim/caches/classes/engines/array_.dx @@ -0,0 +1,126 @@ +module uim.caches.classes.engines.array_; + +import uim.caches; + +@safe: + +/** + * Array storage engine for cache. + * Not actually a persistent cache engine. All data is only + * stored in memory for the duration of a single process. While not + * useful in production settings this engine can be useful in tests + * or console tools where you don`t want the overhead of interacting + * with a cache servers, but want the work saving properties a cache provides. + */ +class DArrayCacheEngine : DCacheEngine { + mixin(CacheEngineThis!("Array")); + + override bool initialize(Json[string] initData = null) { + if (!super.initialize(initData)) { + return false; + } + + return true; + } + + // Write data for key into cache + override bool updateKey(string itemKey, Json dataForCache, long timeToLive = 0) { + Json data = Json.emptyObject; + data.set("exp", 0); // TODO time() + duration(timeToLive)); + data.set("val", dataForCache); + _cachedData.set(internalKey(itemKey), data); + + return true; + } + + // Cached data. 
+ // Structured as [key: [exp: expiration, val: value]] + protected Json[string] _cachedData; + // Delete a key from the cache + override bool removeKey(string key) { + string internKey = internalcorrectKey(key); + return _cachedData.removeKey(internKey); + } + + // Delete all keys from the cache. This will clear every cache config using APC. + + // Read a key from the cache + override Json read(string key, Json defaultValue = Json(null)) { + string internKey = internalcorrectKey(key); + if (!_cachedData.hasKey(internKey)) { + return defaultValue; + } + + auto value = _cachedData[internKey]; + + // Check expiration + auto checkTime = time(); + if (value.getLong("exp") <= checkTime) { + _cachedData.removeKey(internKey); + return defaultValue; + } + + return value["val"]; + } + + // Increments the value of an integer cached key + override long increment(string key, int incValue = 1) { + if (read(key).isNull) { + updateKey(key, 0); + } + auto internKey = internalcorrectKey(key); + _cachedData.set(internKey ~ ".val", _cachedData.getLong(internKey ~ ".val") + incValue); + + return _cachedData[internKey ~ ".val"]; + } + + // Decrements the value of an integer cached key + override long decrement(string key, int decValue = 1) { + if (get(key).isNull) { + set(key, 0); + } + auto internKey = internalcorrectKey(key); + _cachedData.set(internKey ~ ".val", _cachedData.getLong(internKey ~ ".val") - decValue); + + return _cachedData.get(internKey ~ ".val"); + } + + /** + * Returns the `group value` for each of the configured groups. + * If the group initial value was not found, then it initializes the group accordingly. 
+ */ + override string[] groups() { + string[] results; + + // TODO + /* + configuration.get("groups").each!((group) { + string key = configuration.getString("prefix") ~ myGroup; + if (!_cachedData.hasKey(key)) { + _cachedData[aKey] = ["exp": D_INT_MAX, "val": 1]; + } + string myvalue = _cachedData[aKey]["val"]; + results ~= myGroup ~ myvalue; + }); + */ + return results; + } + + override bool clear() { + return _cachedData.clear; + } + /** + * Increments the group value to simulate deletion of all keys under a group + * old values will remain in storage until they expire. + */ + override bool clearGroup(string groupName) { + string key = configuration.get("prefix").toString ~ groupName; + // TODO + /* if (_cachedData.hasKey(key)) { + _cachedData[key]["val"] += 1; + } */ + return true; + } +} + +mixin(CacheEngineCalls!("Array")); diff --git a/caches/uim/caches/classes/engines/engine.d b/caches/uim/caches/classes/engines/engine.d index ae235f5b18..a60d301e84 100644 --- a/caches/uim/caches/classes/engines/engine.d +++ b/caches/uim/caches/classes/engines/engine.d @@ -7,4 +7,193 @@ import uim.caches; // Storage engine for UIM caching class DCacheEngine : UIMObject, ICacheEngine { mixin(CacheEngineThis!()); + + /* + override bool initialize(Json[string] initData = null) { + if (!super.initialize(initData)) { + return false; + } + + /** + * The default cache configuration is overridden in most cache adapters. These are + * the keys that are common to all adapters. If overridden, this property is not used. + * + * - `duration` Specify how long items in this cache configuration last. + * - `groups` List of groups or "tags" associated to every key stored in this config. + * handy for deleting a complete group from cache. + * - `prefix` Prefix appended to all entries. Good for when you need to share a keyspace + * with either another cache config or another application. + * - `warnOnWriteFailures` Some engines, such as ApcuEngine, may raise warnings on + * write failures. 
+ * / + + if (configuration.hasKey("groups")) { + configuration.getStringArray("groups").sort; // TODO _groupPrefix = repeat("%s_", configuration.getStringArray("groups").length); + } + /* if (!configuration.isNumeric("duration")) { + // TODO configuration.set("duration", configuration.get("duration").toTime - time()); + } * / + + configuration + .setDefault("duration", 3600) + .setDefault("groups", Json.emptyArray) + .setDefault("prefix", "uim_") + .setDefault("warnOnWriteFailures", true); + return true; + } + + // Group prefixes to be prepended to every key in this cache engine + mixin(TProperty!("string", "groupName")); // #region items + // Obtains multiple cache items by their unique keys. + void items(Json[string] newItems, long timeToLive = 0) { + clear(); + updateKey(newItems.dup, timeToLive); + } + + Json[string] items(string[] keysToUse = null) { + if (keysToUse.isEmpty) { + return items(keys); + } + + Json[string] results; + keysToUse + .each!((key) { + /* if (auto item = read(key)) { + results.set(key, item); + } * / + }); + return results; + } + + string[] keys() { + return null; + } + + // Persists a set of key: value pairs in the cache, with an optional TTL. + /* bool items(Json[string] items, long timeToLive = 0) { + // TODO ensureValidType(myvalues, CHECK_KEY); + + Json restoreDuration = Json(null); + if (timeToLive != 0) { + restoreDuration = configuration.hasKey("duration"); + configuration.set("duration", timeToLive); + } + try { + return items.byKeyValue + .all!(kv => updateKey(aKey, myvalue)); + } finally { + if (restoreDuration.isNull) { + configuration.set("duration", restoreDuration); + } + } + return false; + } * / + // #region items + + // #region read + // Fetches the value for a given key from the cache. 
+ Json[] read(string[] keys, Json defaultValue = Json(null)) { + return keys.map!(key => read(key, defaultValue)).array; + } + + Json read(string key, Json defaultValue = Json(null)) { + return Json(null); + } + + // #endregion read + + // #region update + // Persists data in the cache, uniquely referenced by the given key with an optional expiration timeToLive time. + bool updateKey(Json[string] items, long timeToLive = 0) { + return items.byKeyValue + .all!(kv => updateKey(kv.key, kv.value, timeToLive)); + } + + bool updateKey(string key, Json value, long timeToLive = 0) { + return false; + } + // #endregion update + + // Increment a number under the key and return incremented value + long increment(string key, int incValue = 1) { + return 0; + } + // Decrement a number under the key and return decremented value + long decrement(string key, int decValue = 1) { + return 0; + } + + // Merge an item (key, value) to the cache if it does not already exist. + bool merge(string key, Json value, long timeToLive = 0) { + return read(key).isNull + ? updateKey(key, value, timeToLive) : false; + } + + // #region remove + // Delete all keys from the cache + bool clear() { + return removeKey(keys); + } + + // Deletes multiple cache items as a list + bool removeKey(string[] keys) { + return keys.all!(key => removeKey(key)); + } + + // Delete a key from the cache + bool removeKey(string key) { + return false; + } + // #endregion remove + + /** + * Clears all values belonging to a group. Is up to the implementing engine + * to decide whether actually delete the keys or just simulate it to achieve the same result. + * / + abstract bool clearGroup(string groupName); + + /** + * Does whatever initialization for each group is required and returns the `group value` for each of them, + * this is the token representing each group in the cache key + * / + string[] groups() { + return configuration.getStringArray( + "groups"); + } + + /** + * Generates a key for cache backend usage. 
+ * + * If the requested key is valid, the group prefix value and engine prefix are applied. + * Whitespace in keys will be replaced. + * / + protected string internalKey(string key) { + string prefix = groupName + ? groups().join("_").md5 : ""; + + // TODO auto changedKey = key.replaceAll.regex(r"/[\s]+/", "_"); + return configuration.getString( + "prefix") ~ prefix; // ~ changedKey; + } + + /** + * Cache Engines may trigger warnings if they encounter failures during operation, + * if option warnOnWriteFailures is set to true. + * / + protected void warning( + string warningMessage) { + if (!configuration.getBoolean( + "warnOnWriteFailures")) { + return; + } + // TODO triggerWarning(warningMessage); + } + + // Convert the various expressions of a TTL value into duration in seconds + protected long duration( + long timeToLive = 0) { + return timeToLive == 0 + ? configuration.getLong( + "duration") : timeToLive; + } */ } diff --git a/caches/uim/caches/classes/engines/file.d b/caches/uim/caches/classes/engines/file.d new file mode 100644 index 0000000000..70ae665a03 --- /dev/null +++ b/caches/uim/caches/classes/engines/file.d @@ -0,0 +1,345 @@ +/**************************************************************************************************************** +* Copyright: © 2017-2024 Ozan Nurettin Süel (aka UIManufaktur) * +* License: Subject to the terms of the Apache 2.0 license, as written in the included LICENSE.txt file. * +* Authors: Ozan Nurettin Süel (aka UIManufaktur) * +*****************************************************************************************************************/ +module uim.caches.classes.engines.file; + +import uim.caches; + +@safe: + +/** + * File Storage engine for cache. Filestorage is the slowest cache storage + * to read and write. However, it is good for servers that don"t have other storage + * engine available, or have content which is not performance sensitive. 
+ * + * You can configure a FileEngine cache, using Cache.config() + */ +class DFileCacheEngine : DCacheEngine { + mixin(CacheEngineThis!("File")); + + override bool initialize(Json[string] initData = null) { + if (!super.initialize(initData)) { + return false; + } + + configuration + .setDefault("duration", 3600) // `duration` Specify how long items in this cache configuration last. + .setDefault("groups", Json.emptyArray) // `groups` List of groups or "tags" associated to every key stored in this config. + .setDefault("lock", true) // `lock` Used by FileCache. Should files be locked before writing to them? + .setDefault("mask", std.conv.octal!"664") // `mask` The mask used for created files + .setDefault("dirMask", std.conv.octal!"770") // `dirMask` The mask used for created folders + .setDefault("path", Json(null)) // `path` Path to where cachefiles should be saved. Defaults to system"s temp dir. + .setDefault("prefix", "uim_") // `prefix` Prepended to all entries. + .setDefault("serialize", true); // `serialize` Should cache objects be serialized first. + + /* + string path = configuration.getString("path", sys_get_temp_dir() ~ DIRECTORY_SEPARATOR ~ "uim_cache" ~ DIRECTORY_SEPARATOR); + configuration.set("path", path.subString(-1) != DIRECTORY_SEPARATOR + ? 
path ~ DIRECTORY_SEPARATOR + : path; + + if (_groupPrefix) { + _groupPrefix = _groupPrefix.replace("_", DIRECTORY_SEPARATOR); + } + return _active(); */ + return true; + } + +/* override long decrement(string itemKey, int decValue = 1) { + // TODO throw new DLogicException("Files cannot be atomically decremented."); + return 0; + } + + override long increment(string itemKey, int incValue = 1) { + // TODO + // throw new DLogicException("Files cannot be atomically incremented."); + return 0; + } + */ + // True unless FileEngine.__active(); fails + protected bool _init = true; + + // Instance of SplFileObject class + // TODO protected DSplFileObject _splFileObject; + + // Write data for key into cache + /* override */ + bool set(string dataId, Json cacheData, long timeToLive = 0) { + /* TODO if (cacheData is null || !_init) { + return false; + } + + auto aKey = internalKey(dataId); + + if (_setKey(aKey, true) == false) { + return false; + } + if (!configuration.isEmpty("serialize")) { + cacheData = serialize(cacheData); + } + myexpires = time() + duration(timeToLive); + mycontents = [myexpires, D_EOL, cacheData, D_EOL].join(); + + if (configuration.hasKey("lock")) { + _File.flock(LOCK_EX); + } + _File.rewind(); + mysuccess = _File.ftruncate(0) && + _File.fwrite(mycontents) && + _File.fflush(); + + if (configuration.hasKey("lock")) { + _File.flock(LOCK_UN); + } + _File = null; + + return mysuccess; */ + return false; + } + + /* + // Read a key from the cache + Json get(string dataId, Json defaultValue = Json(null)) { + auto key = internalKey(dataId); + + if (!_init || _setcorrectKey(key) == false) { + return defaultValue; + } + if (configuration.hasKey("lock")) { + _File.flock(LOCK_SH); + } + _File.rewind(); + auto mytime = time(); + auto mycachetime = to!int(_File.currentValue()); + + if (mycachetime < mytime) { + if (configuration.hasKey("lock")) { + _File.flock(LOCK_UN); + } + return defaultValue; + } + string myData = ""; + _File.next(); + while (_File.valid()) { + 
/** @psalm-suppress PossiblyInvalidOperand * / + myData ~= _File.currentValue(); + _File.next(); + } + if (configuration.hasKey("lock")) { + _File.flock(LOCK_UN); + } + myData = myData.strip; + + if (myData != "" && !configuration.isEMpty("serialize")) { + myData = unserialize(myData); + } + return myData; + } + + // Delete a key from the cache + override bool removeKey(string dataId) { + auto key = internalKey(dataId); + + if (_setcorrectKey(key) == false || !_init) { + return false; + } + auto mypath = _File.getRealPath(); + removeKey(_File); + + return mypath.isEmpty + ? false + : @unlink(mypath) ; + } + + // Delete all values from the cache + bool clear() { + if (!_init) { + return false; + } + removeKey(_File); + + _clearDirectory(configuration.get("path"]); + + mydirectory = new DRecursiveDirectoryIterator( + configuration.get("path"], + FilesystemIterator.SKIP_DOTS + ); + /** @var \RecursiveDirectoryIterator<\DFileInfo> myiterator Coerce for Dstan/psalm * / + auto myIterator = new DRecursiveIteratorIterator( + mydirectory, + RecursiveIteratorIterator.SELF_FIRST + ); + + string[] mycleared; + myiterator.each!((myfileInfo) { + if (myfileInfo.isFile()) { + removeKey(myfileInfo); + continue; + } + + auto myrealPath = myfileInfo.getRealPath(); + if (!myrealPath) { + removeKey(myfileInfo); + continue; + } + + string mypath = myrealPath ~ DIRECTORY_SEPARATOR; + if (!mycleared.has(mypath)) { + _clearDirectory(mypath); + mycleared ~= mypath; + } + // possible inner iterators need to be unset too in order for locks on parents to be released + removeKey(myfileInfo); + }); + // unsetting iterators helps releasing possible locks in certain environments, + // which could otherwise make `rmdir()` fail + removeKey(mydirectory, myiterator); + + return true; + } + + // Used to clear a directory of matching files. 
+ protected void _clearDirectory(string pathToSearch) { + if (!isDir(pathToSearch)) { + return; + } + + auto mydir = dir(pathToSearch); + if (!mydir) { + return; + } + myprefixLength = configuration.get("prefix").length; + + while ((myentry = mydir.read()) == true) { + if (subString(myentry, 0, myprefixLength) != configuration.get("prefix")) { + continue; + } + try { + myfile = new DSplFileObject(mypath ~ myentry, "r"); + } catch (Exception) { + continue; + } + if (myfile.isFile()) { + myfilePath = myfile.getRealPath(); + removeKey(myfile); + } + } + mydir.close(); + } + + /** + * Sets the current cache key this class is managing, and creates a writable SplFileObject + * for the cache file the key is referring to. + */ + /* protected bool _setKey(string key, bool createKeyIfNotExists = false) { + mygroups = null; + if (_groupPrefix) { + mygroups = vsprintf(_groupPrefix, this.groups()); + } + mydir = configuration.getString("path") ~ mygroups; + + if (!isDir(mydir)) { + mkdir(mydir, configuration.get("dirMask"), true); + } + mypath = new DFileInfo(mydir ~ key); + + if (!createKeyIfNotExists && !mypath.isFile()) { + return false; + } + /** @psalm-suppress TypeDoesNotContainType * / + if ( + _File is null || + _File.getBasename() != key || + _File.valid() == false + ) { + myexists = isFile(mypath.getPathname()); + try { + _File = mypath.openFile("c+"); + } catch (Exception exception) { + trigger_error(exception.message(), ERRORS.USER_WARNING); + + return false; + } + mypath = null; + + if (!myexists && !chmod(_File.getPathname(), configuration.getLong("mask"])) { + trigger_error( + "Could not apply permission mask `%s` on cache file `%s`" + .format(_File.getPathname(), + configuration.get("mask"] + ), ERRORS.USER_WARNING); + } + } + return true; + } */ + + // Determine if cache directory is writable + /* protected bool _active() { + mydir = new DFileInfo(configuration.get("path"]); + mypath = mydir.getPathname(); + mysuccess = true; + if (!isDir(mypath)) { + mysuccess 
= @mkdir(mypath, configuration.get("dirMask"], true) ; + } + myisWritableDir = (mydir.isDir() && mydir.isWritable()); + if (!mysuccess || (_init && !myisWritableDir)) { + _init = false; + trigger_error("%s is not writable" + .format(configuration.get("path"] + ), ERRORS.USER_WARNING); + } + return mysuccess; + } */ + + /* override */ protected string internalKey(string key) { + // auto newKey = super.internalcorrectKey(key); + /* return rawUrlEncode(newKey); */ + return null; + } + + // Recursively deletes all files under any directory named as mygroup + /* override */ bool clearGroup(string groupName) { + // TODO + /* removeKey(_File); + + string myprefix = configuration.getString("prefix"); + + DRecursiveDirectoryIterator mydirectoryIterator = new DRecursiveDirectoryIterator( + configuration.get("path")); + DRecursiveIteratorIterator mycontents = new DRecursiveIteratorIterator( + mydirectoryIterator, + RecursiveIteratorIterator.CHILD_FIRST + ); + // TODO + /* + DFileInfo[] myfiltered = new DCallbackFilterIterator( + mycontents, + auto(DFileInfo mycurrent) use(groupName, myprefix) { + if (!mycurrent.isFile()) { + return false;} + myhasPrefix = myprefix is null || str_starts_with(mycurrent.getBasename(), myprefix); + return myhasPrefix + ? 
mycurrent.getPathname() + .has( + DIRECTORY_SEPARATOR ~ groupName ~ DIRECTORY_SEPARATOR + ) : false;} + + ); + + myfiltered.each!((obj) { + auto mypath = obj.getPathName(); removeKey(obj); @unlink(mypath) ; + }); + // unsetting iterators helps releasing possible locks in certain environments, + // which could otherwise make `rmdir()` fail + removeKey(mydirectoryIterator, mycontents, myfiltered); + + return true; + } + } */ + return false; + } +} + +mixin(CacheEngineCalls!("File")); diff --git a/caches/uim/caches/classes/engines/file.dx b/caches/uim/caches/classes/engines/file.dx new file mode 100644 index 0000000000..39b1c16897 --- /dev/null +++ b/caches/uim/caches/classes/engines/file.dx @@ -0,0 +1,345 @@ +module uim.caches.classes.engines.file; + +import uim.caches; + +@safe: + +/** + * File Storage engine for cache. Filestorage is the slowest cache storage + * to read and write. However, it is good for servers that don"t have other storage + * engine available, or have content which is not performance sensitive. + * + * You can configure a FileEngine cache, using Cache.config() + */ +class DFileCacheEngine : DCacheEngine { + mixin(CacheEngineThis!("File")); + + override bool initialize(Json[string] initData = null) { + if (!super.initialize(initData)) { + return false; + } + + configuration + // `duration` Specify how long items in this cache configuration last. + .setDefault("duration", 3600) + // `groups` List of groups or "tags" associated to every key stored in this config. + .setDefault("groups", Json.emptyArray) + // `lock` Used by FileCache. Should files be locked before writing to them? + .setDefault("lock", true) + // `mask` The mask used for created files + // TODO "mask": std.conv.octal!"664") + // `dirMask` The mask used for created folders + // TODO "dirMask": std.conv.octal!"770") + // `path` Path to where cachefiles should be saved. Defaults to system"s temp dir. + .setDefault("path", Json(null)) + // `prefix` Prepended to all entries. 
+ .setDefault("prefix", "uim_") + // `serialize` Should cache objects be serialized first. + .setDefault("serialize", true) + + /* + configuration.get("path"] = configuration.get("path", sys_get_temp_dir()~DIRECTORY_SEPARATOR ~ "uim_cache" ~ DIRECTORY_SEPARATOR); + if (subString(configuration.get("path"], -1) != DIRECTORY_SEPARATOR) { + configuration.get("path").concat( DIRECTORY_SEPARATOR; + } + if (_groupPrefix) { + _groupPrefix = _groupPrefix.replace("_", DIRECTORY_SEPARATOR); + } + return _active(); */ + return true; + } + + override long decrement(string itemKey, int decValue = 1) { + // TODO throw new DLogicException("Files cannot be atomically decremented."); + return 0; + } + + override long increment(string itemKey, int incValue = 1) { + // TODO + // throw new DLogicException("Files cannot be atomically incremented."); + return 0; + } + + // True unless FileEngine.__active(); fails + protected bool _init = true; + + // Instance of SplFileObject class + // TODO protected DSplFileObject _splFileObject; + + // Write data for key into cache + override bool set(string dataId, Json cacheData, long timeToLive = 0) { + /* TODO if (cacheData is null || !_init) { + return false; + } + + auto aKey = internalKey(dataId); + + if (_setKey(aKey, true) == false) { + return false; + } + if (!configuration.isEmpty("serialize")) { + cacheData = serialize(cacheData); + } + myexpires = time() + duration(timeToLive); + mycontents = [myexpires, D_EOL, cacheData, D_EOL].join(); + + if (configuration.hasKey("lock")) { + _File.flock(LOCK_EX); + } + _File.rewind(); + mysuccess = _File.ftruncate(0) && + _File.fwrite(mycontents) && + _File.fflush(); + + if (configuration.hasKey("lock")) { + _File.flock(LOCK_UN); + } + _File = null; + + return mysuccess; */ + return false; + } + + /* + // Read a key from the cache + Json get(string dataId, Json defaultValue = Json(null)) { + auto key = internalKey(dataId); + + if (!_init || _setcorrectKey(key) == false) { + return defaultValue; + } + 
if (configuration.hasKey("lock")) { + _File.flock(LOCK_SH); + } + _File.rewind(); + mytime = time(); + mycachetime = to!int(_File.currentValue()); + + if (mycachetime < mytime) { + if (configuration.hasKey("lock")) { + _File.flock(LOCK_UN); + } + return defaultValue; + } + string myData = ""; + _File.next(); + while (_File.valid()) { + /** @psalm-suppress PossiblyInvalidOperand * / + myData ~= _File.currentValue(); + _File.next(); + } + if (configuration.hasKey("lock")) { + _File.flock(LOCK_UN); + } + myData = strip(myData); + + if (myData != "" && !configuration.isEMpty("serialize")) { + myData = unserialize(myData); + } + return myData; + } + + // Delete a key from the cache + override bool removeKey(string dataId) { + auto key = internalKey(dataId); + + if (_setcorrectKey(key) == false || !_init) { + return false; + } + auto mypath = _File.getRealPath(); + removeKey(_File); + + return mypath.isEmpty + ? false + : @unlink(mypath) ; + } + + // Delete all values from the cache + bool clear() { + if (!_init) { + return false; + } + removeKey(_File); + + _clearDirectory(configuration.get("path"]); + + mydirectory = new DRecursiveDirectoryIterator( + configuration.get("path"], + FilesystemIterator.SKIP_DOTS + ); + /** @var \RecursiveDirectoryIterator<\DFileInfo> myiterator Coerce for Dstan/psalm * / + auto myIterator = new DRecursiveIteratorIterator( + mydirectory, + RecursiveIteratorIterator.SELF_FIRST + ); + + string[] mycleared; + myiterator.each!((myfileInfo) { + if (myfileInfo.isFile()) { + removeKey(myfileInfo); + continue; + } + + auto myrealPath = myfileInfo.getRealPath(); + if (!myrealPath) { + removeKey(myfileInfo); + continue; + } + + string mypath = myrealPath ~ DIRECTORY_SEPARATOR; + if (!mycleared.has(mypath)) { + _clearDirectory(mypath); + mycleared ~= mypath; + } + // possible inner iterators need to be unset too in order for locks on parents to be released + removeKey(myfileInfo); + }); + // unsetting iterators helps releasing possible locks in 
certain environments, + // which could otherwise make `rmdir()` fail + removeKey(mydirectory, myiterator); + + return true; + } + + // Used to clear a directory of matching files. + protected void _clearDirectory(string pathToSearch) { + if (!isDir(pathToSearch)) { + return; + } + + auto mydir = dir(pathToSearch); + if (!mydir) { + return; + } + myprefixLength = configuration.get("prefix").length; + + while ((myentry = mydir.read()) == true) { + if (subString(myentry, 0, myprefixLength) != configuration.get("prefix")) { + continue; + } + try { + myfile = new DSplFileObject(mypath ~ myentry, "r"); + } catch (Exception) { + continue; + } + if (myfile.isFile()) { + myfilePath = myfile.getRealPath(); + removeKey(myfile); + } + } + mydir.close(); + } + + /** + * Sets the current cache key this class is managing, and creates a writable SplFileObject + * for the cache file the key is referring to. + */ + /* protected bool _setKey(string key, bool createKeyIfNotExists = false) { + mygroups = null; + if (_groupPrefix) { + mygroups = vsprintf(_groupPrefix, this.groups()); + } + mydir = configuration.getString("path") ~ mygroups; + + if (!isDir(mydir)) { + mkdir(mydir, configuration.get("dirMask"), true); + } + mypath = new DFileInfo(mydir ~ key); + + if (!createKeyIfNotExists && !mypath.isFile()) { + return false; + } + /** @psalm-suppress TypeDoesNotContainType * / + if ( + !_File !is null) || + _File.getBasename() != key || + _File.valid() == false + ) { + myexists = isFile(mypath.getPathname()); + try { + _File = mypath.openFile("c+"); + } catch (Exception exception) { + trigger_error(exception.message(), ERRORS.USER_WARNING); + + return false; + } + mypath = null; + + if (!myexists && !chmod(_File.getPathname(), configuration.getLong("mask"])) { + trigger_error( + "Could not apply permission mask `%s` on cache file `%s`" + .format(_File.getPathname(), + configuration.get("mask"] + ), ERRORS.USER_WARNING); + } + } + return true; + } */ + + // Determine if cache directory 
is writable + /* protected bool _active() { + auto mydir = new DFileInfo(configuration.get("path")); + auto mypath = mydir.getPathname(); + auto mysuccess = true; + if (!isDir(mypath)) { + mysuccess = @mkdir(mypath, configuration.get("dirMask"), true) ; + } + myisWritableDir = (mydir.isDir() && mydir.isWritable()); + if (!mysuccess || (_init && !myisWritableDir)) { + _init = false; + trigger_error("%s is not writable" + .format(configuration.getString("path"), ERRORS.USER_WARNING)); + } + return mysuccess; + } */ + + override protected string internalKey(string key) { + auto newKey = super.internalcorrectKey(key); + + return rawUrlEncode(newKey); + } + + // Recursively deletes all files under any directory named as mygroup + override bool clearGroup(string groupName) { + // TODO + /* removeKey(_File); + + string myprefix = configuration.getString("prefix"); + + DRecursiveDirectoryIterator mydirectoryIterator = new DRecursiveDirectoryIterator( + configuration.get("path")); + DRecursiveIteratorIterator mycontents = new DRecursiveIteratorIterator( + mydirectoryIterator, + RecursiveIteratorIterator.CHILD_FIRST + ); + // TODO + /* + DFileInfo[] myfiltered = new DCallbackFilterIterator( + mycontents, + auto(DFileInfo mycurrent) use(groupName, myprefix) { + if (!mycurrent.isFile()) { + return false;} + myhasPrefix = myprefix is null || str_starts_with(mycurrent.getBasename(), myprefix); + return myhasPrefix + ? 
mycurrent.getPathname() + .has( + DIRECTORY_SEPARATOR ~ groupName ~ DIRECTORY_SEPARATOR + ) : false;} + + ); + + myfiltered.each!((obj) { + auto mypath = obj.getPathName(); removeKey(obj); @unlink(mypath) ; + }); + // unsetting iterators helps releasing possible locks in certain environments, + // which could otherwise make `rmdir()` fail + removeKey(mydirectoryIterator, mycontents, myfiltered); + + return true; + } + } */ + return false; + } +} + +mixin(CacheEngineCalls!("File")); diff --git a/caches/uim/caches/classes/engines/memory.d b/caches/uim/caches/classes/engines/memory.d new file mode 100644 index 0000000000..8637535043 --- /dev/null +++ b/caches/uim/caches/classes/engines/memory.d @@ -0,0 +1,345 @@ +/**************************************************************************************************************** +* Copyright: © 2017-2024 Ozan Nurettin Süel (aka UIManufaktur) * +* License: Subject to the terms of the Apache 2.0 license, as written in the included LICENSE.txt file. * +* Authors: Ozan Nurettin Süel (aka UIManufaktur) * +*****************************************************************************************************************/ +module uim.caches.classes.engines.memory; + +import uim.caches; + +@safe: + +/** + * Memory storage engine for cache. Memory has some limitations in the amount of + * control you have over expire times far in the future. See MemoryEngine.write() for + * more information. + * + * Memory engine supports binary protocol and igbinary + * serialization (if memcached extension is compiled with --enable-igbinary). + * Compressed keys can also be incremented/decremented. 
+ */ +class DMemoryCacheEngine : DCacheEngine { + mixin(CacheEngineThis!("Memory")); + + override bool initialize(Json[string] initData = null) { + if (!super.initialize(initData)) { + return false; + } + /** + * The default config used unless overridden by runtime configuration + * + * - `compress` Whether to compress data + * - `duration` Specify how long items in this cache configuration last. + * - `groups` List of groups or 'tags' associated to every key stored in this config. + * handy for deleting a complete group from cache. + * - `username` Login to access the Memcache server + * - `password` Password to access the Memcache server + * - `persistent` The name of the persistent connection. All configurations using + * the same persistent value will share a single underlying connection. + * - `prefix` Prepended to all entries. Good for when you need to share a keyspace + * with either another cache config or another application. + * - `serialize` The serializer engine used to serialize data. Available engines are 'D", + * 'igbinary' and 'Json'. Beside 'D", the memcached extension must be compiled with the + * appropriate serializer support. + * Use the \Memory.OPT_* constants as keys. + */ + configuration + .setDefault("compress", false) + .setDefault("duration", 3600) + .setDefault("username", "") + .setDefaults(["host", "password", "persistent", "port"], Json(null)) + .setDefault("prefix", "uim_") + .setDefault("serialize", "d") + .setDefault("servers", ["127.0.0.1"].toJson) // `servers` String or array of memcached servers. If an array MemcacheEngine will use them as a pool. + .setDefaults(["groups", "options"], Json.emptyArray); // `options` - Additional options for the memcached client. Should be an array of option: value. 
+ + return true; + } + + // List of available serializer engines + // Memory must be compiled with Json and igbinary support to use these engines +/* protected int[string] _serializers; + + protected string[] _compiledGroupNames; + + protected Json[string] _memory; + /* protected DMemory _memcached;* / + + override string[] keys() { + // TODO wrong these are the internal Keys + return _memory.keys; + } + */ + /** + * Initialize the Cache Engine + * + * Called automatically by the cache frontend + * / + + if (!extension_loaded("memcached")) { + throw new UIMException("The `memcached` extension must be enabled to use MemoryEngine."); + } + /* _serializers = [ + "igbinary": Memory: : SERIALIZER_IGBINARY, + "Json": Memory: : SERIALIZER_Json, + "d": Memory: : SERIALIZER_D, + ]; */ + /* + if (defined("Memory.HAVE_MSGPACK")) { + // TODO _serializers["msgpack"] = Memory.SERIALIZER_MSGPACK; + } + super.initialize(initData); + + if (!configuration.isEmpty("host")) { + configuration.set("servers", configuration.isEmpty("port") + ? [configuration.get("host")] + : ["%s:%d".format(configuration.getString("host"), configuration.getString("port")) + ); + } + /* if (configData.hasKey("servers")) { + configuration.set("servers", configuration.get("servers"], false); + } */ + /* if (!configuration.isArray("servers")) { + configuration.set("servers", [configuration.getArray("servers")]); + } * / + if (!_memory is null) { + return true; + } + // _memory = configuration.get("persistent"] + // TODO ? new DMemory(configuration.get("persistent"]) : new DMemory(); + + } + + _setOptions(); + + string[] serversFromConfig = configuration.get("servers"); + if (auto servers = _memory.getServerList()) { + if (_memory.isPersistent()) { + servers + .filter!(server => !server.getString("host") ~ ": " ~ server.getString("port").isIn(serversFromConfig)) + .each!(server => throw new DInvalidArgumentException( + "Invalid cache configuration. 
Multiple persistent cache configurations are detected" ~ + " with different `servers` values. `servers` values for persistent cache configurations" ~ + " must be the same when using the same persistence id." + )); + } + } + return true; + } + serversFromConfig + .map!(server => parseServerString(server)) + .array; +} +if (!_memory.addServers(myservers)) { + return false; +} + +if (configuration.isArray("options"]) { + configuration.get("options"].byKeyValue + .each!(optValue => _memory.setOption(optValue.key, optValue.value)); +} +if (configuration.isEmpty("username"] && !configuration.isEmpty("login")) { + throw new DInvalidArgumentException( + "Please pass " username" instead of 'login' for connecting to Memory" + ); +} +if (configuration.hasKeys("username", "password")) { + if (!hasMethod(_memory, "setSaslAuthData")) { + throw new DInvalidArgumentException( + "Memory extension is not built with SASL support" + ); + } + _memory.setOption(Memory.OPT_BINARY_PROTOCOL, true); + _memory.setSaslAuthData( + configuration.getString("username"), + configuration.getString("password") + ); +} +return true; +} + +/** + * Settings the memcached instance + * When the Memory extension is not built + * with the desired serializer engine. 
+ * / + protected void _setOptions() { + // _memory.setOption(Memory.OPT_LIBKETAMA_COMPATIBLE, true); + + string myserializer = configuration.getString("serialize").lower; + if (!_serializers.hasKey(myserializer)) { + throw new DInvalidArgumentException( + "`%s` is not a valid serializer engine for Memory.".format(myserializer) + ); + } + /* if (myserializer != "d" && !constant("Memory.HAVE_" ~ myserializer.upper)) { + throw new DInvalidArgumentException( + "Memory extension is not compiled with `%s` support.".format(myserializer) + ); * / + } + + /* _memory.setOption( + Memory.OPT_SERIALIZER, + _serializers[myserializer] + ); */ + // Check for Amazon ElastiCache instance + /* if ( + defined("Memory.OPT_CLIENT_MODE") && + defined("Memory.DYNAMIC_CLIENT_MODE") + ) { + _memory.setOption(Memory.OPT_CLIENT_MODE, Memory.DYNAMIC_CLIENT_MODE); + } */ + + /* _memory.setOption( + Memory.OPT_COMPRESSION, + configuration.getBoolean("compress") + ); +}*/ + + /** + * Parses the server address into the host/port. Handles both IPv6 and IPv4 + * addresses and Unix sockets + * Params: + * string myserver The server address string. + */ + /* Json[string] parseServerString(string myserver) { + auto mysocketTransport = "unix://"; + /* if (myserver.startsWith(mysocketTransport)) { + return [subString(myserver, mysocketTransport.length), 0]; + } * / + + /* size_t myposition; + if (myserver.startsWith("[")) { + size_t myposition = indexOf(myserver, "]:"); + if (myposition == true) { + myposition++; + } + } else { + myposition = indexOf(myserver, ": "); + } + auto myport = 11211; + auto myhost = myserver; + if (myposition == true) { + myhost = subString(myserver, 0, myposition); + myport = subString(myserver, myposition + 1); + } + return [ + myhost, /* (int) * / myport + ]; + } */ + + /** + * Read an option value from the memcached connection. + * Params: + * int myname The option name to read. 
+ */ + /* Json getOption(string myname) { + return _memory.get(myname); + } */ + + /** + * Write data for key into cache. When using memcached as your cache engine + * remember that the Memory pecl extension does not support cache expiry + * times greater than 30 days in the future. Any duration greater than 30 days + * will be treated as real Unix time value rather than an offset from current time. + * / + override bool updateKey(string itemKey, Json dataToCache, long timeToLive = 0) { + return false; + // TODO + // return _memory.set(internalKey(itemKey), dataToCache, duration(timeToLive)); + } + + override bool merge(Json[string] items, long timeToLive = 0) { + Json[string] cacheData = null; + /* items.byKeyValue + .each!(kv => cacheData.set(internalKey(kv.key), kv.value)); * / + // TODOreturn _memory.merge(cacheData, duration(timeToLive)); + return false; + } + + // Write many cache entries to the cache at once + /* override bool updateKey(Json[string] items, long timeToLive = 0) { + Json[string] cacheData = null; + items.byKeyValue + .each!(kv => cacheData[internalKey(kv.key)] = kv.value); + return _memory.set(cacheData); //, duration(timeToLive)); + } * / + + // Read a key from the cache + override Json read(string key, Json defaultValue = Json(null)) { + // string internKey = internalcorrectKey(key); + // TODO auto myvalue = _memory.get(internKey); + /* return _memory.getResultCode() == Memory.RES_NOTFOUND + ? 
defaultValue : myvalue; * / + return Json(null); + } + + // Increments the value of an integer cached key + override long increment(string key, int incValue = 1) { + return 1; + // TODO return _memory.set(internalcorrectKey(key), _memory.getLong(internalcorrectKey(key)) + incValue); + } + + // Decrements the value of an integer cached key + override long decrement(string key, int decValue = 1) { + return 0; + // TODO return _memory.set(internalcorrectKey(key), _memory.getLong(internalcorrectKey(key)) - decValue); + } + + // Delete a key from the cache + override bool removeKey(string key) { + // return _memory.removeKey(internalcorrectKey(key)); + return false; + } + + // Delete all keys from the cache + override bool clear() { + string prefix = configuration.getString("prefix"); + /* _memory.getAllKeys() + .filter!(key => key.startsWith(prefix)) + .each!(key => _memory.removeKey(key)); * / + return true; + } + + // Add a key to the cache if it does not already exist. + /* override bool merge(string key, Json value, long timeToLive = 0) { + auto internKey = internalcorrectKey(key); + return _memory.add(internKey, value, duration); + } */ + + /** + * Returns the `group value` for each of the configured groups + * If the group initial value was not found, then it initializes the group accordingly. + * / + override string[] groups() { + if (_compiledGroupNames.isEmpty) { + _compiledGroupNames = configuration.getStringArray("groups") + .map!(group => configuration.getString("prefix") ~ group).array; + } + + /* + auto mygroups = _memory.data(_compiledGroupNames) ? 
memory.data( + _compiledGroupNames) : null; + if (count(mygroups) != count(configuration.get("groups"))) { + _compiledGroupNames + .filter!(groupName => !mygroups.hasKey(groupName)) + .each!((groupName) { _memory.set(mygroup, 1, 0); mygroups[mygroup] = 1; }); */ + /* ksort(mygroups); * / + } * / + + // auto groupValues = mygroups.values; + string[] result; // = configuration.getArray("groups").map!((index, group) => group ~ groupValues[index].getString).array; + return result; + } + + /** + * Increments the group value to simulate deletion of all keys under a group + * old values will remain in storage until they expire. + * / + override bool clearGroup(string groupName) { + // TODO return /* (bool) * / _memory.increment(configuration.getString("prefix") ~ groupName); + return false; + } */ +} + +mixin(CacheEngineCalls!("Memory")); diff --git a/caches/uim/caches/classes/engines/null_.d b/caches/uim/caches/classes/engines/null_.d index 3768022283..31ebb532a4 100644 --- a/caches/uim/caches/classes/engines/null_.d +++ b/caches/uim/caches/classes/engines/null_.d @@ -14,6 +14,28 @@ class DNullCacheEngine : DCacheEngine { override bool updateKey(string key, Json valueToSet, long timeToLive = 0) { return true; - } */ + } + + override Json read(string itemKey, Json defaultValue = null) { + return defaultValue; + } + + override long increment(string itemKey, int incValue = 1) { + return 1; + } + + override long decrement(string itemKey, int decValue = 1) { + return 1; + } + + override bool removeKey(string key) { + return true; + } + + override bool clearGroup(string groupName) { + return true; + } + */ + } mixin(CacheEngineCalls!("Null")); diff --git a/caches/uim/caches/classes/engines/package.d b/caches/uim/caches/classes/engines/package.d index 75c9f95784..b767bf17d8 100644 --- a/caches/uim/caches/classes/engines/package.d +++ b/caches/uim/caches/classes/engines/package.d @@ -1,7 +1,16 @@ module uim.caches.classes.engines; -public { +public { // Main import 
uim.caches.classes.engines.engine; - import uim.caches.classes.engines.null_; import uim.caches.classes.engines.registry; -} \ No newline at end of file +} + +public { // Additional + /* import uim.caches.classes.engines.apcu; */ + /* import uim.caches.classes.engines.array_; */ + /* import uim.caches.classes.engines.file; */ + import uim.caches.classes.engines.memory; + import uim.caches.classes.engines.null_; + + /* import uim.caches.classes.engines.redis; */ +} diff --git a/caches/uim/caches/classes/engines/redis.dx b/caches/uim/caches/classes/engines/redis.dx new file mode 100644 index 0000000000..a4d29bfad3 --- /dev/null +++ b/caches/uim/caches/classes/engines/redis.dx @@ -0,0 +1,278 @@ +module uim.caches.classes.engines.redis; + +import uim.caches; + +@safe: +// Redis storage engine for cache. +class DRedisCacheEngine : DCacheEngine { + mixin(CacheEngineThis!("Redis")); + + override bool initialize(Json[string] initData = null) { + if (!super.initialize(initData)) { + return false; + } + + /** + * The default config used unless overridden by runtime configuration + * + * - `database` database number to use for connection. + * - `duration` Specify how long items in this cache configuration last. + * - `groups` List of groups or 'tags' associated to every key stored in this config. + * handy for deleting a complete group from cache. + * - `password` Redis server password. + * - `persistent` Connect to the Redis server with a persistent connection + * - `port` port number to the Redis server. + * - `prefix` Prefix appended to all entries. Good for when you need to share a keyspace + * with either another cache config or another application. + * - `scanCount` Number of keys to ask for each scan (default: 10) + * - `server` URL or IP to the Redis server host. + * - `timeout` timeout in seconds (float). 
+ * - `unix_socket` Path to the unix socket file (default: false) + * + */ + + // TODO + /* if (!extension_loaded("redis")) { + throw new UIMException("The `redis` extension must be enabled to use RedisEngine."); + } */ + + if (Json host = initData.getJson("host")) { + initData["server"] = host; + } + + configuration + .setDefault("database", 0) + .setDefault("duration", 3600) + .setDefault("groups", Json.emptyArray) + .setDefault("persistent", true) + .setDefault("port", 6379) + .setDefault("prefix", "uim_") + .setDefault("host", Json(null)) + .setDefault("server", "127.0.0.1") + .setDefault("timeout", 0) + .setDefault(["password", "unix_socket"], false) + .setDefault("scanCount", 10); + + return true; + // TODO return _connect(); + } + + // Redis wrapper. + // TODO protected DRedis _redis; + + // Connects to a Redis server + protected bool _connect() { + bool result; + // TODO + /* try { + _redis = new DRedis(); + if (!configuration.isEmpty("unix_socket")) { + result = _redis.connect(configuration.get("unix_socket")); + } else if (configuration.isEmpty("persistent")) { + result = _redis.connect( + configuration.get("server"), + configuration.getLong("port"), + configuration.getLong("timeout") + ); + } else { + persistentId = configuration.getString("port") ~ configuration.getString("timeout") ~ configuration.getString("database"); + result = _redis.pconnect( + configuration.get("server"), + configuration.getLong("port"), + configuration.getLong("timeout"), + persistentId + ); + } + } catch (RedisException anException) { + if (class_hasKey(Log.classname)) { + Log.error("RedisEngine could not connect. Got error: " ~ anException.message()); + } + return false; + } + if (result && configuration.hasKey("password")) { + result = _redis.auth(configuration.getString("password")); + } + if (result) { + result = _redis.select(configuration.getLong("database")); + } */ + return result; + } + + // Write data for key into cache. 
+ override bool updateKey(string key, Json dataToCache, long timeToLive = 0) { + auto internKey = internalcorrectKey(key); + auto serializedData = serialize(dataToCache); + + auto myDuration = duration(timeToLive); + return myDuration == 0 + ? _redis.set(internKey, serializedData) + : _redis.setEx(internKey, myDuration, serializedData); + } + + // Read a key from the cache + override Json read(string key, Json defaultValue = Json(null)) { + auto internKey = internalcorrectKey(key); + auto value = _redis.get(internKey); + return value.isNull + ? defaultValue + : _unserialize(value); + } + + // Increments the value of an integer cached key & update the expiry time + override long increment(string key, int incOffset = 1) { + auto aKey = internalKey(itemKey); + auto aDuration = configuration.getLong("duration"); + + /* + auto aValue = _redis.incrBy(aKey, incOffset); + if (aDuration > 0) { + _redis.expire(aKey, aDuration); + } */ + return 0; // aValue; + } + + // Decrements the value of an integer cached key & update the expiry time + override long decrement(string itemKey, int decValue = 1) { + auto aDuration = configuration.get("duration"); + auto aKey = internalKey(itemKey); + + auto aValue = _redis.decrBy(aKey, decValue); + if (aDuration > 0) { + _redis.expire(aKey, aDuration); + } + return aValue; + } + + // Delete a key from the cache + override bool removeKey(string key) { + auto key = internalKey(dataIdentifier); + return _redis.del(key) > 0; + } + + /** + * Delete a key from the cache asynchronously + * Just unlink a key from the cache. The actual removal will happen later asynchronously. 
+   */
+  /* override */ bool deleteAsync(string dataIdentifier) {
+    auto key = internalKey(dataIdentifier); // was `internalKey(dataId)` — `dataId` is undefined in this scope
+    return _redis.unlink(key) > 0;
+  }
+
+  // Delete all keys from the cache
+  override bool clear() {
+    _redis.setOption(Redis.OPT_SCAN, to!string(Redis.SCAN_RETRY));
+
+    // was declared inside the comment below, leaving the live `return isAllDeleted;` unresolved
+    bool isAllDeleted = true;
+
+    // TODO re-enable once the DRedis wrapper is available.
+    /*
+    auto anIterator = null;
+    auto somePattern = configuration.getString("prefix") ~ "*";
+
+    while (true) {
+      auto someKeys = _redis.scan(anIterator, somePattern, configuration.getLong("scanCount"));
+      if (someKeys == false) {
+        break;
+      }
+      someKeys.each!((key) {
+        bool isDeleted = (_redis.del(key) > 0); // was `del(aKey)` — `aKey` undefined; `isDeleted` undeclared
+        isAllDeleted = isAllDeleted && isDeleted;
+      });
+    }
+    */
+
+    return isAllDeleted;
+  }
+
+  /**
+   * Delete all keys from the cache by a blocking operation
+   *
+   * Faster than clear() using unlink method.
+   */
+  // TODO
+  /*
+  override bool clearBlocking() {
+    _redis.setOption(Redis.OPT_SCAN, to!string(Redis.SCAN_RETRY));
+
+    bool isAllDeleted = true;
+    auto anIterator = null;
+    string somePattern = configuration.getString("prefix") ~ "*";
+    while (true) {
+      auto someKeys = _redis.scan(anIterator, somePattern, configuration.getLong("scanCount"));
+      if (someKeys == false) {
+        break;
+      }
+      someKeys.each!((key) {
+        bool isDeleted = (_redis.unlink(key) > 0);
+        isAllDeleted = isAllDeleted && isDeleted;
+      });
+    }
+    return isAllDeleted;
+  }
+  */
+
+  /**
+   * Write data for key into cache if it doesn`t exist already.
+   * If it already exists, it fails and returns false.
+   */
+  override bool add(string dataId, Json dataToCache) {
+    auto aDuration = configuration.getLong("duration"); // getLong for consistency with the other methods
+    auto aKey = internalKey(dataId);
+    auto aValue = serialize(dataToCache);
+
+    // TODO use Redis SET with NX + EX options once the DRedis wrapper is available,
+    // e.g. _redis.set(aKey, aValue, nx: true, ex: aDuration).
+    return false;
+  }
+
+  /**
+   * Returns the `group value` for each of the configured groups
+   * If the group initial value was not found, then it initializes the group accordingly.
+   */
+  override string[] groups() {
+    string[] result;
+    configuration.get("groups").each!((group) {
+      auto aValue = _redis.get(configuration.getString("prefix") ~ group);
+      if (!aValue) {
+        aValue = serialize(1); // NOTE(review): serialize() takes Json — confirm `1` converts implicitly
+        _redis.set(configuration.getString("prefix") ~ group, aValue);
+      }
+      result ~= group ~ aValue; // was `anGroup ~ aValue` — `anGroup` is undefined in this scope
+    });
+
+    return result;
+  }
+
+  /**
+   * Increments the group value to simulate deletion of all keys under a group
+   * old values will remain in storage until they expire.
+   */
+  override bool clearGroup(string groupName) {
+    // incr returns the new counter (a long); compare so the method yields a bool
+    return _redis.incr(configuration.getString("prefix") ~ groupName) > 0;
+  }
+
+  /**
+   * Serialize value for saving to Redis.
+   *
+   * This is needed instead of using Redis' in built serialization feature
+   * as it creates problems incrementing/decrementing initially set integer value.
+   */
+  protected string serialize(Json valueToSerialize) {
+    return isInt(valueToSerialize)
+      ? to!string(valueToSerialize)
+      : .serialize(valueToSerialize); // leading dot = module-level serialize; a bare call here recurses forever
+  }
+
+  // Unserialize string value fetched from Redis.
+  protected Json unserialize(string valueToUnserialize) {
+    // TODO port of the PHP original: integer-looking strings become plain ints,
+    // everything else goes through the module-level .unserialize
+    // (a bare `unserialize` call here would recurse forever).
+    /*
+    return matchFirst(valueToUnserialize, `^[-]?\d+$`)
+      ? Json(to!int(valueToUnserialize))
+      : .unserialize(valueToUnserialize);
+    */
+    return Json(null);
+  }
+
+  // Disconnects from the redis server when the engine is destroyed
+  // (persistent connections are intentionally left open).
+  ~this() { // was `auto __destruct()` — a PHP-ism; D destructors are spelled `~this()`
+    if (configuration.isEmpty("persistent")) {
+      _redis.close();
+    }
+  }
+}
+mixin(CacheEngineCalls!("Redis"));
diff --git a/caches/uim/caches/classes/engines/registry.d b/caches/uim/caches/classes/engines/registry.d
index 7ecdc5d99d..498bae4d72 100644
--- a/caches/uim/caches/classes/engines/registry.d
+++ b/caches/uim/caches/classes/engines/registry.d
@@ -13,6 +13,6 @@ import uim.caches;
 class DCacheEngineRegistry : DObjectRegistry!DCacheEngine {
 }
 
-auto CacheEngineRegistry() { // Singleton
+auto CacheEngineRegistration() { // Singleton
   return DCacheEngineRegistry.registration;
 }
diff --git a/caches/uim/caches/helpers/cache.d b/caches/uim/caches/helpers/cache.d
new file mode 100644
index 0000000000..2e7817c618
--- /dev/null
+++ b/caches/uim/caches/helpers/cache.d
@@ -0,0 +1,16 @@
+/****************************************************************************************************************
+* Copyright: © 2017-2024 Ozan Nurettin Süel (aka UIManufaktur) *
+* License: Subject to the terms of the Apache 2.0 license, as written in the included LICENSE.txt file.
*
+* Authors: Ozan Nurettin Süel (aka UIManufaktur) *
+*****************************************************************************************************************/
+module uim.caches.helpers.cache;
+
+import uim.caches;
+
+/// Type guard used by the cache helpers package.
+///
+/// True when `instance` implements `ICache`; a failed dynamic
+/// cast yields `null`, so the null test is the whole check.
+bool isCache(Object instance) {
+  return cast(ICache) instance !is null;
+}
diff --git a/caches/uim/caches/helpers/engine.d b/caches/uim/caches/helpers/engine.d
new file mode 100644
index 0000000000..527f014db4
--- /dev/null
+++ b/caches/uim/caches/helpers/engine.d
@@ -0,0 +1,16 @@
+/****************************************************************************************************************
+* Copyright: © 2017-2024 Ozan Nurettin Süel (aka UIManufaktur) *
+* License: Subject to the terms of the Apache 2.0 license, as written in the included LICENSE.txt file. *
+* Authors: Ozan Nurettin Süel (aka UIManufaktur) *
+*****************************************************************************************************************/
+module uim.caches.helpers.engine;
+
+import uim.caches;
+
+/// Type guard used by the cache helpers package.
+///
+/// True when `instance` implements `ICacheEngine`; a failed dynamic
+/// cast yields `null`, so the null test is the whole check.
+bool isEngine(Object instance) {
+  return cast(ICacheEngine) instance !is null;
+}
diff --git a/caches/uim/caches/helpers/package.d b/caches/uim/caches/helpers/package.d
index 7ac8fbc109..0890f8ed44 100644
--- a/caches/uim/caches/helpers/package.d
+++ b/caches/uim/caches/helpers/package.d
@@ -1,2 +1,6 @@
 module uim.caches.helpers;
 
+public {
+  import uim.caches.helpers.cache;
+  import uim.caches.helpers.engine;
+}
\ No newline at end of file
diff --git a/caches/uim/caches/tests/package.d b/caches/uim/caches/tests/package.d
index 54a3dea64b..351d86ebc3 100644
--- a/caches/uim/caches/tests/package.d
+++ b/caches/uim/caches/tests/package.d
@@ -1,6 +1,6 @@
 module uim.caches.tests;
 
 public {
-  import uim.caches.test.cache;
-  import uim.caches.test.engine;
+  import uim.caches.tests.cache;
+  import uim.caches.tests.engine;
 }
\ No newline at end of file