diff --git a/package-lock.json b/package-lock.json
index 3909675e4..5c812e63d 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -9,7 +9,7 @@
       "version": "10.27.0",
       "license": "Apache-2.0",
       "dependencies": {
-        "@splitsoftware/splitio-commons": "1.16.1-rc.0",
+        "@splitsoftware/splitio-commons": "1.16.1-rc.3",
         "@types/google.analytics": "0.0.40",
         "@types/ioredis": "^4.28.0",
         "bloom-filters": "^3.0.0",
@@ -872,9 +872,9 @@
       "dev": true
     },
     "node_modules/@splitsoftware/splitio-commons": {
-      "version": "1.16.1-rc.0",
-      "resolved": "https://registry.npmjs.org/@splitsoftware/splitio-commons/-/splitio-commons-1.16.1-rc.0.tgz",
-      "integrity": "sha512-LcKWbB/ruUl38NQqoIAzYm+RTvrse8iZqZbCP7lqq8v5R6LvYd0nhavDmLTHIr0oKfZzYCKm36SOmSe0S7dvlQ==",
+      "version": "1.16.1-rc.3",
+      "resolved": "https://registry.npmjs.org/@splitsoftware/splitio-commons/-/splitio-commons-1.16.1-rc.3.tgz",
+      "integrity": "sha512-c+EkbFp/Ui+BCiSj3EnA/c2LzJRSWtM642GHdPxBJFkDqKmDyKMOp69j/wCXD7ZDN8w0P5tFgbTujrTfA2tHuA==",
       "dependencies": {
         "tslib": "^2.3.1"
       },
@@ -8437,9 +8437,9 @@
       "dev": true
     },
     "@splitsoftware/splitio-commons": {
-      "version": "1.16.1-rc.0",
-      "resolved": "https://registry.npmjs.org/@splitsoftware/splitio-commons/-/splitio-commons-1.16.1-rc.0.tgz",
-      "integrity": "sha512-LcKWbB/ruUl38NQqoIAzYm+RTvrse8iZqZbCP7lqq8v5R6LvYd0nhavDmLTHIr0oKfZzYCKm36SOmSe0S7dvlQ==",
+      "version": "1.16.1-rc.3",
+      "resolved": "https://registry.npmjs.org/@splitsoftware/splitio-commons/-/splitio-commons-1.16.1-rc.3.tgz",
+      "integrity": "sha512-c+EkbFp/Ui+BCiSj3EnA/c2LzJRSWtM642GHdPxBJFkDqKmDyKMOp69j/wCXD7ZDN8w0P5tFgbTujrTfA2tHuA==",
       "requires": {
         "tslib": "^2.3.1"
       }
diff --git a/package.json b/package.json
index 848169bd3..bf8afe8ae 100644
--- a/package.json
+++ b/package.json
@@ -40,7 +40,7 @@
     "node": ">=6"
   },
   "dependencies": {
-    "@splitsoftware/splitio-commons": "1.16.1-rc.0",
+    "@splitsoftware/splitio-commons": "1.16.1-rc.3",
     "@types/google.analytics": "0.0.40",
     "@types/ioredis": "^4.28.0",
     "bloom-filters": "^3.0.0",
diff --git a/src/__tests__/browserSuites/push-fallbacking.spec.js b/src/__tests__/browserSuites/push-fallback.spec.js
similarity index 91%
rename from src/__tests__/browserSuites/push-fallbacking.spec.js
rename to src/__tests__/browserSuites/push-fallback.spec.js
index 83832c0d6..90f698b5d 100644
--- a/src/__tests__/browserSuites/push-fallbacking.spec.js
+++ b/src/__tests__/browserSuites/push-fallback.spec.js
@@ -39,9 +39,9 @@ const userKey = 'nicolas@split.io';
 const secondUserKey = 'marcio@split.io';
 const baseUrls = {
-  sdk: 'https://sdk.push-fallbacking/api',
-  events: 'https://events.push-fallbacking/api',
-  auth: 'https://auth.push-fallbacking/api'
+  sdk: 'https://sdk.push-fallback/api',
+  events: 'https://events.push-fallback/api',
+  auth: 'https://auth.push-fallback/api'
 };
 const config = {
   core: {
@@ -51,11 +51,14 @@
   scheduler: {
     featuresRefreshRate: 0.2,
     segmentsRefreshRate: 0.25,
+    largeSegmentsRefreshRate: 0.25,
     impressionsRefreshRate: 3000
   },
   urls: baseUrls,
   streamingEnabled: true,
-  // debug: true,
+  sync: {
+    largeSegmentsEnabled: true
+  }
 };
 
 const settings = settingsFactory(config);
@@ -79,30 +82,31 @@ const MILLIS_DESTROY = MILLIS_STREAMING_DISABLED_CONTROL + settings.scheduler.fe
 /**
  * Sequence of calls:
- * 0.0 secs: initial SyncAll (/splitChanges, /mySegments/*), auth, SSE connection
- * 0.1 secs: SSE connection opened -> syncAll (/splitChanges, /mySegments/nicolas)
- * 0.2 secs: Streaming down (OCCUPANCY event) -> fetch due to fallback to polling (/splitChanges, /mySegments/nicolas)
+ * 0.0 secs: initial SyncAll (/splitChanges, /my(Large)Segments/nicolas), auth, SSE connection
+ * 0.1 secs: SSE connection opened -> syncAll (/splitChanges, /my(Large)Segments/nicolas)
+ * 0.2 secs: Streaming down (OCCUPANCY event) -> fetch due to fallback to polling (/splitChanges, /my(Large)Segments/nicolas)
  * 0.3 secs: SPLIT_UPDATE event ignored
  * 0.4 secs: periodic fetch due to polling (/splitChanges)
- * 0.45 secs: periodic fetch due to polling (/mySegments/*)
- * 0.5 secs: Streaming up (OCCUPANCY event) -> syncAll (/splitChanges, /mySegments/nicolas)
- * 0.55 secs: create a new client while streaming -> initial fetch (/mySegments/marcio), auth, SSE connection and syncAll (/splitChanges, /mySegments/nicolas, /mySegments/marcio)
+ * 0.45 secs: periodic fetch due to polling (/my(Large)Segments/*)
+ * 0.5 secs: Streaming up (OCCUPANCY event) -> syncAll (/splitChanges, /my(Large)Segments/nicolas)
+ * 0.55 secs: create a new client while streaming -> initial fetch (/my(Large)Segments/marcio), auth, SSE connection and syncAll (/splitChanges, /my(Large)Segments/nicolas, /my(Large)Segments/marcio)
  * 0.6 secs: SPLIT_UPDATE event -> /splitChanges
- * 0.7 secs: Streaming down (CONTROL event) -> fetch due to fallback to polling (/splitChanges, /mySegments/nicolas, /mySegments/marcio)
+ * 0.7 secs: Streaming down (CONTROL event) -> fetch due to fallback to polling (/splitChanges, /my(Large)Segments/nicolas, /my(Large)Segments/marcio)
  * 0.8 secs: MY_SEGMENTS_UPDATE event ignored
  * 0.9 secs: periodic fetch due to polling (/splitChanges)
- * 0.95 secs: periodic fetch due to polling (/mySegments/nicolas, /mySegments/marcio, /mySegments/facundo)
- * 1.0 secs: Streaming up (CONTROL event) -> syncAll (/splitChanges, /mySegments/nicolas, /mySegments/marcio, /mySegments/facundo)
+ * 0.95 secs: periodic fetch due to polling (/my(Large)Segments/nicolas, /my(Large)Segments/marcio, /my(Large)Segments/facundo)
+ * 1.0 secs: Streaming up (CONTROL event) -> syncAll (/splitChanges, /my(Large)Segments/nicolas, /my(Large)Segments/marcio, /my(Large)Segments/facundo)
  * 1.1 secs: MY_SEGMENTS_UPDATE event -> /mySegments/nicolas
- * 1.2 secs: Streaming down (CONTROL event) -> fetch due to fallback to polling (/splitChanges, /mySegments/nicolas, /mySegments/marcio, /mySegments/facundo)
+ * 1.2 secs: Streaming down (CONTROL event) -> fetch due to fallback to polling (/splitChanges, /my(Large)Segments/nicolas, /my(Large)Segments/marcio, /my(Large)Segments/facundo)
  * 1.3 secs: STREAMING_RESET control event -> auth, SSE connection, syncAll and stop polling
  * 1.5 secs: STREAMING_RESET control event -> auth, SSE connection, syncAll
  * 1.6 secs: Streaming closed (CONTROL STREAMING_DISABLED event) -> fetch due to fallback to polling (/splitChanges, /mySegments/nicolas, /mySegments/marcio, /mySegments/facundo)
- * 1.8 secs: periodic fetch due to polling (/splitChanges): due to update without segments, mySegments are not fetched
+ * 1.8 secs: periodic fetch due to polling (/splitChanges)
+ * 1.85 secs: periodic fetch due to polling (/myLargeSegments/*). /mySegments/* are not fetched due to update without segments
  * 2.0 secs: periodic fetch due to polling (/splitChanges)
  * 2.1 secs: destroy client
  */
-export function testFallbacking(fetchMock, assert) {
+export function testFallback(fetchMock, assert) {
   assert.plan(20);
   fetchMock.reset();
@@ -213,6 +217,10 @@ export function testFallback(fetchMock, assert) {
     return { status: 200, body: authPushEnabledNicolas };
   });
 
+  // MyLargeSegments are fetched one more time than MySegments due to smart pausing of MySegments sync at the end of the test
+  fetchMock.get({ url: url(settings, '/myLargeSegments/nicolas%40split.io'), repeat: 14 }, { status: 200, body: { myLargeSegments: [] } });
+  fetchMock.get({ url: url(settings, '/myLargeSegments/marcio%40split.io'), repeat: 10 }, { status: 200, body: { myLargeSegments: [] } });
+
   // initial split and mySegment sync
   fetchMock.getOnce(url(settings, '/splitChanges?s=1.1&since=-1'), { status: 200, body: splitChangesMock1 });
   fetchMock.getOnce(url(settings, '/mySegments/nicolas%40split.io'), { status: 200, body: mySegmentsNicolasMock1 });
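The hunks above rename the browser fallback suite and wire it up to the new large segment endpoints: the config gains `scheduler.largeSegmentsRefreshRate` and `sync.largeSegmentsEnabled`, and the suite stubs `/myLargeSegments/<key>` with an empty `{ myLargeSegments: [] }` body, so the syncAll and polling steps in the sequence comment now cover `/my(Large)Segments` as well as `/splitChanges`. A minimal sketch of that configuration surface follows; the factory import path, the fake token and the ready-event handling are illustrative assumptions, not part of this diff.

```js
// Minimal sketch (not part of the diff) of the config surface exercised by the suite above.
// Option names and URLs mirror the test config; the import path, the fake token and the
// event handling are assumptions for illustration only.
import { SplitFactory } from '../../';

const factory = SplitFactory({
  core: { authorizationKey: '<fake-token-push-1>', key: 'nicolas@split.io' },
  scheduler: {
    featuresRefreshRate: 0.2,       // /splitChanges polling interval, in seconds
    segmentsRefreshRate: 0.25,      // /mySegments/<key> polling interval
    largeSegmentsRefreshRate: 0.25  // /myLargeSegments/<key> polling interval
  },
  sync: {
    largeSegmentsEnabled: true      // opt in to large segment synchronization
  },
  urls: {
    sdk: 'https://sdk.push-fallback/api',
    events: 'https://events.push-fallback/api',
    auth: 'https://auth.push-fallback/api'
  },
  streamingEnabled: true
});

const client = factory.client();
client.on(client.Event.SDK_READY, () => {
  // evaluations can start here; with the mocks above, /splitChanges, /mySegments/<key>
  // and /myLargeSegments/<key> have already been requested at least once
  client.destroy();
});
```

While streaming is up these endpoints are refreshed through syncAll calls; when an OCCUPANCY or CONTROL event drops the SDK back to polling, the scheduler rates above drive the periodic fetches listed in the sequence comment.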
diff --git a/src/__tests__/browserSuites/push-refresh-token.spec.js b/src/__tests__/browserSuites/push-refresh-token.spec.js
index cecfcb1fe..d1f929925 100644
--- a/src/__tests__/browserSuites/push-refresh-token.spec.js
+++ b/src/__tests__/browserSuites/push-refresh-token.spec.js
@@ -57,7 +57,7 @@ export function testRefreshToken(fetchMock, assert) {
     sseCount++;
     switch (sseCount) {
       case 1:
-        assert.true(nearlyEqual(Date.now() - start, 0), 'first connection is created inmediatelly');
+        assert.true(nearlyEqual(Date.now() - start, 0), 'first connection is created immediately');
         break;
       case 2:
         assert.true(nearlyEqual(Date.now() - start, MILLIS_REFRESH_TOKEN + MILLIS_CONNDELAY), 'second connection is created with a delay');
diff --git a/src/__tests__/browserSuites/readiness.spec.js b/src/__tests__/browserSuites/readiness.spec.js
index 3b98d5132..024ff8277 100644
--- a/src/__tests__/browserSuites/readiness.spec.js
+++ b/src/__tests__/browserSuites/readiness.spec.js
@@ -62,7 +62,7 @@ export default function (fetchMock, assert) {
     });
   });
 
-  assert.test(t => { // Timeout test, we have retries but mySegmnets takes too long
+  assert.test(t => { // Timeout test, we have retries but mySegments takes too long
     const testUrls = {
       sdk: 'https://sdk.baseurl/readinessSuite2',
       events: 'https://events.baseurl/readinessSuite2'
diff --git a/src/__tests__/browserSuites/telemetry.spec.js b/src/__tests__/browserSuites/telemetry.spec.js
index eb46977dd..39be3e20f 100644
--- a/src/__tests__/browserSuites/telemetry.spec.js
+++ b/src/__tests__/browserSuites/telemetry.spec.js
@@ -76,7 +76,7 @@ export default async function telemetryBrowserSuite(fetchMock, t) {
     // @TODO check if iDe value is correct
     assert.deepEqual(data, {
-      mE: {}, hE: { sp: { 500: 1 }, ms: { 500: 1 } }, tR: 0, aR: 0, iQ: 4, iDe: 1, iDr: 0, spC: 31, seC: 1, skC: 1, eQ: 1, eD: 0, sE: [], t: [], ufs: {}
+      mE: {}, hE: { sp: { 500: 1 }, ms: { 500: 1 } }, tR: 0, aR: 0, iQ: 4, iDe: 1, iDr: 0, spC: 32, seC: 1, skC: 1, eQ: 1, eD: 0, sE: [], t: [], ufs: {}
     }, 'metrics/usage JSON payload should be the expected');
 
     finish.next();
@@ -96,7 +96,7 @@ export default async function telemetryBrowserSuite(fetchMock, t) {
     // @TODO check if iDe value is correct
     assert.deepEqual(data, {
       mL: {}, mE: {}, hE: {}, hL: {}, // errors and latencies were popped
-      tR: 0, aR: 0, iQ: 4, iDe: 1, iDr: 0, spC: 31, seC: 1, skC: 1, eQ: 1, eD: 0, sE: [], t: [], ufs: {}
+      tR: 0, aR: 0, iQ: 4, iDe: 1, iDr: 0, spC: 32, seC: 1, skC: 1, eQ: 1, eD: 0, sE: [], t: [], ufs: {}
     }, '2nd metrics/usage JSON payload should be the expected');
     return 200;
   });
diff --git a/src/__tests__/mocks/splitchanges.since.-1.json b/src/__tests__/mocks/splitchanges.since.-1.json
index 6198d41cc..82ceb16d7 100644
--- a/src/__tests__/mocks/splitchanges.since.-1.json
+++ b/src/__tests__/mocks/splitchanges.since.-1.json
@@ -1,5 +1,75 @@
 {
   "splits": [
+    {
+      "orgId": null,
+      "environment": null,
+      "trafficTypeId": null,
+      "trafficTypeName": null,
+      "name": "in_large_segment",
+      "seed": -1984784937,
+      "status": "ACTIVE",
+      "killed": false,
+      "defaultTreatment": "no",
+      "conditions": [
+        {
+          "matcherGroup": {
+            "combiner": "AND",
+            "matchers": [
+              {
+                "keySelector": {
+                  "trafficType": "user",
+                  "attribute": null
+                },
+                "matcherType": "IN_LARGE_SEGMENT",
+                "negate": false,
+                "userDefinedSegmentMatcherData": {
+                  "segmentName": "harnessians"
+                },
+                "whitelistMatcherData": null,
+                "unaryNumericMatcherData": null,
+                "betweenMatcherData": null,
+                "unaryStringMatcherData": null
+              }
+            ]
+          },
+          "partitions": [
+            {
+              "treatment": "yes",
+              "size": 100
+            }
+          ]
+        },
+        {
+          "matcherGroup": {
+            "combiner": "AND",
+            "matchers": [
+              {
+                "keySelector": {
+                  "trafficType": "user",
+                  "attribute": null
+                },
+                "matcherType": "IN_LARGE_SEGMENT",
+                "negate": false,
+                "userDefinedSegmentMatcherData": {
+                  "segmentName": "splitters"
+                },
+                "whitelistMatcherData": null,
+                "unaryNumericMatcherData": null,
+                "betweenMatcherData": null,
+                "unaryStringMatcherData": null
+              }
+            ]
+          },
+          "partitions": [
+            {
+              "treatment": "yes",
+              "size": 100
+            }
+          ]
+        }
+      ],
+      "configurations": {}
+    },
     {
       "orgId": null,
       "environment": null,
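The `in_large_segment` entry added above is also why the telemetry expectations in this PR move `spC` from 31 to 32: the mock now contains one more feature flag. Its two conditions use the `IN_LARGE_SEGMENT` matcher against the large segments `harnessians` and `splitters`, each serving `yes` to 100% of matching keys, with `no` as the default treatment. The stand-in below, which is not SDK code, simply mirrors that decision under those assumptions.

```js
// Stand-in sketch (not part of the diff): the decision encoded by the mocked
// 'in_large_segment' flag. A key that belongs to either targeted large segment
// gets 'yes'; any other key falls through to the defaultTreatment 'no'.
function evaluateInLargeSegment(myLargeSegments) {
  const targeted = ['harnessians', 'splitters']; // segmentName of each IN_LARGE_SEGMENT condition
  const isMember = targeted.some(name => myLargeSegments.includes(name));
  return isMember ? 'yes' : 'no'; // 100% 'yes' partition vs. defaultTreatment 'no'
}

console.log(evaluateInLargeSegment(['harnessians'])); // 'yes'
console.log(evaluateInLargeSegment([]));              // 'no'
```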
diff --git a/src/__tests__/nodeSuites/push-fallbacking.spec.js b/src/__tests__/nodeSuites/push-fallback.spec.js
similarity index 98%
rename from src/__tests__/nodeSuites/push-fallbacking.spec.js
rename to src/__tests__/nodeSuites/push-fallback.spec.js
index 5f330634e..7789236a1 100644
--- a/src/__tests__/nodeSuites/push-fallbacking.spec.js
+++ b/src/__tests__/nodeSuites/push-fallback.spec.js
@@ -35,9 +35,9 @@ import { settingsFactory } from '../../settings';
 const key = 'nicolas@split.io';
 const baseUrls = {
-  sdk: 'https://sdk.push-fallbacking/api',
-  events: 'https://events.push-fallbacking/api',
-  auth: 'https://auth.push-fallbacking/api'
+  sdk: 'https://sdk.push-fallback/api',
+  events: 'https://events.push-fallback/api',
+  auth: 'https://auth.push-fallback/api'
 };
 const config = {
   core: {
@@ -96,7 +96,7 @@ const MILLIS_DESTROY = MILLIS_STREAMING_DISABLED_CONTROL + settings.scheduler.fe
  * 2.1 secs: periodic fetch due to polling (/segmentChanges/*)
  * 2.1 secs: destroy client
  */
-export function testFallbacking(fetchMock, assert) {
+export function testFallback(fetchMock, assert) {
   assert.plan(17);
   fetchMock.reset();
   __setEventSource(EventSourceMock);
diff --git a/src/__tests__/nodeSuites/push-refresh-token.spec.js b/src/__tests__/nodeSuites/push-refresh-token.spec.js
index 0cb2c5424..b3246a2c4 100644
--- a/src/__tests__/nodeSuites/push-refresh-token.spec.js
+++ b/src/__tests__/nodeSuites/push-refresh-token.spec.js
@@ -57,7 +57,7 @@ export function testRefreshToken(fetchMock, assert) {
     sseCount++;
     switch (sseCount) {
       case 1:
-        assert.true(nearlyEqual(Date.now() - start, 0), 'first connection is created inmediatelly');
+        assert.true(nearlyEqual(Date.now() - start, 0), 'first connection is created immediately');
         break;
       case 2:
         assert.true(nearlyEqual(Date.now() - start, MILLIS_REFRESH_TOKEN + MILLIS_CONNDELAY), 'second connection is created with a delay');
diff --git a/src/__tests__/nodeSuites/telemetry.spec.js b/src/__tests__/nodeSuites/telemetry.spec.js
index fdbc7c457..a6a6bb66f 100644
--- a/src/__tests__/nodeSuites/telemetry.spec.js
+++ b/src/__tests__/nodeSuites/telemetry.spec.js
@@ -66,7 +66,7 @@ export default async function telemetryNodejsSuite(key, fetchMock, assert) {
     // @TODO check if iDe value is correct
     assert.deepEqual(data, {
-      mE: {}, hE: { sp: { 500: 1 } }, tR: 0, aR: 0, iQ: 4, iDe: 1, iDr: 0, spC: 31, seC: 3, skC: 3, eQ: 1, eD: 0, sE: [], t: [], ufs: {}
+      mE: {}, hE: { sp: { 500: 1 } }, tR: 0, aR: 0, iQ: 4, iDe: 1, iDr: 0, spC: 32, seC: 3, skC: 3, eQ: 1, eD: 0, sE: [], t: [], ufs: {}
     }, 'metrics/usage JSON payload should be the expected');
 
     finish.next();
@@ -85,7 +85,7 @@ export default async function telemetryNodejsSuite(key, fetchMock, assert) {
     // @TODO check if iDe value is correct
     assert.deepEqual(data, {
       mL: {}, mE: {}, hE: {}, hL: {}, // errors and latencies were popped
-      tR: 0, aR: 0, iQ: 4, iDe: 1, iDr: 0, spC: 31, seC: 3, skC: 3, eQ: 1, eD: 0, sE: [], t: [], ufs: {}
+      tR: 0, aR: 0, iQ: 4, iDe: 1, iDr: 0, spC: 32, seC: 3, skC: 3, eQ: 1, eD: 0, sE: [], t: [], ufs: {}
     }, '2nd metrics/usage JSON payload should be the expected');
     return 200;
   });
diff --git a/src/__tests__/push/browser.spec.js b/src/__tests__/push/browser.spec.js
index 2ff84728d..0d8dbeee1 100644
--- a/src/__tests__/push/browser.spec.js
+++ b/src/__tests__/push/browser.spec.js
@@ -4,7 +4,7 @@ import { testAuthWithPushDisabled, testAuthWith401, testNoEventSource, testSSEWi
 import { testPushRetriesDueToAuthErrors, testPushRetriesDueToSseErrors, testSdkDestroyWhileAuthRetries, testSdkDestroyWhileAuthSuccess, testSdkDestroyWhileConnDelay } from '../browserSuites/push-initialization-retries.spec';
 import { testSynchronization } from '../browserSuites/push-synchronization.spec';
 import { testSynchronizationRetries } from '../browserSuites/push-synchronization-retries.spec';
-import { testFallbacking } from '../browserSuites/push-fallbacking.spec';
+import { testFallback } from '../browserSuites/push-fallback.spec';
 import { testRefreshToken } from '../browserSuites/push-refresh-token.spec';
 import { testSplitKillOnReadyFromCache } from '../browserSuites/push-corner-cases.spec';
 import { testFlagSets } from '../browserSuites/push-flag-sets.spec';
@@ -32,7 +32,7 @@ tape('## Browser JS - E2E CI Tests for PUSH ##', function (assert) {
   assert.test('E2E / PUSH synchronization: happy paths', testSynchronization.bind(null, fetchMock));
   assert.test('E2E / PUSH synchronization: retries', testSynchronizationRetries.bind(null, fetchMock));
 
-  assert.test('E2E / PUSH fallbacking, CONTROL, OCCUPANCY and STREAMING_RESET messages', testFallbacking.bind(null, fetchMock));
+  assert.test('E2E / PUSH fallback, CONTROL, OCCUPANCY and STREAMING_RESET messages', testFallback.bind(null, fetchMock));
 
   assert.test('E2E / PUSH refresh token and connection delay', testRefreshToken.bind(null, fetchMock));
diff --git a/src/__tests__/push/node.spec.js b/src/__tests__/push/node.spec.js
index 6345e15db..ddb756a00 100644
--- a/src/__tests__/push/node.spec.js
+++ b/src/__tests__/push/node.spec.js
@@ -4,7 +4,7 @@ import { testAuthWithPushDisabled, testAuthWith401, testAuthWith400, testNoEvent
 import { testPushRetriesDueToAuthErrors, testPushRetriesDueToSseErrors, testSdkDestroyWhileAuthRetries, testSdkDestroyWhileAuthSuccess } from '../nodeSuites/push-initialization-retries.spec';
 import { testSynchronization } from '../nodeSuites/push-synchronization.spec';
 import { testSynchronizationRetries } from '../nodeSuites/push-synchronization-retries.spec';
-import { testFallbacking } from '../nodeSuites/push-fallbacking.spec';
+import { testFallback } from '../nodeSuites/push-fallback.spec';
 import { testRefreshToken } from '../nodeSuites/push-refresh-token.spec';
 import { testFlagSets } from '../nodeSuites/push-flag-sets.spec';
@@ -33,7 +33,7 @@ tape('## Node JS - E2E CI Tests for PUSH ##', async function (assert) {
   assert.test('E2E / PUSH synchronization: happy paths', testSynchronization.bind(null, fetchMock));
   assert.test('E2E / PUSH synchronization: retries', testSynchronizationRetries.bind(null, fetchMock));
 
-  assert.test('E2E / PUSH fallbacking, CONTROL and OCCUPANCY messages', testFallbacking.bind(null, fetchMock));
+  assert.test('E2E / PUSH fallback, CONTROL and OCCUPANCY messages', testFallback.bind(null, fetchMock));
 
   assert.test('E2E / PUSH refresh token and connection delay', testRefreshToken.bind(null, fetchMock));
diff --git a/src/settings/__tests__/node.spec.js b/src/settings/__tests__/node.spec.js
index 54418b874..4e0942e0a 100644
--- a/src/settings/__tests__/node.spec.js
+++ b/src/settings/__tests__/node.spec.js
@@ -134,8 +134,8 @@ tape('SETTINGS / Log error and fallback to InMemory storage if no valid storage
   ];
 
   assert.deepEqual(logSpy.args, [
-    ['[ERROR] splitio => The provided REDIS storage is invalid for this mode. It requires consumer mode. Fallbacking into default MEMORY storage.'],
-    ['[ERROR] splitio => The provided \'INVALID\' storage type is invalid. Fallbacking into default MEMORY storage.']
+    ['[ERROR] splitio => The provided REDIS storage is invalid for this mode. It requires consumer mode. Fallback into default MEMORY storage.'],
+    ['[ERROR] splitio => The provided \'INVALID\' storage type is invalid. Fallback into default MEMORY storage.']
   ], 'logs error message');
 
   settings.forEach(setting => { assert.equal(setting.storage.type, 'MEMORY', 'fallbacks to memory storage'); });
diff --git a/src/settings/storage/browser.js b/src/settings/storage/browser.js
index 92daa2cd2..10a9e3eea 100644
--- a/src/settings/storage/browser.js
+++ b/src/settings/storage/browser.js
@@ -31,7 +31,7 @@ export function validateStorage(settings) {
   if (type !== STORAGE_MEMORY && type !== STORAGE_LOCALSTORAGE || type === STORAGE_LOCALSTORAGE && !isLocalStorageAvailable()) {
     fallbackToMemory();
-    log.error('Invalid or unavailable storage. Fallbacking into MEMORY storage');
+    log.error('Invalid or unavailable storage. Fallback into MEMORY storage');
   }
 
   return {
diff --git a/src/settings/storage/node.js b/src/settings/storage/node.js
index ea85045df..b5296889f 100644
--- a/src/settings/storage/node.js
+++ b/src/settings/storage/node.js
@@ -16,7 +16,7 @@ export function validateStorage(settings) {
     case STORAGE_REDIS: {
       // If passing REDIS storage in localhost or standalone mode, we log an error and fallback to MEMORY storage
      if (mode === STANDALONE_MODE || mode === LOCALHOST_MODE) {
-        log.error('The provided REDIS storage is invalid for this mode. It requires consumer mode. Fallbacking into default MEMORY storage.');
+        log.error('The provided REDIS storage is invalid for this mode. It requires consumer mode. Fallback into default MEMORY storage.');
         return {
           type: STORAGE_MEMORY,
           prefix
@@ -74,7 +74,7 @@ export function validateStorage(settings) {
       // If passing MEMORY storage in consumer mode, throw an error (no way to fallback to REDIS storage)
       if (mode === CONSUMER_MODE) throw new Error('A REDIS storage is required on consumer mode');
       // If passing an invalid storage type, log an error
-      if (type !== STORAGE_MEMORY) log.error(`The provided '${type}' storage type is invalid. Fallbacking into default MEMORY storage.`);
+      if (type !== STORAGE_MEMORY) log.error(`The provided '${type}' storage type is invalid. Fallback into default MEMORY storage.`);
       return {
         type: STORAGE_MEMORY,
         prefix
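The storage hunks above only reword the log messages; the validation behavior is unchanged: requesting a REDIS storage in standalone or localhost mode, or an unrecognized storage type, falls back to the default in-memory storage and logs the error, while a MEMORY storage in consumer mode still throws. A sketch of what a caller observes follows, assuming the public package entry point and `debug: true` so the error is actually printed.

```js
// Sketch (not part of the diff): observable fallback behavior of storage validation in Node.js.
// The require path, fake token and logging setup are assumptions for illustration only.
const { SplitFactory } = require('@splitsoftware/splitio');

const factory = SplitFactory({
  core: { authorizationKey: '<fake-token>' },
  // mode defaults to standalone, where a REDIS storage is not valid
  storage: { type: 'REDIS', prefix: 'MY_PREFIX' },
  debug: true
});
// Expected log, matching the string updated above:
// '[ERROR] splitio => The provided REDIS storage is invalid for this mode. It requires consumer mode. Fallback into default MEMORY storage.'
// The factory keeps running on the default MEMORY storage.
```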