diff --git a/client/javascript-sdk.mdx b/client/javascript-sdk.mdx
index ad9589bc0..94c18e680 100644
--- a/client/javascript-sdk.mdx
+++ b/client/javascript-sdk.mdx
@@ -3,6 +3,22 @@ title: JavaScript Client SDK (Web)
 sidebarTitle: Javascript (Web)
 description: Statsig's JavaScript SDK for browser and React applications.
 icon: "js"
+doc_type: api-reference
+sdk_version: 3.x
+keywords:
+ - javascript
+ - client sdk
+ - browser
+ - web
+ - frontend
+ - react
+ - api reference
+ - typescript
+related_docs:
+ - /sdks/getting-started
+ - /sdks/client-vs-server
+ - /guides/first-feature
+ - /client/Next
 ---

 import Initialization from '/snippets/client/initialization.mdx'
diff --git a/console-api/introduction.mdx b/console-api/introduction.mdx
index bc1221662..6adfc6442 100644
--- a/console-api/introduction.mdx
+++ b/console-api/introduction.mdx
@@ -1,6 +1,20 @@
 ---
 title: Console API Overview
 sidebarTitle: "Overview"
+description: Programmatically manage Statsig configurations using the Console API
+doc_type: api-reference
+api_version: "20240601"
+keywords:
+ - console api
+ - api
+ - rest api
+ - automation
+ - configuration management
+ - programmatic access
+ - crud api
+related_docs:
+ - /http-api/overview
+ - /sdks/getting-started
 ---

 The "Console API" is the CRUD API for performing the actions offered on console.statsig.com without needing to go through the web UI.
diff --git a/experiments/overview.mdx b/experiments/overview.mdx
index fdd4d0529..b5ecb8acc 100644
--- a/experiments/overview.mdx
+++ b/experiments/overview.mdx
@@ -2,6 +2,27 @@
 title: "Experiments Overview"
 sidebarTitle: "Overview"
 description: "Learn the fundamentals of experimentation with Statsig, including key concepts, randomization units, and statistical significance."
+doc_type: concept
+aliases:
+ - a/b tests
+ - a/b/n tests
+ - experimentation
+keywords:
+ - experiments
+ - a/b testing
+ - a/b/n testing
+ - randomized controlled trials
+ - statistical significance
+ - hypothesis testing
+ - control group
+ - test group
+ - variants
+ - randomization unit
+related_docs:
+ - /experiments/create-new
+ - /guides/abn-tests
+ - /metrics/introduction
+ - /statsig-warehouse-native/introduction
 ---

 **Experimentation** is a powerful tool for making data-driven decisions that improve product outcomes and customer experiences.
diff --git a/feature-flags/overview.mdx b/feature-flags/overview.mdx
index 289a53c0d..d0d4a5c25 100644
--- a/feature-flags/overview.mdx
+++ b/feature-flags/overview.mdx
@@ -1,6 +1,27 @@
 ---
 title: Feature Flags
 description: Feature Gates, commonly known as feature flags, allow you to toggle the behavior of your product in real time without deploying new code.
+doc_type: concept
+aliases:
+ - feature gates
+ - gates
+ - toggles
+ - kill switches
+keywords:
+ - feature flags
+ - feature gates
+ - feature toggles
+ - rollout
+ - gradual release
+ - kill switch
+ - targeting
+ - deployment
+ - scheduled rollouts
+related_docs:
+ - /guides/first-feature
+ - /experiments/overview
+ - /dynamic-config
+ - /sdks/getting-started
 ---

 **Feature Gates**, commonly known as feature flags, allow you to toggle the behavior of your product in real time without deploying new code.
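+
+For example, a gate check can wrap the new code path so only targeted users see it. This is a minimal sketch using the browser client from the [Build Your First Feature](/guides/first-feature) guide; the gate name and the `show()` helpers are placeholders:
+
+```javascript
+// Assumes `client` is an already-initialized StatsigClient instance (see the SDK quickstart).
+if (client.checkGate('mobile_registration')) {
+  show(mobileRegistrationPage);   // new experience for users who pass the gate
+} else {
+  show(registrationPage);         // existing experience for everyone else
+}
+```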
diff --git a/glossary.mdx b/glossary.mdx
new file mode 100644
index 000000000..5f1e57efb
--- /dev/null
+++ b/glossary.mdx
@@ -0,0 +1,225 @@
+---
+title: Glossary
+description: Comprehensive glossary of Statsig terminology and concepts
+doc_type: reference
+keywords:
+ - glossary
+ - terminology
+ - definitions
+ - concepts
+ - reference
+---
+
+This glossary provides definitions for key terms and concepts used throughout Statsig documentation.
+
+## Core Concepts
+
+### A/B Test
+An experiment methodology where users are randomly assigned to different variants (typically Control and Test groups) to measure the impact of product changes on key metrics. Also referred to as an **Experiment**.
+
+**Aliases**: Experiment, A/B/n Test
+
+### Allocation
+The percentage of users eligible to be enrolled in an experiment or feature rollout.
+
+### Assignment
+The process of determining which variant or group a user receives in an experiment or feature flag.
+
+## Feature Management
+
+### Feature Flag
+A configuration that enables or disables functionality for specific users or groups without deploying new code. Used for gradual rollouts, kill switches, and targeted releases.
+
+**Aliases**: Feature Gate, Gate, Toggle
+
+### Feature Gate
+See **Feature Flag**.
+
+### Dynamic Config
+A configuration object that can return different values for different users based on targeting rules. Unlike feature flags (boolean), dynamic configs can return complex JSON objects.
+
+**Aliases**: Config, Remote Config
+
+### Kill Switch
+A feature flag used to quickly disable functionality in production without requiring a code deployment. Typically used for emergency rollbacks.
+
+## Experimentation
+
+### Control Group
+The baseline group in an experiment that does not receive the product change being tested. Used as a comparison point to measure the impact of changes.
+
+### Test Group
+The group in an experiment that receives the product change being tested. Also called **Experiment Group** or **Treatment Group**.
+
+**Aliases**: Experiment Group, Treatment Group, Variant
+
+### Exposure
+An event logged when a user is evaluated against a feature flag or experiment. Exposures are used to calculate experiment results and measure feature adoption.
+
+### Holdout
+A group of users excluded from receiving new features or changes, used to measure the cumulative impact of multiple experiments over time.
+
+### Layer
+A mechanism for running multiple mutually exclusive experiments on the same surface or feature. Ensures users are only exposed to one experiment within the layer.
+
+### Parameter
+A configurable value within an experiment or dynamic config that can vary between groups. Parameters can be strings, numbers, booleans, or JSON objects.
+
+### Randomization Unit
+The entity used to assign users to experiment groups. Common units include:
+- **User ID**: Individual user accounts
+- **Device ID**: Individual devices
+- **Session ID**: Individual sessions
+- **Custom ID**: Any custom identifier (company ID, team ID, etc.)
+
+**Aliases**: Unit ID, ID Type
+
+### Scorecard
+The collection of metrics tracked for an experiment, including primary metrics, secondary metrics, and guardrail metrics.
+
+### Statistical Significance
+A measure indicating whether observed metric changes are likely due to the product change rather than random variation. Typically measured using p-values and confidence intervals.
+
+### Variant
+A specific version or configuration in an experiment. Each variant represents a different user experience being tested.
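+
+In SDK terms, an **assignment** surfaces as the experiment object your code reads **parameters** from. A minimal sketch, loosely following the A/B test quickstart in this repo (the experiment and parameter names are placeholders, and exact signatures vary by SDK):
+
+```javascript
+// Which group is this user in, and what parameter values did that group get?
+const experiment = myStatsigClient.getExperiment(user, "quickstart_experiment");
+// Read a parameter; the second argument is the fallback used when the user isn't enrolled.
+const buttonColor = experiment.get("button_color", "blue");
+```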
+
+## Metrics
+
+### Event
+A user action or occurrence logged to Statsig for analysis. Events are used to calculate metrics and measure experiment impact.
+
+### Metric
+A quantitative measure used to evaluate experiment performance or product health. Types include:
+- **Count metrics**: Total occurrences of an event
+- **Sum metrics**: Sum of a numeric value
+- **Ratio metrics**: Ratio between two metrics
+- **Mean metrics**: Average value per user
+
+### Primary Metric
+The main metric used to evaluate experiment success. Experiments are typically designed to move primary metrics.
+
+**Aliases**: Primary KPI, North Star Metric
+
+### Secondary Metric
+Additional metrics tracked to understand broader experiment impact beyond the primary metric.
+
+### Guardrail Metric
+Metrics monitored to ensure experiments don't negatively impact critical business or user experience measures.
+
+## Targeting & Segmentation
+
+### Targeting Rule
+Conditions that determine which users are eligible for a feature flag or experiment. Rules can be based on user attributes, custom fields, or segments.
+
+**Aliases**: Rule, Condition
+
+### Segment
+A saved group of targeting rules that can be reused across multiple feature flags and experiments.
+
+**Aliases**: Audience, User Segment
+
+### Environment
+A deployment context (production, staging, development) with separate configurations for testing and releasing features.
+
+**Aliases**: Tier
+
+### User Attribute
+Properties associated with a user (email, country, app version, etc.) used for targeting and segmentation.
+
+**Aliases**: User Property, Custom Field
+
+## Data & Analytics
+
+### Pulse
+Statsig's experimentation results engine that calculates metric lifts, statistical significance, and confidence intervals.
+
+### Warehouse Native (WHN)
+Statsig's architecture that runs experiments and computes metrics directly on your data warehouse, keeping data in your infrastructure.
+
+**Aliases**: WHN, Warehouse-Native
+
+### Data Connector
+An integration that forwards Statsig events to external analytics or data platforms.
+
+**Aliases**: Integration, Event Forwarding
+
+### ID Resolution
+The process of linking multiple identifiers (user ID, device ID, anonymous ID) to the same user for consistent experiment assignment and metric calculation.
+
+**Aliases**: Identity Resolution, ID Stitching
+
+## SDK & Implementation
+
+### Client SDK
+A software development kit designed for client-side applications (web, mobile) that evaluates feature flags and logs events from the user's device.
+
+### Server SDK
+A software development kit designed for server-side applications that evaluates feature flags and logs events from your backend infrastructure.
+
+### Initialization
+The process of loading feature flag and experiment configurations from Statsig servers into an SDK instance.
+
+### Bootstrap
+Pre-loading SDK configuration values to reduce initialization time and improve performance.
+
+### Evaluation
+The process of determining a feature flag value or experiment variant for a specific user based on targeting rules.
+
+**Aliases**: Check, Get
+
+### Local Evaluation
+Evaluating feature flags and experiments within the SDK using cached configurations, without making network requests.
+
+### Exposure Logging
+Automatically logging when a user is evaluated against a feature flag or experiment. Used for experiment analysis and feature adoption tracking.
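+
+These terms map onto a handful of SDK calls. A minimal browser sketch, based on the snippets in the Build Your First Feature guide ('YOUR_SDK_KEY', the user, and the gate name are placeholders):
+
+```javascript
+// Initialization: load configurations from Statsig for this user.
+const client = new window.Statsig.StatsigClient('YOUR_SDK_KEY', { userID: 'a-user' });
+await client.initializeAsync();
+
+// Evaluation: checking the gate also records an exposure for analysis.
+const enabled = client.checkGate('mobile_registration');
+
+// Send any queued exposures/events immediately (useful while debugging).
+client.flush();
+```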
+
+## Platform Features
+
+### Console
+The Statsig web application where you create and manage feature flags, experiments, metrics, and view results.
+
+**Aliases**: Dashboard, UI, Web Console
+
+### API
+Programmatic interfaces for interacting with Statsig:
+- **Console API**: Manage Statsig configurations programmatically
+- **HTTP API**: Evaluate feature flags and log events via HTTP
+- **Stats API**: Query experiment results and metrics
+
+### Diagnostics
+Real-time monitoring tools showing live exposure logs, event streams, and SDK health for debugging.
+
+**Aliases**: Debug Stream, Live Logs
+
+### Autotune
+An automated experimentation feature that dynamically adjusts traffic allocation to winning variants during an experiment.
+
+## Statistical Terms
+
+### Confidence Interval
+A range of values that likely contains the true metric lift with a specified level of confidence (typically 95%).
+
+### P-value
+The probability of observing results at least as extreme as those measured if there were no real effect. Lower p-values indicate stronger evidence of a real effect.
+
+### Statistical Power
+The probability of detecting a real effect if one exists. Higher power reduces false negatives.
+
+### Sample Size
+The number of users or events required to detect a meaningful effect with sufficient statistical power.
+
+### Lift
+The percentage change in a metric between the test group and control group.
+
+**Aliases**: Delta, Impact
+
+### Winsorization
+A statistical technique that caps extreme values to reduce the impact of outliers on metric calculations.
+
+## Related Resources
+
+- [Understanding the Platform](/understanding-platform)
+- [Experiments Overview](/experiments/overview)
+- [Feature Flags Overview](/feature-flags/overview)
+- [Metrics Introduction](/metrics/introduction)
+- [SDK Getting Started](/sdks/getting-started)
diff --git a/guides/abn-tests.mdx b/guides/abn-tests.mdx
index e320b0f6d..321c834a7 100644
--- a/guides/abn-tests.mdx
+++ b/guides/abn-tests.mdx
@@ -1,12 +1,36 @@
 ---
 title: "Run your first A/B test"
+description: Step-by-step guide to creating and implementing your first A/B test experiment in Statsig
+doc_type: tutorial
+keywords:
+ - a/b test
+ - experiment
+ - tutorial
+ - getting started
+ - quickstart
+ - control group
+ - test group
+ - parameters
+ - sdk integration
+related_docs:
+ - /experiments/overview
+ - /experiments/create-new
+ - /sdks/quickstart
+ - /guides/first-feature
+prerequisites:
+ - Statsig account
+ - Statsig SDK installed in your application
 ---

 In this guide, you will create and implement your first experiment in Statsig from end to end. There are many types of experiments you can set up in Statsig, but this guide will walk through the most common one: an A/B test.

+## What you'll learn
+
 By the end of this tutorial, you will have:
 * Created a new user-level **Experiment** in the Statsig console, with **parameters** set for a Control and Experiment group
 * **Checked the experiment** in your application code using the **Statsig Client SDK** `getExperiment` function
+* Monitored experiment diagnostics in real time
+* Understood how to read experiment results

 ## Prerequisites
 1. You already have a [Statsig account](https://console.statsig.com/sign_up)
@@ -51,7 +75,7 @@ Next, we'll use a Statsig Client SDK to check a user's assigned experiment group
-```tsx Check Experiment
+```typescript Check Experiment
 const quickstartExperiment = myStatsigClient.getExperiment(user, "quickstart_experiment");
 // the second parameter is the default fallback value if the experiment is not found
diff --git a/guides/first-feature.mdx b/guides/first-feature.mdx
index 414f95f3d..507bbff7f 100644
--- a/guides/first-feature.mdx
+++ b/guides/first-feature.mdx
@@ -2,6 +2,25 @@
 title: Build Your First Feature
 description: Walk through creating a feature gate, targeting audiences, and rolling out your first feature with the JavaScript SDK.
 slug: /guides/first-feature
+doc_type: tutorial
+keywords:
+ - feature flag
+ - feature gate
+ - tutorial
+ - getting started
+ - quickstart
+ - rollout
+ - targeting
+ - javascript sdk
+ - browser sdk
+related_docs:
+ - /feature-flags/overview
+ - /feature-flags/create
+ - /sdks/quickstart
+ - /guides/abn-tests
+prerequisites:
+ - Statsig account
+ - Access to browser DevTools
 ---
@@ -48,7 +67,7 @@ Once your Statsig account is ready, follow the steps below to create and test-dr
 Paste the snippet below into the browser console on any site to fetch the SDK from jsDelivr:

- ```js
+ ```javascript
 const script = document.createElement('script');
 script.src = 'https://cdn.jsdelivr.net/npm/@statsig/js-client@3/build/statsig-js-client+session-replay+web-analytics.min.js';
 document.head.appendChild(script);
@@ -62,14 +81,14 @@ Once your Statsig account is ready, follow the steps below to create and test-dr
 Replace YOUR_SDK_KEY with the client key from Step 4 and run:

- ```js
+ ```javascript
 const client = new window.Statsig.StatsigClient('YOUR_SDK_KEY', {});
 await client.initializeAsync();
 ```

 Then call:

- ```js
+ ```javascript
 client.checkGate('mobile_registration');
 ```
@@ -85,7 +104,7 @@ Once your Statsig account is ready, follow the steps below to create and test-dr
 Re-evaluate the user to pick up the new environment and re-check the gate:

- ```js
+ ```javascript
 await client.updateUserAsync({});
 client.checkGate('mobile_registration');
 ```
@@ -100,7 +119,7 @@ Once your Statsig account is ready, follow the steps below to create and test-dr
 Switch DevTools back to the desktop view and update the user with a company email:

- ```js
+ ```javascript
 await client.updateUserAsync({ email: 'teammate@statsig.com' });
 client.checkGate('mobile_registration');
 ```
@@ -113,7 +132,7 @@ Once your Statsig account is ready, follow the steps below to create and test-dr
- ```js
+ ```javascript
 client.flush();
 ```
@@ -129,7 +148,7 @@ Once your Statsig account is ready, follow the steps below to create and test-dr
 Wrap feature logic in a gate check so only targeted users see the experience:

-```js
+```javascript
 if (client.checkGate('mobile_registration')) {
   show(mobileRegistrationPage);
 } else {
diff --git a/metrics/introduction.mdx b/metrics/introduction.mdx
index 339cc5334..978c1e4a1 100644
--- a/metrics/introduction.mdx
+++ b/metrics/introduction.mdx
@@ -1,6 +1,23 @@
 ---
 title: "Metrics User Guide"
 description: "Learn about Statsig's comprehensive metrics system, from raw events to precomputed metrics and real-time analytics."
+doc_type: guide
+keywords:
+ - metrics
+ - kpis
+ - measurement
+ - analytics
+ - experiments
+ - pulse
+ - scorecard
+ - raw events
+ - precomputed metrics
+ - custom metrics
+related_docs:
+ - /experiments/overview
+ - /statsig-warehouse-native/configuration/metrics
+ - /product-analytics/overview
+ - /metrics/101
 ---

 # Metrics User Guide
diff --git a/sdks/getting-started.mdx b/sdks/getting-started.mdx
index b85c20eda..2c7170277 100644
--- a/sdks/getting-started.mdx
+++ b/sdks/getting-started.mdx
@@ -1,5 +1,23 @@
 ---
 title: "SDK Overview"
+description: Overview of Statsig SDKs for client-side and server-side applications
+doc_type: guide
+keywords:
+ - sdk
+ - client sdk
+ - server sdk
+ - integration
+ - installation
+ - getting started
+ - implementation
+ - targeting
+ - event logging
+related_docs:
+ - /sdks/quickstart
+ - /sdks/client-vs-server
+ - /client/javascript-sdk
+ - /server-core/node-core
+ - /guides/first-feature
 ---

 import ListOfSDKs from '/snippets/sdks/list-of-sdks.mdx'
diff --git a/server-core/node-core.mdx b/server-core/node-core.mdx
index b89f0f595..639fe2738 100644
--- a/server-core/node-core.mdx
+++ b/server-core/node-core.mdx
@@ -3,6 +3,22 @@ title: Node Server SDK
 sidebarTitle: Node
 description: Statsig's next-gen Node Server SDK built on our [Server Core](/server-core) framework
 icon: "node-js"
+doc_type: api-reference
+sdk_version: 3.x
+replaces: /server/nodejsServerSDK
+keywords:
+ - node.js
+ - server sdk
+ - javascript
+ - backend
+ - api reference
+ - typescript
+ - server core
+related_docs:
+ - /server/nodejsServerSDK
+ - /server-core/migration-guides/node
+ - /sdks/getting-started
+ - /sdks/client-vs-server
 ---

 import Installation from '/snippets/server-core/installation.mdx'
diff --git a/server/nodejsServerSDK.mdx b/server/nodejsServerSDK.mdx
index 2c5805944..4ca8f5605 100644
--- a/server/nodejsServerSDK.mdx
+++ b/server/nodejsServerSDK.mdx
@@ -3,6 +3,21 @@ title: Legacy Node.js Server SDK
 sidebarTitle: Node.js
 description: Statsig's Legacy Server SDK for Node.js applications
 icon: "node-js"
+doc_type: api-reference
+sdk_version: legacy
+replaces: null
+replaced_by: /server-core/node-core
+keywords:
+ - node.js
+ - server sdk
+ - legacy sdk
+ - javascript
+ - backend
+ - api reference
+related_docs:
+ - /server-core/node-core
+ - /sdks/getting-started
+ - /sdks/client-vs-server
 ---

 import CheckGateIntro from '/snippets/server/checkGate.mdx'
diff --git a/understanding-platform.mdx b/understanding-platform.mdx
index b8af9bcc9..99fae88e8 100644
--- a/understanding-platform.mdx
+++ b/understanding-platform.mdx
@@ -2,6 +2,22 @@
 title: 'Platform Overview'
 sidebarTitle: 'Statsig Platform Overview'
 description: 'Learn what Statsig is used for and how to set it up with our Cloud and Warehouse Native deployment models'
+doc_type: concept
+keywords:
+ - platform overview
+ - architecture
+ - statsig cloud
+ - warehouse native
+ - deployment models
+ - feature flags
+ - experiments
+ - product analytics
+ - setup guide
+related_docs:
+ - /feature-flags/overview
+ - /experiments/overview
+ - /statsig-warehouse-native/introduction
+ - /sdks/getting-started
 ---

 ## What do I use Statsig for?
diff --git a/welcome.mdx b/welcome.mdx
index 64a2fadb1..d5993888b 100644
--- a/welcome.mdx
+++ b/welcome.mdx
@@ -2,6 +2,16 @@
 title: "Statsig Overview"
 sidebarTitle: "Start Here"
 description: "Ship, measure, & learn with the same tools as the world's largest Tech companies. Run thousands of A/B tests, safely rollout features, & dive deep on user behavior in a single, unified platform."
+doc_type: overview
+keywords:
+ - getting started
+ - introduction
+ - statsig platform
+ - feature flags
+ - experiments
+ - a/b testing
+ - product analytics
+ - warehouse native
 ---

 import ListOfSDKs from '/snippets/sdks/list-of-sdks.mdx'