diff --git a/src/content/docs/client-apis/python.mdx b/src/content/docs/client-apis/python.mdx index 30f7e0f..eeb3bd2 100644 --- a/src/content/docs/client-apis/python.mdx +++ b/src/content/docs/client-apis/python.mdx @@ -35,30 +35,30 @@ curl -L -o ./data/lives-in.csv https://raw.githubusercontent.com/lbugdb/lbug/ref The synchronous API is the default and is a common way to work with Ladybug in Python. ```python -import lbug +import real_ladybug as lb def main() -> None: # Create an empty on-disk database and connect to it - db = lbug.Database("example.lbug") - conn = lbug.Connection(db) + db = lb.Database("example.lbug") + conn = lb.Connection(db) create_tables(conn) copy_data(conn) query(conn) -def create_tables(conn: lbug.Connection) -> None: +def create_tables(conn: lb.Connection) -> None: conn.execute("CREATE NODE TABLE User(name STRING PRIMARY KEY, age INT64)") conn.execute("CREATE NODE TABLE City(name STRING PRIMARY KEY, population INT64)") conn.execute("CREATE REL TABLE Follows(FROM User TO User, since INT64)") conn.execute("CREATE REL TABLE LivesIn(FROM User TO City)") -def copy_data(conn: lbug.Connection) -> None: +def copy_data(conn: lb.Connection) -> None: conn.execute('COPY User FROM "./data/user.csv"') conn.execute('COPY City FROM "./data/city.csv"') conn.execute('COPY Follows FROM "./data/follows.csv"') conn.execute('COPY LivesIn FROM "./data/lives-in.csv"') -def query(conn: lbug.Connection) -> None: +def query(conn: lb.Connection) -> None: results = conn.execute(""" MATCH (a:User)-[f:Follows]->(b:User) RETURN a.name, b.name, f.since; @@ -79,19 +79,19 @@ such as in web frameworks like FastAPI or cases where you need to concurrently r ```python import asyncio -import lbug +import real_ladybug as lb async def main(): # Create an empty on-disk database and connect to it - db = lbug.Database("example.lbug") + db = lb.Database("example.lbug") # The underlying connection pool will be automatically created and managed by the async connection - conn = 
lbug.AsyncConnection(db, max_concurrent_queries=4) + conn = lb.AsyncConnection(db, max_concurrent_queries=4) await create_tables(conn) await copy_data(conn) await query(conn) -async def create_tables(conn: lbug.AsyncConnection) -> None: +async def create_tables(conn: lb.AsyncConnection) -> None: await conn.execute("CREATE NODE TABLE User(name STRING PRIMARY KEY, age INT64)") await conn.execute( "CREATE NODE TABLE City(name STRING PRIMARY KEY, population INT64)" @@ -99,13 +99,13 @@ async def create_tables(conn: lbug.AsyncConnection) -> None: await conn.execute("CREATE REL TABLE Follows(FROM User TO User, since INT64)") await conn.execute("CREATE REL TABLE LivesIn(FROM User TO City)") -async def copy_data(conn: lbug.AsyncConnection) -> None: +async def copy_data(conn: lb.AsyncConnection) -> None: await conn.execute("COPY User FROM './data/user.csv'") await conn.execute("COPY City FROM './data/city.csv'") await conn.execute("COPY Follows FROM './data/follows.csv'") await conn.execute("COPY LivesIn FROM './data/lives-in.csv'") -async def query(conn: lbug.AsyncConnection) -> None: +async def query(conn: lb.AsyncConnection) -> None: results = await conn.execute(""" MATCH (a:User)-[f:Follows]->(b:User) RETURN a.name, b.name, f.since; @@ -187,11 +187,11 @@ The following examples show how to output query results to each of these data st You can output the results of a Cypher query to a Pandas DataFrame using the `get_as_df()` method: ```py -import lbug +import real_ladybug as lb import pandas as pd -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)") conn.execute("CREATE (a:Person {name: 'Adam', age: 30})") @@ -226,11 +226,11 @@ print(result.get_as_df()) You can output the results of a Cypher query to a Polars DataFrame using the `get_as_pl()` method: ```py -import lbug +import real_ladybug as lb import polars as pl -db = 
lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)") conn.execute("CREATE (a:Person {name: 'Adam', age: 30})") @@ -277,11 +277,11 @@ shape: (3, 1) You can output the results of a Cypher query to a PyArrow Table using the `get_as_arrow()` method: ```py -import lbug +import real_ladybug as lb import pyarrow as pa -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)") conn.execute("CREATE (a:Person {name: 'Adam', age: 30})") @@ -314,11 +314,11 @@ Scanning a DataFrame or Table does *not* copy the data into Ladybug, it only rea ```py -import lbug +import real_ladybug as lb import pandas as pd -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) df = pd.DataFrame({ "name": ["Adam", "Karissa", "Zhang"], @@ -339,11 +339,11 @@ print(result.get_as_df()) ```py -import lbug +import real_ladybug as lb import polars as pl -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) df = pl.DataFrame({ "name": ["Adam", "Karissa", "Zhang"], @@ -370,11 +370,11 @@ shape: (3, 2) ```py -import lbug +import real_ladybug as lb import pyarrow as pa -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) tbl = pa.table({ "name": ["Adam", "Karissa", "Zhang"], @@ -405,11 +405,11 @@ age: [[30,40,50]] Copy from a Pandas DataFrame into a Ladybug table using the `COPY FROM` command: ```py -import lbug +import real_ladybug as lb import pandas as pd -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)") @@ -436,11 
+436,11 @@ print(result.get_as_df()) Copy from a Polars DataFrame into a Ladybug table using the `COPY FROM` command: ```py -import lbug +import real_ladybug as lb import polars as pl -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)") @@ -473,11 +473,11 @@ shape: (3, 2) Copy from a PyArrow Table into a Ladybug table using the `COPY FROM` command: ```py -import lbug +import real_ladybug as lb import pyarrow as pa -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)") @@ -541,20 +541,20 @@ difference between two numbers, and then apply it in a Cypher query. ### Register the UDF ```py -import lbug +import real_ladybug as lb -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) # define your function def difference(a, b): return a - b # define the expected type of your parameters -parameters = [lbug.Type.INT64, lbug.Type.INT64] +parameters = [lb.Type.INT64, lb.Type.INT64] # define expected type of the returned value -return_type = lbug.Type.INT64 +return_type = lb.Type.INT64 # register the UDF conn.create_function("difference", difference, parameters, return_type) @@ -579,10 +579,10 @@ The UDF API's `create_function` provides the following additional parameters: - `name: str` : The name of the function to be invoked in cypher. - `udf: Callable[[...], Any]` : The function to be executed. -- `params_type: Optional[list[Type | str]]` : A list whose elements can either be `lbug.Type` or `str`. `lbug.Type` +- `params_type: Optional[list[Type | str]]` : A list whose elements can either be `lb.Type` or `str`. 
`lb.Type` can be used to denote nonnested parameter types, while `str` can be used to denote both nested and nonnested parameter types. Details on how to denote types are in the [type notation](#type-notation) section. -- `return_type: Optional[Type | str]` : Either a `lbug.Type` enum or `str`. Details on how to denote types are in the [type notation](#type-notation) section. +- `return_type: Optional[Type | str]` : Either a `lb.Type` enum or `str`. Details on how to denote types are in the [type notation](#type-notation) section. - `default_null_handling: Optional[bool]` : True by default. When true, if any one of the inputs is null, function execution is skipped and the output is resolved to null. - `catch_exceptions: Optional[bool]` : False by default. When true, if the UDF raises an exception, the output is resolved to null. Otherwise the Exception is rethrown. @@ -641,7 +641,7 @@ def calculate_discounted_price(price: float, has_discount: bool) -> float: # Assume 10% discount on all items for simplicity return float(price) * 0.9 if has_discount else price -parameters = ['DECIMAL(7, 2)', lbug.Type.BOOL] +parameters = ['DECIMAL(7, 2)', lb.Type.BOOL] return_type = 'DECIMAL(7, 2)' @@ -666,6 +666,6 @@ print(result.get_as_df()) 0 90.00 100.00 ``` -The second parameter is a built-in native type in Ladybug, i.e., `lbug.Type.BOOL`. For the first parameter, +The second parameter is a built-in native type in Ladybug, i.e., `lb.Type.BOOL`. For the first parameter, we need to specify a string, i.e. `DECIMAL(7,2)` that is then parsed and used by Ladybug to map to the internal decimal representation. diff --git a/src/content/docs/concurrency.md b/src/content/docs/concurrency.md index c77db0c..0fcc22e 100644 --- a/src/content/docs/concurrency.md +++ b/src/content/docs/concurrency.md @@ -43,12 +43,12 @@ Here's a simple example application in Python that demonstrates these two steps database and a connection. 
The same principles apply to other language APIs as well: ```python -import lbug +import real_ladybug as lb # Open the database in `READ_WRITE` mode. The below code is equivalent to: -# db = lbug.Database("example.lbug", read_only=False) -db = lbug.Database("example.lbug") -conn = lbug.Connection(db) +# db = lb.Database("example.lbug", read_only=False) +db = lb.Database("example.lbug") +conn = lb.Connection(db) conn.execute("CREATE (a:Person {name: 'Alice'});") ``` diff --git a/src/content/docs/cypher/query-clauses/load-from.md b/src/content/docs/cypher/query-clauses/load-from.md index 2f32aed..3de2837 100644 --- a/src/content/docs/cypher/query-clauses/load-from.md +++ b/src/content/docs/cypher/query-clauses/load-from.md @@ -139,11 +139,11 @@ inferred from the schema information of the data frame. Here is an example: ```py # main.py -import lbug +import real_ladybug as lb import pandas as pd -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) df = pd.DataFrame({ "name": ["Adam", "Karissa", "Zhang"], @@ -172,11 +172,11 @@ Ladybug can also scan Polars DataFrames via the underlying PyArrow layer. The ru variable names and data types is identical to scanning Pandas data frames. 
Here is an example: ```python -import lbug +import real_ladybug as lb import polars as pl -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) df = pl.DataFrame({ "name": ["Adam", "Karissa", "Zhang"], @@ -205,11 +205,11 @@ shape: (3, 2) You can scan an existing PyArrow table as follows: ```python -import lbug +import real_ladybug as lb import pyarrow as pa -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) pa_table = pa.table({ "name": ["Adam", "Karissa", "Zhang"], diff --git a/src/content/docs/extensions/vector.mdx b/src/content/docs/extensions/vector.mdx index 3806d7b..3e0618c 100644 --- a/src/content/docs/extensions/vector.mdx +++ b/src/content/docs/extensions/vector.mdx @@ -36,11 +36,11 @@ extension to directly create the embeddings using Cypher. ```python # create_embeddings.py # pip install sentence-transformers -import lbug +import real_ladybug as lb import os -db = lbug.Database("example.lbug") -conn = lbug.Connection(db) +db = lb.Database("example.lbug") +conn = lb.Connection(db) from sentence_transformers import SentenceTransformer model = SentenceTransformer("all-MiniLM-L6-v2") @@ -96,11 +96,11 @@ for title, publisher in zip(titles, publishers): ```python # create_embeddings.py -import lbug +import real_ladybug as lb import os -db = lbug.Database("example.lbug") -conn = lbug.Connection(db) +db = lb.Database("example.lbug") +conn = lb.Connection(db) conn.execute("INSTALL llm; LOAD llm;") os.environ["OPENAI_API_KEY"] = "sk-proj-key" # Replace with your own OpenAI API key @@ -269,11 +269,11 @@ Let's run some example search queries on our newly created vector index. 
```python -import lbug +import real_ladybug as lb # Initialize the database -db = lbug.Database("example.lbug") -conn = lbug.Connection(db) +db = lb.Database("example.lbug") +conn = lb.Connection(db) # Install and load vector extension once again conn.execute("INSTALL VECTOR;") @@ -304,11 +304,11 @@ print(result.get_as_pl()) ```python -import lbug +import real_ladybug as lb # Initialize the database -db = lbug.Database("example.lbug") -conn = lbug.Connection(db) +db = lb.Database("example.lbug") +conn = lb.Connection(db) # Install and load vector extension once again conn.execute("INSTALL VECTOR;") diff --git a/src/content/docs/get-started/graph-algorithms.md b/src/content/docs/get-started/graph-algorithms.md index 9ea57ba..eb2acc9 100644 --- a/src/content/docs/get-started/graph-algorithms.md +++ b/src/content/docs/get-started/graph-algorithms.md @@ -34,15 +34,15 @@ First, initialize a connection to a new Ladybug database named `example.lbug`: ```py from pathlib import Path -import lbug +import real_ladybug as lb db_path = "example.lbug" Path(db_path).unlink(missing_ok=True) Path(db_path + ".wal").unlink(missing_ok=True) -db = lbug.Database(db_path) -conn = lbug.Connection(db) +db = lb.Database(db_path) +conn = lb.Connection(db) ``` There will be one node table `Scholar`, and one relationship table `MENTORED` in this graph: @@ -112,12 +112,12 @@ The first method to run a graph algorithm natively in Ladybug is using the `algo #### Install and load the extension ```py -import lbug +import real_ladybug as lb db_path = "example.lbug" -db = lbug.Database(db_path) -conn = lbug.Connection(db) +db = lb.Database(db_path) +conn = lb.Connection(db) # Install and load the Ladybug algo extension conn.execute("INSTALL algo; LOAD algo;") @@ -237,12 +237,12 @@ a NetworkX algorithm result into a Pandas/Polars DataFrame and write it back to First, obtain a connection to the existing `example.lbug` database: ```py -import lbug +import real_ladybug as lb db_path = "example.lbug" -db = 
lbug.Database(db_path) -conn = lbug.Connection(db) +db = lb.Database(db_path) +conn = lb.Connection(db) ``` #### Create a NetworkX graph diff --git a/src/content/docs/get-started/index.mdx b/src/content/docs/get-started/index.mdx index 48486da..ba3b304 100644 --- a/src/content/docs/get-started/index.mdx +++ b/src/content/docs/get-started/index.mdx @@ -12,12 +12,12 @@ import { Code } from 'astro-expressive-code/components'; export const CODE_BLOCKS = { java: { maven: ` - com.lbugdb + com.ladybugdb lbug ${version} `, - groovy: `implementation 'com.lbugdb:lbug:${version}'`, - kotlin: `implementation("com.lbugdb:lbug:${version}")`, + groovy: `implementation 'com.ladybugdb:lbug:${version}'`, + kotlin: `implementation("com.ladybugdb:lbug:${version}")`, gradle: `plugins { id 'java' id 'application' @@ -29,7 +29,7 @@ repositories { mavenCentral() } dependencies { - implementation 'com.lbugdb:lbug:${version}' + implementation 'com.ladybugdb:lbug:${version}' }`, }, swift:`import PackageDescription @@ -115,12 +115,12 @@ below demonstrate how to create a graph schema and import data into an on-disk L ```py # main.py -import lbug +import real_ladybug as lb def main(): # Create an empty on-disk database and connect to it - db = lbug.Database("example.lbug") - conn = lbug.Connection(db) + db = lb.Database("example.lbug") + conn = lb.Connection(db) # Create schema conn.execute("CREATE NODE TABLE User(name STRING PRIMARY KEY, age INT64)") @@ -274,8 +274,8 @@ const lbug = require("lbug"); (async () => { // Create an empty on-disk database and connect to it - const db = new lbug.Database("example.lbug"); - const conn = new lbug.Connection(db); + const db = new lbug.Database("example.lbug"); + const conn = new lbug.Connection(db); // Create the tables await conn.query("CREATE NODE TABLE User(name STRING PRIMARY KEY, age INT64)"); @@ -312,7 +312,7 @@ const lbug = require("lbug"); -Ladybug's Java client library is available on [Maven 
Central](https://central.sonatype.com/artifact/com.lbugdb/lbug). You can add the following snippet to your `pom.xml` to get it installed: +Ladybug's Java client library is available on [Maven Central](https://central.sonatype.com/artifact/com.ladybugdb/lbug). You can add the following snippet to your `pom.xml` to get it installed: @@ -346,7 +346,7 @@ The `Main.java` contains the following code: ```java // Main.java -import com.lbugdb.*; +import com.ladybugdb.*; public class Main { public static void main(String[] args) throws ObjectRefDestroyedException { @@ -456,15 +456,15 @@ import ( func main() { // Create an empty on-disk database and connect to it - systemConfig := lbug.DefaultSystemConfig() + systemConfig := lbug.DefaultSystemConfig() systemConfig.BufferPoolSize = 1024 * 1024 * 1024 - db, err := lbug.OpenDatabase("example.lbug", systemConfig) + db, err := lbug.OpenDatabase("example.lbug", systemConfig) if err != nil { panic(err) } defer db.Close() - conn, err := lbug.OpenConnection(db) + conn, err := lbug.OpenConnection(db) if err != nil { panic(err) } @@ -602,7 +602,7 @@ You should see the following output: -The Ladybug C++ client is distributed as `so`/`dylib`/`dll+lib` library files along with a header file (`lbug.hpp`). +The Ladybug C++ client is distributed as `so`/`dylib`/`dll+lib` library files along with a header file (`lbug.hpp`). Once you've downloaded and extracted the C++ files into a directory, they are ready to use without any additional installation. You just need to specify the library and include file search paths. @@ -610,10 +610,10 @@ For example, organize the files and directories as follows: ```bash ├── include -│ ├── lbug.hpp +│ ├── lbug.hpp │ └── ...... ├── lib -│ ├── liblbug.so / liblbug.dylib / lbug_shared.dll + lbug_shared.lib +│ ├── liblbug.so / liblbug.dylib / lbug_shared.dll + lbug_shared.lib │ └── ......
├── data │ ├── city.csv @@ -628,7 +628,7 @@ For example, organize the files and directories as follows: // main.cpp #include -#include "include/lbug.hpp" +#include "include/lbug.hpp" using namespace lbug::main; using namespace std; @@ -644,7 +644,7 @@ unique_ptr runQuery(const string_view &query, unique_ptr The Ladybug C API shares the same `so`/`dylib` library files with the C++ API and can be used by -including the C header file (`lbug.h`). +including the C header file (`lbug.h`). In this example, we assume that the `so`/`dylib`, the header file, the CSV files, and the C code file are all under the same directory: ```bash ├── include -│ ├── lbug.h +│ ├── lbug.h │ └── ...... -├── liblbug.so / liblbug.dylib +├── liblbug.so / liblbug.dylib ├── main.c ├── user.csv ├── city.csv @@ -766,7 +766,7 @@ The file `main.c` contains the following code: // main.c #include -#include "include/lbug.h" +#include "include/lbug.h" int main() { diff --git a/src/content/docs/import/copy-from-dataframe.mdx b/src/content/docs/import/copy-from-dataframe.mdx index 9cc4f96..3af9183 100644 --- a/src/content/docs/import/copy-from-dataframe.mdx +++ b/src/content/docs/import/copy-from-dataframe.mdx @@ -14,11 +14,11 @@ There are two different ways in Ladybug to import dataframes: The dataframe can be passed as a query parameter: ```python -import lbug +import real_ladybug as lb import polars as pl -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute( """ @@ -117,10 +117,10 @@ We can ignore these erroneous rows during import by setting the `ignore_errors` in the `COPY FROM` command: ```py -import lbug +import real_ladybug as lb -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) # Create a Person node table with name as the primary key conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)") @@ -157,11 +157,11 @@ You can directly copy from a Pandas 
dataframe into Ladybug. Both numpy-backed an dataframes are supported. ```python -import lbug +import real_ladybug as lb import pandas as pd -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64);") @@ -188,11 +188,11 @@ You can utilize an existing in-memory PyArrow table to copy data directly into L Internally, the Polars dataframe example above also leverages scanning from PyArrow tables. ```python -import lbug +import real_ladybug as lb import pyarrow as pa -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64);") @@ -218,11 +218,11 @@ print(res.get_as_df()) This example shows a basic use case where you have no nesting of data and no null values. ```python -import lbug +import real_ladybug as lb import polars as pl -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64);") @@ -338,10 +338,10 @@ We can now scan the DataFrame using the `LOAD FROM` clause in Ladybug in combina obtain each of the insurance provider records in a row-wise manner. ```python -import lbug +import real_ladybug as lb -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) result = conn.execute( """ @@ -368,10 +368,10 @@ We now have the necessary information to create our `Patient`, `InsuranceProvide tables and copy the relevant data into them. 
```python -import lbug +import real_ladybug as lb -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) conn.execute( """ diff --git a/src/content/docs/import/copy-from-subquery.md b/src/content/docs/import/copy-from-subquery.md index 27b4556..0bf2ea2 100644 --- a/src/content/docs/import/copy-from-subquery.md +++ b/src/content/docs/import/copy-from-subquery.md @@ -43,11 +43,11 @@ object, such as a Pandas DataFrame using `LOAD FROM` and use its results as inpu command. This can be combined with predicate filters as follows: ```python -import lbug +import real_ladybug as lb import pandas as pd -db = lbug.Database("example.lbug") -conn = lbug.Connection(db) +db = lb.Database("example.lbug") +conn = lb.Connection(db) df = pd.DataFrame({ "name": ["Adam", "Karissa", "Zhang", "Noura"], diff --git a/src/content/docs/import/merge.md b/src/content/docs/import/merge.md index 2f40ca5..0c96819 100644 --- a/src/content/docs/import/merge.md +++ b/src/content/docs/import/merge.md @@ -65,11 +65,11 @@ without having to merge the rows one by one (i.e., **without for loops**). Let's see this in action with an example. 
```py -import lbug +import real_ladybug as lb import pandas as pd -db = lbug.Database('example.lbug') -conn = lbug.Connection(db) +db = lb.Database('example.lbug') +conn = lb.Connection(db) df = pd.DataFrame({ 'name': ['Karissa', 'Rhea', 'James'], diff --git a/src/content/docs/installation.mdx b/src/content/docs/installation.mdx index 1962b37..259b37a 100644 --- a/src/content/docs/installation.mdx +++ b/src/content/docs/installation.mdx @@ -24,7 +24,7 @@ win: `curl -L -O https://github.com/LadybugDB/ladybug/releases/download/v${versi winUrl: `https://github.com/LadybugDB/ladybug/releases/download/v${version}/lbug_cli-windows-x86_64.zip`, }, java: ` - com.lbugdb + com.ladybugdb lbug ${version} @@ -151,7 +151,7 @@ uv add lbug ```bash -pip install lbug +pip install real_ladybug ``` @@ -176,7 +176,7 @@ npm install lbug ## Java -The latest stable version is available on [Maven Central](https://central.sonatype.com/artifact/com.lbugdb/lbug). +The latest stable version is available on [Maven Central](https://central.sonatype.com/artifact/com.ladybugdb/lbug). diff --git a/src/content/docs/tutorials/python/index.mdx b/src/content/docs/tutorials/python/index.mdx index 0e1e229..19c449e 100644 --- a/src/content/docs/tutorials/python/index.mdx +++ b/src/content/docs/tutorials/python/index.mdx @@ -24,7 +24,7 @@ for alternative installation methods. ```bash python -m venv .venv source .venv/bin/activate -pip install lbug +pip install real_ladybug ``` Next, [download the zipped data](https://lbugdb.github.io/data/tutorial/tutorial_data.zip) and unzip the files. @@ -70,13 +70,13 @@ We are now ready to start the tutorial. 
## Create the database Clear out the contents of `src/create_db.py` and import Ladybug: ```py -import lbug +import real_ladybug as lb ``` Next, create and connect to an empty Ladybug database: ```py def main() -> None: - db = lbug.Database("social_network.lbug") - conn = lbug.Connection(db) + db = lb.Database("social_network.lbug") + conn = lb.Connection(db) # Rest of the code goes here @@ -169,11 +169,11 @@ We can now start querying the graph to answer some questions about the social ne Replace the contents of `src/main.py` with the following code snippet: ```py -import lbug +import real_ladybug as lb def main() -> None: - db = lbug.Database("social_network.lbug") - conn = lbug.Connection(db) + db = lb.Database("social_network.lbug") + conn = lb.Connection(db) # Query to be filled out below result = conn.execute(...) @@ -344,11 +344,11 @@ python src/main.py ```py -import lbug +import real_ladybug as lb def main() -> None: - db = lbug.Database("social_network.lbug") - conn = lbug.Connection(db) + db = lb.Database("social_network.lbug") + conn = lb.Connection(db) conn.execute(""" CREATE NODE TABLE User ( @@ -396,11 +396,11 @@ if __name__ == "__main__": ```py -import lbug +import real_ladybug as lb def main() -> None: - db = lbug.Database("social_network.lbug") - conn = lbug.Connection(db) + db = lb.Database("social_network.lbug") + conn = lb.Connection(db) # Query to be filled out below result = conn.execute(...) 
diff --git a/src/content/docs/visualization/third-party-integrations/yfiles.mdx b/src/content/docs/visualization/third-party-integrations/yfiles.mdx index c887f2d..bc6ce35 100644 --- a/src/content/docs/visualization/third-party-integrations/yfiles.mdx +++ b/src/content/docs/visualization/third-party-integrations/yfiles.mdx @@ -25,7 +25,7 @@ The yFiles Jupyter Graphs widget can be installed using `uv` or `pip`: ```bash # Install lbug as a pre-requisite uv init -uv add lbug yfiles-jupyter-graphs-for-lbug +uv add real_ladybug yfiles-jupyter-graphs-for-lbug ``` @@ -33,7 +33,7 @@ uv add lbug yfiles-jupyter-graphs-for-lbug ```bash # Install lbug as a pre-requisite -pip install lbug yfiles-jupyter-graphs-for-lbug +pip install real_ladybug yfiles-jupyter-graphs-for-lbug ``` @@ -46,11 +46,11 @@ table `Person`, and one relationship table `MENTORED`, which describes who mento Each person has a `name` and an `alias`, which is the name they are best known by. ```py -import lbug +import real_ladybug as lb from yfiles_jupyter_graphs_for_lbug import LadybugGraphWidget # Open a new in-memory database -db = lbug.Database(":memory:") -conn = lbug.Connection(db) +db = lb.Database(":memory:") +conn = lb.Connection(db) # Create tables conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, alias STRING)") conn.execute("CREATE REL TABLE MENTORED(FROM Person TO Person)")