96 changes: 48 additions & 48 deletions src/content/docs/client-apis/python.mdx
@@ -35,30 +35,30 @@ curl -L -o ./data/lives-in.csv https://raw.githubusercontent.com/lbugdb/lbug/ref
The synchronous API is the default and is a common way to work with Ladybug in Python.

```python
import lbug
import real_ladybug as lb

def main() -> None:
# Create an empty on-disk database and connect to it
db = lbug.Database("example.lbug")
conn = lbug.Connection(db)
db = lb.Database("example.lbug")
conn = lb.Connection(db)

create_tables(conn)
copy_data(conn)
query(conn)

def create_tables(conn: lbug.Connection) -> None:
def create_tables(conn: lb.Connection) -> None:
conn.execute("CREATE NODE TABLE User(name STRING PRIMARY KEY, age INT64)")
conn.execute("CREATE NODE TABLE City(name STRING PRIMARY KEY, population INT64)")
conn.execute("CREATE REL TABLE Follows(FROM User TO User, since INT64)")
conn.execute("CREATE REL TABLE LivesIn(FROM User TO City)")

def copy_data(conn: lbug.Connection) -> None:
def copy_data(conn: lb.Connection) -> None:
conn.execute('COPY User FROM "./data/user.csv"')
conn.execute('COPY City FROM "./data/city.csv"')
conn.execute('COPY Follows FROM "./data/follows.csv"')
conn.execute('COPY LivesIn FROM "./data/lives-in.csv"')

def query(conn: lbug.Connection) -> None:
def query(conn: lb.Connection) -> None:
results = conn.execute("""
MATCH (a:User)-[f:Follows]->(b:User)
RETURN a.name, b.name, f.since;
@@ -79,33 +79,33 @@ such as in web frameworks like FastAPI or cases where you need to concurrently r

```python
import asyncio
import lbug
import real_ladybug as lb

async def main():
# Create an empty on-disk database and connect to it
db = lbug.Database("example.lbug")
db = lb.Database("example.lbug")
# The underlying connection pool will be automatically created and managed by the async connection
conn = lbug.AsyncConnection(db, max_concurrent_queries=4)
conn = lb.AsyncConnection(db, max_concurrent_queries=4)

await create_tables(conn)
await copy_data(conn)
await query(conn)

async def create_tables(conn: lbug.AsyncConnection) -> None:
async def create_tables(conn: lb.AsyncConnection) -> None:
await conn.execute("CREATE NODE TABLE User(name STRING PRIMARY KEY, age INT64)")
await conn.execute(
"CREATE NODE TABLE City(name STRING PRIMARY KEY, population INT64)"
)
await conn.execute("CREATE REL TABLE Follows(FROM User TO User, since INT64)")
await conn.execute("CREATE REL TABLE LivesIn(FROM User TO City)")

async def copy_data(conn: lbug.AsyncConnection) -> None:
async def copy_data(conn: lb.AsyncConnection) -> None:
await conn.execute("COPY User FROM './data/user.csv'")
await conn.execute("COPY City FROM './data/city.csv'")
await conn.execute("COPY Follows FROM './data/follows.csv'")
await conn.execute("COPY LivesIn FROM './data/lives-in.csv'")

async def query(conn: lbug.AsyncConnection) -> None:
async def query(conn: lb.AsyncConnection) -> None:
results = await conn.execute("""
MATCH (a:User)-[f:Follows]->(b:User)
RETURN a.name, b.name, f.since;
@@ -187,11 +187,11 @@ The following examples show how to output query results to each of these data st
You can output the results of a Cypher query to a Pandas DataFrame using the `get_as_df()` method:

```py
import lbug
import real_ladybug as lb
import pandas as pd

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)")
conn.execute("CREATE (a:Person {name: 'Adam', age: 30})")
@@ -226,11 +226,11 @@ print(result.get_as_df())
You can output the results of a Cypher query to a Polars DataFrame using the `get_as_pl()` method:

```py
import lbug
import real_ladybug as lb
import polars as pl

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)")
conn.execute("CREATE (a:Person {name: 'Adam', age: 30})")
@@ -277,11 +277,11 @@ shape: (3, 1)
You can output the results of a Cypher query to a PyArrow Table using the `get_as_arrow()` method:

```py
import lbug
import real_ladybug as lb
import pyarrow as pa

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)")
conn.execute("CREATE (a:Person {name: 'Adam', age: 30})")
@@ -314,11 +314,11 @@ Scanning a DataFrame or Table does *not* copy the data into Ladybug, it only rea
<TabItem label="Pandas">

```py
import lbug
import real_ladybug as lb
import pandas as pd

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

df = pd.DataFrame({
"name": ["Adam", "Karissa", "Zhang"],
@@ -339,11 +339,11 @@ print(result.get_as_df())
<TabItem label="Polars">

```py
import lbug
import real_ladybug as lb
import polars as pl

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

df = pl.DataFrame({
"name": ["Adam", "Karissa", "Zhang"],
@@ -370,11 +370,11 @@ shape: (3, 2)
<TabItem label="Arrow Table">

```py
import lbug
import real_ladybug as lb
import pyarrow as pa

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

tbl = pa.table({
"name": ["Adam", "Karissa", "Zhang"],
@@ -405,11 +405,11 @@ age: [[30,40,50]]
Copy from a Pandas DataFrame into a Ladybug table using the `COPY FROM` command:

```py
import lbug
import real_ladybug as lb
import pandas as pd

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)")

@@ -436,11 +436,11 @@ print(result.get_as_df())
Copy from a Polars DataFrame into a Ladybug table using the `COPY FROM` command:

```py
import lbug
import real_ladybug as lb
import polars as pl

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)")

@@ -473,11 +473,11 @@ shape: (3, 2)
Copy from a PyArrow Table into a Ladybug table using the `COPY FROM` command:

```py
import lbug
import real_ladybug as lb
import pyarrow as pa

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

conn.execute("CREATE NODE TABLE Person(name STRING PRIMARY KEY, age INT64)")

@@ -541,20 +541,20 @@ difference between two numbers, and then apply it in a Cypher query.
### Register the UDF

```py
import lbug
import real_ladybug as lb

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

# define your function
def difference(a, b):
return a - b

# define the expected type of your parameters
parameters = [lbug.Type.INT64, lbug.Type.INT64]
parameters = [lb.Type.INT64, lb.Type.INT64]

# define expected type of the returned value
return_type = lbug.Type.INT64
return_type = lb.Type.INT64

# register the UDF
conn.create_function("difference", difference, parameters, return_type)
@@ -579,10 +579,10 @@ The UDF API's `create_function` provides the following additional parameters:

- `name: str` : The name of the function to be invoked in Cypher.
- `udf: Callable[[...], Any]` : The function to be executed.
- `params_type: Optional[list[Type | str]]` : A list whose elements can either be `lbug.Type` or `str`. `lbug.Type`
- `params_type: Optional[list[Type | str]]` : A list whose elements can either be `lb.Type` or `str`. `lb.Type`
can be used to denote nonnested parameter types, while `str` can be used to denote both nested and nonnested parameter types.
Details on how to denote types are in the [type notation](#type-notation) section.
- `return_type: Optional[Type | str]` : Either a `lbug.Type` enum or `str`. Details on how to denote types are in the [type notation](#type-notation) section.
- `return_type: Optional[Type | str]` : Either a `lb.Type` enum or `str`. Details on how to denote types are in the [type notation](#type-notation) section.
- `default_null_handling: Optional[bool]` : True by default. When true, if any one of the inputs is null, function execution is skipped and the output is resolved to null.
- `catch_exceptions: Optional[bool]` : False by default. When true, if the UDF raises an exception, the output is resolved to null. Otherwise the Exception is rethrown.
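
For reference, here is a minimal sketch (not part of this diff) of how the two optional flags above might be used together, keeping the `lb` alias introduced by this change. The `lb.Type.STRING` member and the keyword-argument style are assumptions based on the parameter list above, not confirmed API details.

```py
import real_ladybug as lb

db = lb.Database(":memory:")
conn = lb.Connection(db)

def parse_int(s):
    # Raises ValueError for non-numeric input
    return int(s)

conn.create_function(
    "parse_int",
    parse_int,
    [lb.Type.STRING],            # assumed: STRING is exposed on the Type enum
    lb.Type.INT64,
    default_null_handling=True,  # a NULL input short-circuits to a NULL output; the UDF is skipped
    catch_exceptions=True,       # a raised ValueError resolves to NULL instead of failing the query
)

# With these flags, RETURN parse_int('42') yields 42 and RETURN parse_int('oops') yields NULL
```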

@@ -641,7 +641,7 @@ def calculate_discounted_price(price: float, has_discount: bool) -> float:
# Assume 10% discount on all items for simplicity
return float(price) * 0.9 if has_discount else price

parameters = ['DECIMAL(7, 2)', lbug.Type.BOOL]
parameters = ['DECIMAL(7, 2)', lb.Type.BOOL]

return_type = 'DECIMAL(7, 2)'

@@ -666,6 +666,6 @@ print(result.get_as_df())
0 90.00 100.00
```

The second parameter is a built-in native type in Ladybug, i.e., `lbug.Type.BOOL`. For the first parameter,
The second parameter is a built-in native type in Ladybug, i.e., `lb.Type.BOOL`. For the first parameter,
we need to specify a string, i.e. `DECIMAL(7,2)` that is then parsed and used by Ladybug
to map to the internal decimal representation.
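
The same string notation can, per the `params_type` description above, also denote nested types. Below is a hedged sketch of what that could look like; the exact list-type spelling `INT64[]` and the continued use of the `conn` from the earlier example are assumptions, not something this diff confirms.

```py
def list_total(values):
    # Sums the elements of a list parameter
    return sum(values)

# "INT64[]" (a list of INT64) is an assumed spelling for the nested type string
conn.create_function("list_total", list_total, ["INT64[]"], lb.Type.INT64)

result = conn.execute("RETURN list_total([1, 2, 3]) AS total;")
print(result.get_as_df())  # expected: total = 6
```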
8 changes: 4 additions & 4 deletions src/content/docs/concurrency.md
@@ -43,12 +43,12 @@ Here's a simple example application in Python that demonstrates these two steps
database and a connection. The same principles apply to other language APIs as well:

```python
import lbug
import real_ladybug as lb

# Open the database in `READ_WRITE` mode. The below code is equivalent to:
# db = lbug.Database("example.lbug", read_only=False)
db = lbug.Database("example.lbug")
conn = lbug.Connection(db)
# db = lb.Database("example.lbug", read_only=False)
db = lb.Database("example.lbug")
conn = lb.Connection(db)
conn.execute("CREATE (a:Person {name: 'Alice'});")
```
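
As a counterpart to the snippet above, here is a hedged sketch of a second process opening the same database in read-only mode. The `read_only=True` flag is inferred from the comment above, and the concurrent-reader behaviour is an assumption rather than something this diff states.

```python
import real_ladybug as lb

# Assumed: a separate process can open the same database with read_only=True
db = lb.Database("example.lbug", read_only=True)
conn = lb.Connection(db)

result = conn.execute("MATCH (a:Person) RETURN a.name;")
print(result.get_as_df())
```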

18 changes: 9 additions & 9 deletions src/content/docs/cypher/query-clauses/load-from.md
@@ -139,11 +139,11 @@ inferred from the schema information of the data frame. Here is an example:

```py
# main.py
import lbug
import real_ladybug as lb
import pandas as pd

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

df = pd.DataFrame({
"name": ["Adam", "Karissa", "Zhang"],
@@ -172,11 +172,11 @@ Ladybug can also scan Polars DataFrames via the underlying PyArrow layer. The ru
variable names and data types is identical to scanning Pandas data frames. Here is an example:

```python
import lbug
import real_ladybug as lb
import polars as pl

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

df = pl.DataFrame({
"name": ["Adam", "Karissa", "Zhang"],
@@ -205,11 +205,11 @@ You can scan an existing PyArrow table as follows:
You can scan an existing PyArrow table as follows:

```python
import lbug
import real_ladybug as lb
import pyarrow as pa

db = lbug.Database(":memory:")
conn = lbug.Connection(db)
db = lb.Database(":memory:")
conn = lb.Connection(db)

pa_table = pa.table({
"name": ["Adam", "Karissa", "Zhang"],
24 changes: 12 additions & 12 deletions src/content/docs/extensions/vector.mdx
@@ -36,11 +36,11 @@ extension to directly create the embeddings using Cypher.
```python
# create_embeddings.py
# pip install sentence-transformers
import lbug
import real_ladybug as lb
import os

db = lbug.Database("example.lbug")
conn = lbug.Connection(db)
db = lb.Database("example.lbug")
conn = lb.Connection(db)

from sentence_transformers import SentenceTransformer
model = SentenceTransformer("all-MiniLM-L6-v2")
@@ -96,11 +96,11 @@ for title, publisher in zip(titles, publishers):
<TabItem value="llm" label="LLM extension">
```python
# create_embeddings.py
import lbug
import real_ladybug as lb
import os

db = lbug.Database("example.lbug")
conn = lbug.Connection(db)
db = lb.Database("example.lbug")
conn = lb.Connection(db)

conn.execute("INSTALL llm; LOAD llm;")
os.environ["OPENAI_API_KEY"] = "sk-proj-key" # Replace with your own OpenAI API key
@@ -269,11 +269,11 @@ Let's run some example search queries on our newly created vector index.
<Tabs syncKey="vector">
<TabItem value="sentence_transformers" label="sentence_transformers">
```python
import lbug
import real_ladybug as lb

# Initialize the database
db = lbug.Database("example.lbug")
conn = lbug.Connection(db)
db = lb.Database("example.lbug")
conn = lb.Connection(db)

# Install and load vector extension once again
conn.execute("INSTALL VECTOR;")
@@ -304,11 +304,11 @@ print(result.get_as_pl())
</TabItem>
<TabItem value="llm" label="LLM extension">
```python
import lbug
import real_ladybug as lb

# Initialize the database
db = lbug.Database("example.lbug")
conn = lbug.Connection(db)
db = lb.Database("example.lbug")
conn = lb.Connection(db)

# Install and load vector extension once again
conn.execute("INSTALL VECTOR;")