-
Notifications
You must be signed in to change notification settings - Fork 97
SeBS Cloudflare Compatibility #274
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
9a9d42e
aa24a07
4cc0476
cd24fcf
eaa42a1
57452fa
9e47e0f
822a9d9
f7bb950
1f0a979
b117e75
d42b157
ffd3f78
272a372
556d799
93c8a73
e17982f
214c947
b8f7c5c
dba2992
5390021
24497a2
8812708
5b3d784
cd183b8
9379f39
5f9ad9c
92db5ae
51892b0
3235d3f
416b67b
5284880
b6de39b
812f592
9229f9f
5899d87
6e0cd2b
3cd741f
2615a36
e69243a
f39aad0
e76f846
dc2f6ed
437cc97
1eb375c
6c0768e
0dfcfa8
b2465f9
0eb4d0b
db84f2d
7e2d8ac
35a556d
92c5dea
bcd5ecb
96ac2c1
b028151
03e274e
a11236a
35755d6
b427c5b
734eadf
5ffcb06
4da0c31
4874794
6b8e695
865ca06
20eb8db
ebe0794
9dd0a6e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,6 +1,6 @@ | ||
| { | ||
| "timeout": 120, | ||
| "memory": 128, | ||
| "languages": ["python", "nodejs"], | ||
| "languages": ["python"], | ||
| "modules": [] | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,56 @@ | ||
|
|
||
| import datetime | ||
| import os | ||
|
|
||
| from pyodide.ffi import run_sync | ||
| from pyodide.http import pyfetch | ||
|
|
||
| from . import storage | ||
| client = storage.storage.get_instance() | ||
|
|
||
| SEBS_USER_AGENT = "SeBS/1.2 (https://github.com/spcl/serverless-benchmarks) SeBS Benchmark Suite/1.2" | ||
|
|
||
async def do_request(url, download_path):
    """Download ``url`` via pyodide's fetch and write the body to ``download_path``.

    Args:
        url: HTTP(S) URL to fetch.
        download_path: Local filesystem path the response body is written to.

    Raises:
        RuntimeError: If the server responds with a non-success HTTP status,
            so that error pages (404/500) are never persisted and later
            uploaded as if they were valid content.
    """
    headers = {'User-Agent': SEBS_USER_AGENT}

    res = await pyfetch(url, headers=headers)
    # Fail fast on non-2xx responses instead of silently saving the error body.
    if not res.ok:
        raise RuntimeError(f"Download failed with status {res.status} for URL: {url}")
    bs = await res.bytes()

    with open(download_path, 'wb') as f:
        f.write(bs)
|
|
||
def handler(event):
    """Benchmark entry point: download an object over HTTP and upload it to storage.

    Args:
        event: Dict with ``bucket`` (``bucket``/``output`` keys) and ``object``
            (``url`` key) sub-dicts, as supplied by the SeBS driver.

    Returns:
        Dict with a ``result`` section (bucket, url, storage key) and a
        ``measurement`` section with timings in microseconds and sizes in bytes.
    """
    # Local import keeps the module-level import block (shared with the
    # pyodide runtime) untouched.
    import tempfile

    bucket = event.get('bucket').get('bucket')
    output_prefix = event.get('bucket').get('output')
    url = event.get('object').get('url')
    name = os.path.basename(url)
    # Use a unique, unpredictable temp path instead of '/tmp/{name}': the
    # predictable path can collide across concurrent invocations and is an
    # insecure-tempfile pattern (flagged in review).
    fd, download_path = tempfile.mkstemp(prefix='sebs-', suffix='-{}'.format(name), dir='/tmp')
    os.close(fd)

    process_begin = datetime.datetime.now()
    run_sync(do_request(url, download_path))
    size = os.path.getsize(download_path)
    process_end = datetime.datetime.now()

    upload_begin = datetime.datetime.now()
    key_name = client.upload(bucket, os.path.join(output_prefix, name), download_path)
    upload_end = datetime.datetime.now()

    # Durations reported in microseconds, matching the other SeBS benchmarks.
    process_time = (process_end - process_begin) / datetime.timedelta(microseconds=1)
    upload_time = (upload_end - upload_begin) / datetime.timedelta(microseconds=1)
    return {
        'result': {
            'bucket': bucket,
            'url': url,
            'key': key_name
        },
        'measurement': {
            # Download happens inside the timed "process" phase above, so the
            # dedicated download fields are reported as zero by design here.
            'download_time': 0,
            'download_size': 0,
            'upload_time': upload_time,
            'upload_size': size,
            'compute_time': process_time
        }
    }
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,78 @@ | ||
| const nosql = require('./nosql'); | ||
|
|
||
| const nosqlClient = nosql.nosql.get_instance(); | ||
| const nosqlTableName = "shopping_cart"; | ||
|
|
||
/**
 * Insert one product row into the shopping-cart table.
 * @param {*} cartId - Partition key of the cart.
 * @param {*} productId - Sort key of the product within the cart.
 * @param {string} productName - Display name stored alongside the entry.
 * @param {number} price - Unit price stored with the entry.
 * @param {number} quantity - Quantity stored with the entry.
 * @returns {Promise<void>} Resolves when the insert completes.
 */
async function addProduct(cartId, productId, productName, price, quantity) {
  const attributes = { price, quantity, name: productName };
  await nosqlClient.insert(
    nosqlTableName,
    ["cart_id", cartId],
    ["product_id", productId],
    attributes
  );
}
|
|
||
/**
 * Fetch a single product entry for a cart.
 * @param {*} cartId - Partition key of the cart.
 * @param {*} productId - Sort key of the product.
 * @returns {Promise<*>} The stored item as returned by the NoSQL client.
 */
async function getProducts(cartId, productId) {
  const partitionKey = ["cart_id", cartId];
  const sortKey = ["product_id", productId];
  return await nosqlClient.get(nosqlTableName, partitionKey, sortKey);
}
|
|
||
/**
 * List every product in a cart and aggregate totals.
 * @param {*} cartId - Partition key of the cart to query.
 * @returns {Promise<{products: string[], total_cost: number, avg_price: number}>}
 *   Product names, summed prices, and the price sum divided by the quantity
 *   sum (0.0 when the cart holds no quantity).
 */
async function queryProducts(cartId) {
  const rows = await nosqlClient.query(
    nosqlTableName,
    ["cart_id", cartId],
    "product_id"
  );

  const names = [];
  let totalCost = 0;
  let totalQuantity = 0;

  rows.forEach((row) => {
    names.push(row.name);
    totalCost += row.price;
    totalQuantity += row.quantity;
  });

  return {
    products: names,
    total_cost: totalCost,
    avg_price: totalQuantity > 0 ? totalCost / totalQuantity : 0.0
  };
}
userlaurin marked this conversation as resolved.
Show resolved
Hide resolved
|
||
|
|
||
| exports.handler = async function(event) { | ||
| const results = []; | ||
|
|
||
| for (const request of event.requests) { | ||
| const route = request.route; | ||
| const body = request.body; | ||
| let res; | ||
|
|
||
| if (route === "PUT /cart") { | ||
| await addProduct( | ||
| body.cart, | ||
| body.product_id, | ||
| body.name, | ||
| body.price, | ||
| body.quantity | ||
| ); | ||
| res = {}; | ||
| } else if (route === "GET /cart/{id}") { | ||
| res = await getProducts(body.cart, request.path.id); | ||
| } else if (route === "GET /cart") { | ||
| res = await queryProducts(body.cart); | ||
| } else { | ||
| throw new Error(`Unknown request route: ${route}`); | ||
| } | ||
|
|
||
| results.push(res); | ||
| } | ||
|
|
||
| return { result: results }; | ||
| }; | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,9 @@ | ||
| { | ||
| "name": "crud-api", | ||
| "version": "1.0.0", | ||
| "description": "CRUD API benchmark", | ||
| "author": "", | ||
| "license": "", | ||
| "dependencies": { | ||
| } | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,147 @@ | ||
| const fs = require('fs'); | ||
| const path = require('path'); | ||
| const zlib = require('zlib'); | ||
| const { v4: uuidv4 } = require('uuid'); | ||
| const storage = require('./storage'); | ||
|
|
||
| let storage_handler = new storage.storage(); | ||
|
|
||
| /** | ||
| * Calculate total size of a directory recursively | ||
| * @param {string} directory - Path to directory | ||
| * @returns {number} Total size in bytes | ||
| */ | ||
/**
 * Calculate the total size of all regular files under a directory,
 * descending into subdirectories (symlinks are followed via stat,
 * matching the previous behavior).
 * @param {string} directory - Path to the directory to measure.
 * @returns {number} Total size in bytes.
 */
function parseDirectory(directory) {
  let totalBytes = 0;
  // Iterative traversal with an explicit stack instead of recursion.
  const pending = [directory];

  while (pending.length > 0) {
    const dir = pending.pop();
    for (const name of fs.readdirSync(dir)) {
      const entryPath = path.join(dir, name);
      const info = fs.statSync(entryPath);
      if (info.isDirectory()) {
        pending.push(entryPath);
      } else {
        totalBytes += info.size;
      }
    }
  }

  return totalBytes;
}
|
|
||
| /** | ||
| * Create a simple tar.gz archive from a directory using native zlib | ||
| * This creates a gzip-compressed tar archive without external dependencies | ||
| * @param {string} sourceDir - Directory to compress | ||
| * @param {string} outputPath - Path for the output archive file | ||
| * @returns {Promise<void>} | ||
| */ | ||
/**
 * Create a gzip-compressed archive of a directory using only native zlib.
 * NOTE(review): despite the name, the payload is not actual tar — it is a
 * simple length-prefixed concatenation, preserved here byte-for-byte:
 * [u32be path length][path][u32be content length][content] per file,
 * gzipped at level 9.
 * @param {string} sourceDir - Directory to compress.
 * @param {string} outputPath - Path for the output archive file.
 * @returns {Promise<void>} Resolves once the archive is written.
 */
async function createTarGzArchive(sourceDir, outputPath) {
  // Walk sourceDir recursively, recording each regular file with its
  // path relative to the archive root.
  const entries = [];
  const walk = (dir, rel) => {
    for (const name of fs.readdirSync(dir)) {
      const absPath = path.join(dir, name);
      const relPath = path.join(rel, name);
      if (fs.statSync(absPath).isDirectory()) {
        walk(absPath, relPath);
      } else {
        entries.push({ relPath, absPath });
      }
    }
  };
  walk(sourceDir, '');

  // Serialize every file as: pathLen(4) + path + contentLen(4) + content.
  const pieces = [];
  for (const { relPath, absPath } of entries) {
    const nameBytes = Buffer.from(relPath);
    const data = fs.readFileSync(absPath);

    const nameLen = Buffer.allocUnsafe(4);
    nameLen.writeUInt32BE(nameBytes.length, 0);
    const dataLen = Buffer.allocUnsafe(4);
    dataLen.writeUInt32BE(data.length, 0);

    pieces.push(nameLen, nameBytes, dataLen, data);
  }

  // Gzip the concatenated payload at maximum compression, same as before.
  const compressed = zlib.gzipSync(Buffer.concat(pieces), { level: 9 });
  fs.writeFileSync(outputPath, compressed);
}
|
|
||
| exports.handler = async function(event) { | ||
| const bucket = event.bucket.bucket; | ||
| const input_prefix = event.bucket.input; | ||
| const output_prefix = event.bucket.output; | ||
| const key = event.object.key; | ||
|
|
||
| // Create unique download path | ||
| const download_path = path.join('/tmp', `${key}-${uuidv4()}`); | ||
| fs.mkdirSync(download_path, { recursive: true }); | ||
|
|
||
| // Download directory from storage | ||
| const s3_download_begin = Date.now(); | ||
| await storage_handler.download_directory(bucket, path.join(input_prefix, key), download_path); | ||
| const s3_download_stop = Date.now(); | ||
|
|
||
| // Calculate size of downloaded files | ||
| const size = parseDirectory(download_path); | ||
|
|
||
| // Compress directory | ||
| const compress_begin = Date.now(); | ||
| const archive_name = `${key}.tar.gz`; | ||
| const archive_path = path.join(download_path, archive_name); | ||
| await createTarGzArchive(download_path, archive_path); | ||
| const compress_end = Date.now(); | ||
|
|
||
| // Get archive size | ||
| const archive_size = fs.statSync(archive_path).size; | ||
|
|
||
| // Upload compressed archive | ||
| const s3_upload_begin = Date.now(); | ||
| const [key_name, uploadPromise] = storage_handler.upload( | ||
| bucket, | ||
| path.join(output_prefix, archive_name), | ||
| archive_path | ||
| ); | ||
| await uploadPromise; | ||
| const s3_upload_stop = Date.now(); | ||
|
|
||
| // Calculate times in microseconds | ||
| const download_time = (s3_download_stop - s3_download_begin) * 1000; | ||
| const upload_time = (s3_upload_stop - s3_upload_begin) * 1000; | ||
| const process_time = (compress_end - compress_begin) * 1000; | ||
|
|
||
| return { | ||
| result: { | ||
| bucket: bucket, | ||
| key: key_name | ||
| }, | ||
| measurement: { | ||
| download_time: download_time, | ||
| download_size: size, | ||
| upload_time: upload_time, | ||
| upload_size: archive_size, | ||
| compute_time: process_time | ||
| } | ||
| }; | ||
| }; | ||
|
|
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,9 @@ | ||
| { | ||
| "name": "compression-benchmark", | ||
| "version": "1.0.0", | ||
| "description": "Compression benchmark for serverless platforms", | ||
| "main": "function.js", | ||
| "dependencies": { | ||
| "uuid": "^10.0.0" | ||
| } | ||
| } |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Validate HTTP download success before writing and uploading.
Right now any HTTP response body (including 404/500 pages) is written and then uploaded as if it were valid content. Add an explicit status check and fail fast on non-success responses.
🔧 Proposed fix
async def do_request(url, download_path): headers = {'User-Agent': SEBS_USER_AGENT} res = await pyfetch(url, headers=headers) + if not res.ok: + raise RuntimeError(f"Download failed with status {res.status} for URL: {url}") bs = await res.bytes() with open(download_path, 'wb') as f: f.write(bs)🤖 Prompt for AI Agents