support data: in URL for memory. Add ootb support for pdfs

Hardik Shah 2024-09-12 10:54:55 -07:00
parent a11d92601b
commit 5f49dce839
5 changed files with 82 additions and 12 deletions
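
Taken together, the change lets a memory-bank document carry its payload inline as a base64 `data:` URL built from a local file (PDFs included), instead of only an http(s) URL. A rough sketch of the resulting call pattern, pieced together from the hunks below; the import path and the `client` setup are assumptions, not shown in this diff:

# Sketch only: the import location is assumed; the memory API module edited
# below is where MemoryBankDocument and data_url_from_file are defined.
from llama_toolchain.memory.api import MemoryBankDocument, data_url_from_file

doc = MemoryBankDocument(
    document_id="pdf-0",
    # A local PDF becomes "data:application/pdf;base64,<...>" via the new helper.
    content=data_url_from_file("report.pdf"),  # illustrative path
    # mime_type can now be omitted; it is optional after this change.
)

# Hypothetical insertion, mirroring the test script hunk below (client construction omitted):
# await client.insert_documents(bank_id=bank.bank_id, documents=[doc])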

View file

@@ -83,13 +83,12 @@ class AgenticSystemClient(AgenticSystem):
             if line.startswith("data:"):
                 data = line[len("data: ") :]
                 try:
-                    if "error" in data:
+                    jdata = json.loads(data)
+                    if "error" in jdata:
                         cprint(data, "red")
                         continue
-                    yield AgenticSystemTurnResponseStreamChunk(
-                        **json.loads(data)
-                    )
+                    yield AgenticSystemTurnResponseStreamChunk(**jdata)
                 except Exception as e:
                     print(data)
                     print(f"Error with parsing or validation: {e}")

View file

@@ -8,7 +8,9 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+import base64
+import mimetypes
+import os

 from typing import List, Optional, Protocol

 from llama_models.schema_utils import json_schema_type, webmethod
@@ -23,10 +25,25 @@ from llama_models.llama3.api.datatypes import * # noqa: F403
 class MemoryBankDocument(BaseModel):
     document_id: str
     content: InterleavedTextMedia | URL
-    mime_type: str
+    mime_type: str | None = None
     metadata: Dict[str, Any] = Field(default_factory=dict)


+def data_url_from_file(file_path: str) -> URL:
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"File not found: {file_path}")
+
+    with open(file_path, "rb") as file:
+        file_content = file.read()
+
+    base64_content = base64.b64encode(file_content).decode("utf-8")
+    mime_type, _ = mimetypes.guess_type(file_path)
+
+    data_url = f"data:{mime_type};base64,{base64_content}"
+
+    return URL(uri=data_url)
+
+
 @json_schema_type
 class MemoryBankType(Enum):
     vector = "vector"

View file

@@ -5,11 +5,14 @@
 # the root directory of this source tree.

 import asyncio
+import json
+from pathlib import Path
 from typing import Any, Dict, List, Optional

 import fire
 import httpx
+from termcolor import cprint

 from llama_toolchain.core.datatypes import RemoteProviderConfig
@@ -120,7 +123,7 @@ async def run_main(host: str, port: int, stream: bool):
             overlap_size_in_tokens=64,
         ),
     )
-    print(bank)
+    cprint(json.dumps(bank.dict(), indent=4), "green")

     retrieved_bank = await client.get_memory_bank(bank.bank_id)
     assert retrieved_bank is not None
@@ -145,6 +148,16 @@ async def run_main(host: str, port: int, stream: bool):
         for i, url in enumerate(urls)
     ]

+    this_dir = os.path.dirname(__file__)
+    files = [Path(this_dir).parent.parent / "CONTRIBUTING.md"]
+    documents += [
+        MemoryBankDocument(
+            document_id=f"num-{i}",
+            content=data_url_from_file(path),
+        )
+        for i, path in enumerate(files)
+    ]
+
     # insert some documents
     await client.insert_documents(
         bank_id=bank.bank_id,

View file

@@ -3,21 +3,23 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+import io
+import re
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from typing import Any, Dict, List, Optional

+import chardet
 import httpx
 import numpy as np
 from numpy.typing import NDArray
+from pypdf import PdfReader

 from llama_models.llama3.api.datatypes import * # noqa: F403
 from llama_models.llama3.api.tokenizer import Tokenizer

 from llama_toolchain.memory.api import * # noqa: F403

 ALL_MINILM_L6_V2_DIMENSION = 384
 EMBEDDING_MODEL = None
@@ -36,11 +38,48 @@ def get_embedding_model() -> "SentenceTransformer":
     return EMBEDDING_MODEL


+def content_from_data(data_url: str) -> str:
+    match = re.match(r"data:([^;,]+)(?:;charset=([^;,]+))?(?:;base64)?,(.+)", data_url)
+    if not match:
+        raise ValueError("Invalid Data URL format")
+
+    mime_type, charset, data = match.groups()
+
+    if ";base64," in data_url:
+        data = base64.b64decode(data)
+    else:
+        data = data.encode("utf-8")
+
+    mime_category = mime_type.split("/")[0]
+    if mime_category == "text":
+        # For text-based files (including CSV, MD)
+        if charset:
+            return data.decode(charset)
+        else:
+            # Try to detect encoding if charset is not specified
+            detected = chardet.detect(data)
+            return data.decode(detected["encoding"])
+
+    elif mime_type == "application/pdf":
+        # For PDFs, extract text page by page; other binary formats can't be
+        # reliably converted to a string here.
+        pdf_bytes = io.BytesIO(data)
+        pdf_reader = PdfReader(pdf_bytes)
+        return "\n".join([page.extract_text() for page in pdf_reader.pages])
+
+    else:
+        cprint("Could not extract content from data_url properly.", color="red")
+        return ""
+
+
 async def content_from_doc(doc: MemoryBankDocument) -> str:
     if isinstance(doc.content, URL):
-        async with httpx.AsyncClient() as client:
-            r = await client.get(doc.content.uri)
-            return r.text
+        if doc.content.uri.startswith("data:"):
+            return content_from_data(doc.content.uri)
+        else:
+            async with httpx.AsyncClient() as client:
+                r = await client.get(doc.content.uri)
+                return r.text

     return interleaved_text_media_as_str(doc.content)
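
To make the decode path concrete, here is a small self-contained check of the two branches `content_from_data` distinguishes: an inline text payload with an explicit charset, and a base64 payload of the kind `data_url_from_file` emits (the values are made up):

import base64
import re

DATA_URL_RE = r"data:([^;,]+)(?:;charset=([^;,]+))?(?:;base64)?,(.+)"

# Plain-text payload with an explicit charset: decoded directly.
m = re.match(DATA_URL_RE, "data:text/plain;charset=utf-8,hello world")
print(m.groups())  # ('text/plain', 'utf-8', 'hello world')

# Base64 payload, the shape data_url_from_file produces: decoded before use.
uri = "data:application/pdf;base64," + base64.b64encode(b"%PDF-1.4 ...").decode("utf-8")
mime_type, charset, data = re.match(DATA_URL_RE, uri).groups()
print(mime_type, charset, base64.b64decode(data)[:8])  # application/pdf None b'%PDF-1.4'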

View file

@@ -10,6 +10,8 @@ from llama_toolchain.core.datatypes import * # noqa: F403
 EMBEDDING_DEPS = [
     "blobfile",
+    "chardet",
+    "pypdf",
     "sentence-transformers",
 ]