Commit 9109de3

Merge branch 'microsoft:main' into master

KylinMountain authored Aug 10, 2024
2 parents 4a005d6 + 073f650
Showing 9 changed files with 45 additions and 21 deletions.
4 changes: 4 additions & 0 deletions .semversioner/next-release/patch-20240808225534741702.json
@@ -0,0 +1,4 @@
+{
+    "type": "patch",
+    "description": "Fix file dumps using json for non ASCII chars"
+}
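
Every hunk in this commit makes the same one-line fix: passing ensure_ascii=False to json.dumps. By default the standard library escapes every non-ASCII character to a \uXXXX sequence, which bloats the dumped files and makes them unreadable for text in languages such as Chinese. A minimal sketch of the difference, using plain standard-library behavior and a made-up payload (not code from this repository):

import json

record = {"description": "实体摘要 – café"}

# Default: non-ASCII characters are escaped to \uXXXX sequences.
print(json.dumps(record))
# {"description": "\u5b9e\u4f53\u6458\u8981 \u2013 caf\u00e9"}

# ensure_ascii=False keeps the characters verbatim; the caller must
# then write the result out with a UTF-8-capable encoding.
print(json.dumps(record, ensure_ascii=False))
# {"description": "实体摘要 – café"}
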
4 changes: 3 additions & 1 deletion graphrag/index/cache/json_pipeline_cache.py
@@ -44,7 +44,9 @@ async def set(self, key: str, value: Any, debug_data: dict | None = None) -> None:
         if value is None:
             return
         data = {"result": value, **(debug_data or {})}
-        await self._storage.set(key, json.dumps(data), encoding=self._encoding)
+        await self._storage.set(
+            key, json.dumps(data, ensure_ascii=False), encoding=self._encoding
+        )
 
     async def has(self, key: str) -> bool:
         """Has method definition."""
@@ -127,7 +127,9 @@ async def _summarize_descriptions_with_llm(
             name="summarize",
             variables={
                 self._entity_name_key: json.dumps(items),
-                self._input_descriptions_key: json.dumps(sorted(descriptions)),
+                self._input_descriptions_key: json.dumps(
+                    sorted(descriptions), ensure_ascii=False
+                ),
             },
             model_parameters={"max_tokens": self._max_summary_length},
         )
2 changes: 1 addition & 1 deletion graphrag/index/reporting/blob_workflow_callbacks.py
@@ -78,7 +78,7 @@ def _write_log(self, log: dict[str, Any]):
         blob_client = self._blob_service_client.get_blob_client(
             self._container_name, self._blob_name
         )
-        blob_client.append_block(json.dumps(log) + "\n")
+        blob_client.append_block(json.dumps(log, ensure_ascii=False) + "\n")
 
         # update the blob's block count
         self._num_blocks += 1
28 changes: 19 additions & 9 deletions graphrag/index/reporting/file_workflow_callbacks.py
@@ -34,13 +34,16 @@ def on_error(
     ):
         """Handle when an error occurs."""
         self._out_stream.write(
-            json.dumps({
-                "type": "error",
-                "data": message,
-                "stack": stack,
-                "source": str(cause),
-                "details": details,
-            })
+            json.dumps(
+                {
+                    "type": "error",
+                    "data": message,
+                    "stack": stack,
+                    "source": str(cause),
+                    "details": details,
+                },
+                ensure_ascii=False,
+            )
             + "\n"
         )
         message = f"{message} details={details}"
@@ -49,14 +52,21 @@ def on_error(
     def on_warning(self, message: str, details: dict | None = None):
         """Handle when a warning occurs."""
         self._out_stream.write(
-            json.dumps({"type": "warning", "data": message, "details": details}) + "\n"
+            json.dumps(
+                {"type": "warning", "data": message, "details": details},
+                ensure_ascii=False,
+            )
+            + "\n"
         )
         _print_warning(message)
 
     def on_log(self, message: str, details: dict | None = None):
         """Handle when a log message is produced."""
         self._out_stream.write(
-            json.dumps({"type": "log", "data": message, "details": details}) + "\n"
+            json.dumps(
+                {"type": "log", "data": message, "details": details}, ensure_ascii=False
+            )
+            + "\n"
         )
 
         message = f"{message} details={details}"
4 changes: 3 additions & 1 deletion graphrag/index/run.py
@@ -234,7 +234,9 @@ async def run_pipeline(
     )
 
     async def dump_stats() -> None:
-        await storage.set("stats.json", json.dumps(asdict(stats), indent=4))
+        await storage.set(
+            "stats.json", json.dumps(asdict(stats), indent=4, ensure_ascii=False)
+        )
 
     async def load_table_from_storage(name: str) -> pd.DataFrame:
         if not await storage.has(name):
2 changes: 1 addition & 1 deletion graphrag/index/text_splitting/text_splitting.py
@@ -224,7 +224,7 @@ def _append_to_result(self, chunk_list: list[str], new_chunk: list[str]):
         """Append the current chunk to the result."""
         if new_chunk and len(new_chunk) > 0:
             if self._type == TextListSplitterType.JSON:
-                chunk_list.append(json.dumps(new_chunk))
+                chunk_list.append(json.dumps(new_chunk, ensure_ascii=False))
             else:
                 chunk_list.append(self._output_delimiter.join(new_chunk))
 
@@ -82,7 +82,7 @@ async def _run_extractor(
                 rank_explanation=report.get("rating_explanation", ""),
                 summary=report.get("summary", ""),
                 findings=report.get("findings", []),
-                full_content_json=json.dumps(report, indent=4),
+                full_content_json=json.dumps(report, indent=4, ensure_ascii=False),
             )
         except Exception as e:
             log.exception("Error processing community: %s", community)
16 changes: 10 additions & 6 deletions graphrag/index/verbs/snapshot_rows.py
@@ -49,9 +49,11 @@ def get_row_name(row: Any, row_idx: Any):
         if fmt.format == "json":
             await storage.set(
                 f"{row_name}.{extension}",
-                json.dumps(row[column])
-                if column is not None
-                else json.dumps(row.to_dict()),
+                (
+                    json.dumps(row[column], ensure_ascii=False)
+                    if column is not None
+                    else json.dumps(row.to_dict(), ensure_ascii=False)
+                ),
             )
         elif fmt.format == "text":
             if column is None:
@@ -65,9 +67,11 @@ def get_row_name(row: Any, row_idx: Any):
 def _parse_formats(formats: list[str | dict[str, Any]]) -> list[FormatSpecifier]:
     """Parse the formats into a list of FormatSpecifiers."""
     return [
-        FormatSpecifier(**fmt)
-        if isinstance(fmt, dict)
-        else FormatSpecifier(format=fmt, extension=_get_format_extension(fmt))
+        (
+            FormatSpecifier(**fmt)
+            if isinstance(fmt, dict)
+            else FormatSpecifier(format=fmt, extension=_get_format_extension(fmt))
+        )
         for fmt in formats
     ]
 
