Skip to content

Commit

Permalink
Merge pull request freelawproject#4334 from RomaDF/issue-4140-add-download-button-de
Browse files Browse the repository at this point in the history

Issue 4140 add download button de
  • Loading branch information
mlissner authored Aug 27, 2024
2 parents 5b5d939 + a050be5 commit 5144975
Show file tree
Hide file tree
Showing 8 changed files with 387 additions and 16 deletions.
11 changes: 10 additions & 1 deletion cl/assets/static-global/css/override.css
Original file line number Diff line number Diff line change
Expand Up @@ -745,7 +745,6 @@ div.shown ul {
padding: 3px 0 5px 0;
}


#summaries ul {
padding-inline-start: 15px;
border-bottom: 1pt solid #DDD;
Expand Down Expand Up @@ -893,6 +892,16 @@ input.court-checkbox, input.status-checkbox {
border-bottom: 1px solid #dddddd;
}

/* Remove default cell padding so the description header and the CSV
   export button sit flush inside the docket-entry table header row. */
.description-header,
.export-csv {
  padding: 0; /* unitless zero: "0px" -> "0" */
}

/* NOTE(review): Bootstrap 3's xs/sm boundary is 768px; min-width: 767px
   makes this rule also apply at exactly 767px wide — confirm intended. */
@media (min-width: 767px) {
  .export-csv {
    padding: 5px 10px;
  }
}
#docket-entry-table .recap-documents.row {
padding-top: 0px;
border-bottom: none;
Expand Down
45 changes: 44 additions & 1 deletion cl/lib/model_helpers.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import contextlib
import os
import re
from typing import Optional
from typing import Callable, Optional

from django.core.exceptions import ValidationError
from django.utils.text import get_valid_filename, slugify
Expand Down Expand Up @@ -562,3 +562,46 @@ def linkify_orig_docket_number(agency: str, og_docket_number: str) -> str:
"""
# If no match is found, return empty str
return ""


class CSVExportMixin:
    """Mixin for models that can serialize themselves as one CSV row.

    Subclasses must implement ``get_csv_columns`` and
    ``get_column_function``; ``to_csv_row`` then builds the row from the
    instance's attributes.
    """

    def get_csv_columns(self, get_column_name: bool = False) -> list[str]:
        """Get list of column names required in a csv file.

        :param get_column_name: Whether to prefix each attribute name with
            the lowercased class name (see ``add_class_name``).
        :return: list of attrs of the class to include in the csv file.
        """
        raise NotImplementedError(
            "Subclass must implement get_csv_columns method"
        )

    def get_column_function(self) -> dict[str, Callable[[str], str]]:
        """Get dict of attrs: function to apply on field value if it needs
        to be pre-processed before being added to the csv.

        :return: dict --> {attr1: function}
        """
        raise NotImplementedError(
            # Fixed typo in message: was "get_column_fuction".
            "Subclass must implement get_column_function method"
        )

    def to_csv_row(self) -> list[str]:
        """Build this instance's csv row.

        Reads each attribute named by ``get_csv_columns``, applies the
        matching pre-processing function (if any), and returns the list of
        values for the row.
        """
        row = []
        functions = self.get_column_function()
        columns = self.get_csv_columns(get_column_name=False)
        for field in columns:
            attr = getattr(self, field)
            if not attr:
                # Falsy values (None, "", 0, False) are exported as "".
                attr = ""
            function = functions.get(field)
            if function:
                # NOTE(review): the function is called with the field NAME,
                # not the field value, although get_column_function's
                # docstring says it applies to the value — confirm intended.
                attr = function(field)
            row.append(attr)
        return row

    def add_class_name(self, attribute_name: str) -> str:
        """Prefix *attribute_name* with the lowercased class name."""
        return f"{self.__class__.__name__.lower()}_{attribute_name}"
23 changes: 22 additions & 1 deletion cl/opinion_page/templates/includes/de_list.html
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,28 @@
<p class="hidden-xs">Document Number</p>
</div>
<div class="col-xs-3 col-sm-2">Date&nbsp;Filed</div>
<div class="col-xs-7 col-sm-6">Description</div>
<div class="col-xs-8 col-sm-6">
<div class="col-xs-9 description-header">
Description
</div>
<div class="col-xs-3">
<a href="{% url 'view_download_docket' docket.id %}" class="btn export-csv visible-xs">
<div class="flex align-items-center">
<i class="fa fa-download gray"></i>&nbsp;
<span>CSV</span>
</div>
</a>
</div>
</div>
<div class="flex justify-content-end col-xs-3">
<a href="{% url 'view_download_docket' docket.id %}" class="btn btn-default hidden-xs export-csv">
<div class="flex align-items-center">
<i class="fa fa-download gray hidden-lg"></i>
<i class="fa fa-download fa-lg gray visible-lg"></i>&nbsp;
<span>Export CSV</span>
</div>
</a>
</div>
</div>
{% for de in docket_entries %}
<div class="row {% cycle "odd" "even" %}"
Expand Down
160 changes: 156 additions & 4 deletions cl/opinion_page/tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,15 @@
from datetime import date
from http import HTTPStatus
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
from unittest.mock import AsyncMock, MagicMock, PropertyMock

from asgiref.sync import async_to_sync, sync_to_async
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import Group
from django.contrib.auth.models import Group, User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from django.test import override_settings
from django.test import RequestFactory, override_settings
from django.test.client import AsyncClient
from django.urls import reverse
from django.utils.text import slugify
Expand All @@ -38,9 +38,14 @@
)
from cl.opinion_page.utils import (
es_get_citing_clusters_with_cache,
generate_docket_entries_csv_data,
make_docket_title,
)
from cl.opinion_page.views import get_prev_next_volumes
from cl.opinion_page.views import (
download_docket_entries_csv,
fetch_docket_entries,
get_prev_next_volumes,
)
from cl.people_db.factories import (
PersonFactory,
PersonWithChildrenFactory,
Expand All @@ -50,24 +55,28 @@
from cl.recap.factories import (
AppellateAttachmentFactory,
AppellateAttachmentPageFactory,
DocketDataFactory,
DocketEntriesDataFactory,
DocketEntryDataFactory,
)
from cl.recap.mergers import add_docket_entries, merge_attachment_page_data
from cl.search.factories import (
CitationWithParentsFactory,
CourtFactory,
DocketEntryFactory,
DocketFactory,
OpinionClusterFactoryWithChildrenAndParents,
OpinionClusterWithParentsFactory,
OpinionFactory,
OpinionsCitedWithParentsFactory,
RECAPDocumentFactory,
)
from cl.search.models import (
PRECEDENTIAL_STATUS,
SEARCH_TYPES,
Citation,
Docket,
DocketEntry,
Opinion,
OpinionCluster,
RECAPDocument,
Expand Down Expand Up @@ -1523,3 +1532,146 @@ async def test_block_cluster_and_docket_via_ajax_view(self) -> None:

await self.cluster.arefresh_from_db()
self.assertTrue(self.cluster.blocked)


class DocketEntryFileDownload(TestCase):
    """Test Docket entries File Download and required functions."""

    def setUp(self):
        court = CourtFactory(id="ca5", jurisdiction="F")
        # Main docket to test
        docket = DocketFactory(
            court=court,
            case_name="Foo v. Bar",
            docket_number="12-11111",
            pacer_case_id="12345",
        )

        # Entry with a large entry number and a matching pacer_doc_id.
        de1 = DocketEntryFactory(
            docket=docket,
            entry_number=506581111,
        )
        RECAPDocumentFactory(
            docket_entry=de1,
            pacer_doc_id="00506581111",
            document_number="00506581111",
            document_type=RECAPDocument.PACER_DOCUMENT,
        )
        # Entry sharing de1's pacer_doc_id but with a small entry number.
        de1_2 = DocketEntryFactory(
            docket=docket,
            entry_number=1,
        )
        RECAPDocumentFactory(
            docket_entry=de1_2,
            pacer_doc_id="00506581111",
            document_number="1",
            document_type=RECAPDocument.PACER_DOCUMENT,
        )

        # Entry with a description but an empty pacer_doc_id.
        de2 = DocketEntryFactory(
            docket=docket,
            entry_number=2,
            description="Lorem ipsum dolor sit amet",
        )
        RECAPDocumentFactory(
            docket_entry=de2,
            pacer_doc_id="",
            document_number="2",
            document_type=RECAPDocument.PACER_DOCUMENT,
        )

        de3 = DocketEntryFactory(
            docket=docket,
            entry_number=506582222,
        )
        RECAPDocumentFactory(
            docket_entry=de3,
            pacer_doc_id="00506582222",
            document_number="3",
            document_type=RECAPDocument.PACER_DOCUMENT,
        )
        # Create extra docket and docket entries to make sure it only fetch
        # required docket_entries
        docket1 = DocketFactory(
            court=court,
            case_name="Test v. Test1",
            docket_number="12-222222",
            pacer_case_id="12345",
        )
        de4 = DocketEntryFactory(
            docket=docket1,
            entry_number=506582222,
        )
        RECAPDocumentFactory(
            docket_entry=de4,
            pacer_doc_id="00506582222",
            document_number="005506582222",
            document_type=RECAPDocument.PACER_DOCUMENT,
        )
        self.mocked_docket = docket
        self.mocked_extra_docket = docket1
        self.mocked_docket_entries = [de1, de1_2, de2, de3]
        self.mocked_extra_docket_entries = [de4]

        # Build a plain GET request and stub the async user accessor the
        # view calls (request.auser) so no authentication backend is needed.
        request_factory = RequestFactory()
        self.request = request_factory.get("/mock-url/")
        self.user = UserFactory.create(
            username="learned",
            email="[email protected]",
        )
        self.request.auser = AsyncMock(return_value=self.user)

    def tearDown(self):
        # Clear all test data
        Docket.objects.all().delete()
        DocketEntry.objects.all().delete()
        RECAPDocument.objects.all().delete()
        User.objects.all().delete()

    async def test_fetch_docket_entries(self) -> None:
        """Verify that fetch entries function returns right docket_entries"""
        res = await fetch_docket_entries(self.mocked_docket)
        # Only the main docket's entries are returned; the extra docket's
        # entry must be excluded.
        self.assertEqual(await res.acount(), len(self.mocked_docket_entries))
        self.assertTrue(await res.acontains(self.mocked_docket_entries[0]))
        self.assertFalse(
            await res.acontains(self.mocked_extra_docket_entries[0])
        )

    def test_generate_docket_entries_csv_data(self) -> None:
        """Verify str with csv data is created. Check column and data entry"""
        res = generate_docket_entries_csv_data(self.mocked_docket_entries)
        # Rows are CRLF-terminated; fields are double-quoted (QUOTE_ALL).
        res_lines = res.split("\r\n")
        res_line_data = res_lines[1].split(",")
        # Header starts with the class-prefixed id column.
        self.assertEqual(res[:16], '"docketentry_id"')
        # Second column of the first data row is de1's entry_number.
        self.assertEqual(res_line_data[1], '"506581111"')

    # Decorators apply bottom-up, so the mock arguments arrive in the order:
    # generate_docket_entries_csv_data, core_docket_data, user_has_alert.
    # NOTE(review): these patch targets live in cl.opinion_page.utils; this
    # only intercepts the view's calls if the view resolves them through that
    # module at call time — confirm against cl.opinion_page.views.
    @mock.patch("cl.opinion_page.utils.user_has_alert")
    @mock.patch("cl.opinion_page.utils.core_docket_data")
    @mock.patch("cl.opinion_page.utils.generate_docket_entries_csv_data")
    def test_view_download_docket_entries_csv(
        self,
        mock_download_function,
        mock_core_docket_data,
        mock_user_has_alert,
    ) -> None:
        """Test download_docket_entries_csv returns csv content"""

        mock_download_function.return_value = (
            '"col1","col2","col3"\r\n"value1","value2","value3"'
        )
        mock_user_has_alert.return_value = False
        mock_core_docket_data.return_value = (
            self.mocked_docket,
            {
                "docket": self.mocked_docket,
                "title": "title",
                "note_form": "note_form",
                "has_alert": mock_user_has_alert.return_value,
                "timezone": "EST",
                "private": True,
            },
        )
        # The view is async; run it synchronously and check the content type.
        response = async_to_sync(download_docket_entries_csv)(
            self.request, self.mocked_docket.id
        )
        self.assertEqual(response["Content-Type"], "text/csv")
6 changes: 6 additions & 0 deletions cl/opinion_page/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
court_publish_page,
docket_authorities,
docket_idb_data,
download_docket_entries_csv,
redirect_docket_recap,
redirect_og_lookup,
view_authorities,
Expand Down Expand Up @@ -54,6 +55,11 @@
path(
"docket/<int:pk>/<blank-slug:slug>/", view_docket, name="view_docket" # type: ignore[arg-type]
),
path(
"docket/<int:docket_id>/download/",
download_docket_entries_csv, # type: ignore[arg-type]
name="view_download_docket",
),
path(
"recap/gov.uscourts.<str:court>.<str:pacer_case_id>/",
redirect_docket_recap, # type: ignore[arg-type]
Expand Down
31 changes: 31 additions & 0 deletions cl/opinion_page/utils.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import csv
from io import StringIO
from typing import Dict, Tuple, Union

from asgiref.sync import sync_to_async
Expand Down Expand Up @@ -131,3 +133,32 @@ async def es_get_citing_clusters_with_cache(
cache_key, (citing_clusters, citing_cluster_count), a_week
)
return citing_clusters, citing_cluster_count


def generate_docket_entries_csv_data(docket_entries):
    """Get str representing in-memory CSV file from docket_entries.

    :param docket_entries: List of DocketEntry that implements CSVExportMixin.
    :returns: str with the CSV content (all fields quoted, CRLF rows);
        an empty str when docket_entries is empty.
    """
    # Guard: previously an empty list raised IndexError below.
    if not docket_entries:
        return ""

    output: StringIO = StringIO()
    csvwriter = csv.writer(output, quotechar='"', quoting=csv.QUOTE_ALL)

    # Header row: docket-entry columns followed by recap-document columns.
    # (Removed a dead `columns = []` that was immediately overwritten.)
    # NOTE(review): assumes the first entry has at least one recap document;
    # .first() returning None would raise AttributeError here — confirm.
    columns = docket_entries[0].get_csv_columns(get_column_name=True)
    columns += (
        docket_entries[0]
        .recap_documents.first()
        .get_csv_columns(get_column_name=True)
    )
    csvwriter.writerow(columns)

    for docket_entry in docket_entries:
        row = docket_entry.to_csv_row()
        # Each additional recap document extends the same row, so entries
        # with several documents produce rows wider than the header.
        for recap_doc in docket_entry.recap_documents.all():
            row += recap_doc.to_csv_row()
        csvwriter.writerow(row)

    csv_content: str = output.getvalue()
    output.close()
    return csv_content
Loading

0 comments on commit 5144975

Please sign in to comment.