Commit 8d47da1a authored by Tiago Peixoto

Add scotus_majority

parent e04a83d5
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Tiago de Paula Peixoto <tiago@skewed.de>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from .. import *
import pandas
# Dataset descriptor attributes read by the dataset framework.
title = "SCOTUS majority opinions"
description = """Network of legal citations among majority opinions written by the Supreme Court of the United States (SCOTUS), from 1754-2002 (2008 version) and 1792-2006 (2007 version). In addition to the citation network, node metadata is included giving some description of each opinion.[^icon]
[^icon]: Description obtained from the [ICON](https://icon.colorado.edu) project."""
tags = ['Informational', 'Legal', 'Unweighted', 'Metadata', 'Temporal']
url = 'http://fowler.ucsd.edu/judicial.htm'
citation = [('J. H. Fowler and S. Jeon, "The Authority of Supreme Court Precedent." Social Networks 30(1), 16-30 (2008)', 'http://jhfowler.ucsd.edu/authority_of_supreme_court_precedent.pdf'),
('J. H. Fowler et al., "Network Analysis and the Law: Measuring the Legal Importance of Supreme Court Precedents." Political Analysis 15(3), 324-346 (2007)', 'http://jhfowler.ucsd.edu/network_analysis_and_the_law.pdf')]
icon_hash = '57114fb76afce8e9253875c7'
# No explicit upstream license is given for this dataset.
upstream_license = None
# NOTE(review): the original name below was a typo for ``upstream_license``
# (compare ``upstream_prefix``); kept as an alias for backward compatibility.
ustream_license = upstream_license
upstream_prefix = 'http://jhfowler.ucsd.edu/data'
# Each entry: (file name(s) inside the upstream archive(s), alternative
# version label, parser format). The first file is the edge list, the
# second (when present) holds per-opinion metadata.
files = [(('judicial.zip:allcites.txt', 'judicial.zip:judicial.csv'), "2008", 'loadtxt'),
(('Pol_Analysis_ussc_cites.csv.zip:Pol_Analysis_ussc_cites.csv',
'Pol_Analysis_replication.zip:Pol_Analysis_replication.dta'), "2007", 'csv')]
def fetch_upstream(force=False):
    """Download this dataset's upstream files, re-fetching if *force* is true."""
    dataset = __name__.split(".")[-1]
    return fetch_upstream_files(dataset, upstream_prefix, files, force)
@cache_network()
@coerce_props()
@annotate()
def parse(alts=None):
    """Parse the SCOTUS citation network(s) and yield ``(alt, graph)`` pairs.

    Parameters
    ----------
    alts : iterable of str or None
        Alternative versions to parse (``"2008"`` and/or ``"2007"``);
        ``None`` parses all available alternatives.

    Yields
    ------
    (str, Graph)
        The alternative label and the corresponding directed citation
        graph, with per-opinion metadata attached as vertex properties.
    """
    name = __name__.split(".")[-1]
    # ``files`` is module-level and only read here, so no ``global`` is needed.
    for fnames, alt, fmt in files:
        if alts is not None and alt not in alts:
            continue
        if isinstance(fnames, str):
            fnames = [fnames]
        with ExitStack() as stack:
            fs = [stack.enter_context(open_upstream_file(name, fn, "rb"))
                  for fn in fnames]
            # First file is the edge list; the second holds node metadata.
            g = parse_graph([fs[0]], fmt, directed=True)
            # Map vertex names (case identifiers) to vertex descriptors.
            vs = {g.vp.name[v] : v for v in g.vertices()}
            if alt == "2008":
                # Metadata comes as a CSV file whose first column is the
                # case id, followed by the columns listed in ``vps``.
                vps = ["usid", "parties", "overruled", "overruling", "oxford"]
                for p in vps:
                    g.vp[p] = g.new_vp("string")
                reader = csv.reader(io.TextIOWrapper(fs[1]))
                next(reader)  # skip the header row
                for row in reader:
                    case_id = row[0]  # renamed from ``id`` (shadowed builtin)
                    try:
                        v = vs[case_id]
                    except KeyError:
                        # Metadata row with no matching vertex; skip it.
                        continue
                    for p, val in zip(vps, row[1:len(vps)+1]):
                        g.vp[p][v] = val
            else:
                # 2007 version: vertices are keyed by LexisNexis id, and the
                # metadata comes from a Stata replication file.
                g.vp.lexid = g.vp.name
                del g.vp["name"]
                vps = ["usid", "name", "amiciz_num", "auth_num", "oxford_num",
                       "cq_num", "nyt_num", "insc_num", "apin_num", "stin_num",
                       "citedyr_num", "citingyr_num", "size_num", "age",
                       "insc_num_lag", "apin_num_lag", "stin_num_lag",
                       "outsc_num_lag", "sc_cumul_cites_lag"]
                for p in vps:
                    g.vp[p] = g.new_vp("string")
                df = pandas.read_stata(fs[1])
                for index, row in df.iterrows():
                    try:
                        v = vs[row["lexid"]]
                    except KeyError:
                        continue
                    for p in vps:
                        g.vp[p][v] = row[p]
            # Coerce string properties that hold integers to "int"; values
            # that do not parse raise ValueError and are left as strings.
            for p in g.vp.keys():
                try:
                    g.vp[p] = g.vp[p].copy("int")
                except ValueError:
                    continue
            yield alt, g
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment