Lab Solutions
This page will be updated with Python examples related to the labs as the course progresses.
Examples from the lectures
Lecture 1: Introduction to KGs
Turtle example:
@prefix ex: <http://example.org/> .
ex:Roger_Stone
ex:name "Roger Stone" ;
ex:occupation ex:lobbyist ;
ex:significant_person ex:Donald_Trump .
ex:Donald_Trump
ex:name "Donald Trump" .
Lecture 2: RDF
Blank nodes for anonymity, or when we have not decided on a URI:
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
EX = Namespace('http://example.org/')
g = Graph()
g.bind('ex', EX) # this is why the line '@prefix ex: <http://example.org/> .'
# and the 'ex.' prefix are used when we print out Turtle later
robertMueller = BNode()
g.add((robertMueller, RDF.type, EX.Human))
g.add((robertMueller, FOAF.name, Literal('Robert Mueller', lang='en')))
g.add((robertMueller, EX.position_held, Literal('Director of the Federal Bureau of Investigation', lang='en')))
print(g.serialize(format='turtle'))
Blank nodes used to group related properties:
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
EX = Namespace('http://example.org/')
g = Graph()
g.bind('ex', EX)
# This is a task in Exercise 2
print(g.serialize(format='turtle'))
Literals:
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
EX = Namespace('http://example.org/')
g = Graph()
g.bind('ex', EX)
g.add((EX.Robert_Mueller, RDF.type, EX.Human))
g.add((EX.Robert_Mueller, FOAF.name, Literal('Robert Mueller', lang='en')))
g.add((EX.Robert_Mueller, FOAF.name, Literal('رابرت مولر', lang='fa')))
g.add((EX.Robert_Mueller, DC.description, Literal('sixth director of the FBI', datatype=XSD.string)))
g.add((EX.Robert_Mueller, EX.start_time, Literal(2001, datatype=XSD.integer)))
print(g.serialize(format='turtle'))
Alternative container (open):
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')
g = Graph()
g.bind('ex', EX)
muellerReportArchives = BNode()
g.add((muellerReportArchives, RDF.type, RDF.Alt))
archive1 = 'https://archive.org/details/MuellerReportVolume1Searchable/' \
'Mueller%20Report%20Volume%201%20Searchable/'
archive2 = 'https://edition.cnn.com/2019/04/18/politics/full-mueller-report-pdf/index.html'
archive3 = 'https://www.politico.com/story/2019/04/18/mueller-report-pdf-download-text-file-1280891'
g.add((muellerReportArchives, RDFS.member, Literal(archive1, datatype=XSD.anyURI)))
g.add((muellerReportArchives, RDFS.member, Literal(archive2, datatype=XSD.anyURI)))
g.add((muellerReportArchives, RDFS.member, Literal(archive3, datatype=XSD.anyURI)))
g.add((EX.Mueller_Report, RDF.type, FOAF.Document))
g.add((EX.Mueller_Report, DC.contributor, EX.Robert_Mueller))
g.add((EX.Mueller_Report, SCHEMA.archivedAt, muellerReportArchives))
print(g.serialize(format='turtle'))
Sequence container (open):
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')
g = Graph()
g.bind('ex', EX)
donaldTrumpSpouses = BNode()
g.add((donaldTrumpSpouses, RDF.type, RDF.Seq))
g.add((donaldTrumpSpouses, RDF._1, EX.IvanaTrump))
g.add((donaldTrumpSpouses, RDF._2, EX.MarlaMaples))
g.add((donaldTrumpSpouses, RDF._3, EX.MelaniaTrump))
g.add((EX.Donald_Trump, SCHEMA.spouse, donaldTrumpSpouses))
print(g.serialize(format='turtle'))
Collection (closed list):
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
from rdflib.collection import Collection
EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')
g = Graph()
g.bind('ex', EX)
donaldTrumpSpouses = BNode()
Collection(g, donaldTrumpSpouses, [
    EX.IvanaTrump, EX.MarlaMaples, EX.MelaniaTrump
])
g.add((EX.Donald_Trump, SCHEMA.spouse, donaldTrumpSpouses))
print(g.serialize(format='turtle'))
g.serialize(destination='s02_Donald_Trump_spouses_list.ttl', format='turtle')
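To read the closed list back in order, rdflib's Graph.items() can walk the collection; a small check, continuing from the block above:
print(list(g.items(donaldTrumpSpouses)))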
Example lab solutions
Getting started (Lab 1)
from rdflib import Graph, Namespace
g = Graph()
ex = Namespace('http://example.org/')
g.bind("ex", ex)
#The Mueller Investigation was led by Robert Mueller.
g.add((ex.Mueller_Investigation, ex.leadBy, ex.Robert_Mueller))
#It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, Michael Cohen, and Roger Stone.
g.add((ex.Mueller_Investigation, ex.involved, ex.Paul_Manafort))
g.add((ex.Mueller_Investigation, ex.involved, ex.Rick_Gates))
g.add((ex.Mueller_Investigation, ex.involved, ex.George_Papadopoulos))
g.add((ex.Mueller_Investigation, ex.involved, ex.Michael_Flynn))
g.add((ex.Mueller_Investigation, ex.involved, ex.Michael_Cohen))
g.add((ex.Mueller_Investigation, ex.involved, ex.Roger_Stone))
# --- Paul Manafort ---
#Paul Manafort was a business partner of Rick Gates.
g.add((ex.Paul_Manafort, ex.businessPartner, ex.Rick_Gates))
# He was campaign chairman for Trump
g.add((ex.Paul_Manafort, ex.campaignChairman, ex.Donald_Trump))
# He was charged with money laundering, tax evasion, and foreign lobbying.
g.add((ex.Paul_Manafort, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Paul_Manafort, ex.chargedWith, ex.TaxEvasion))
g.add((ex.Paul_Manafort, ex.chargedWith, ex.ForeignLobbying))
# He was convicted for bank and tax fraud.
g.add((ex.Paul_Manafort, ex.convictedFor, ex.BankFraud))
g.add((ex.Paul_Manafort, ex.convictedFor, ex.TaxFraud))
# He pleaded guilty to conspiracy.
g.add((ex.Paul_Manafort, ex.pleadGuiltyTo, ex.Conspiracy))
# He was sentenced to prison.
g.add((ex.Paul_Manafort, ex.sentencedTo, ex.Prison))
# He negotiated a plea agreement.
g.add((ex.Paul_Manafort, ex.negotiated, ex.PleaBargain))
# --- Rick Gates ---
#Rick Gates was charged with money laundering, tax evasion and foreign lobbying.
g.add((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))
g.add((ex.Rick_Gates, ex.chargedWith, ex.ForeignLobbying))
#He pleaded guilty to conspiracy and lying to FBI.
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, ex.Conspiracy))
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, ex.LyingToFBI))
#Use the serialize method to write out the model in different formats on screen
print(g.serialize(format="ttl"))
# g.serialize("lab1.ttl", format="ttl") #or to file
#Loop through the triples in the model to print out all triples that have pleading guilty as predicate
for subject, obj in g[ : ex.pleadGuiltyTo : ]:
    print(subject, ex.pleadGuiltyTo, obj)
# Michael Cohen, Michael Flynn and the lying are part of lab 2, and therefore that answer is not provided this week
#Write a method (function) that submits your model for rendering and saves the returned image to file.
import requests
import shutil
def graphToImage(graph):
    data = {"rdf": graph, "from": "ttl", "to": "png"}
    link = "http://www.ldf.fi/service/rdf-grapher"
    response = requests.get(link, params=data, stream=True)
    # print(response.content)
    print(response.raw)
    with open("lab1.png", "wb") as fil:
        shutil.copyfileobj(response.raw, fil)
graph = g.serialize(format="ttl")
graphToImage(graph)
RDF programming with RDFlib (Lab 2)
from rdflib import Graph, URIRef, Namespace, Literal, XSD, BNode
from rdflib.collection import Collection
g = Graph()
g.parse("lab1.ttl", format="ttl") #Retrives the triples from lab 1
ex = Namespace('http://example.org/')
# --- Michael Cohen ---
#Michael Cohen was Donald Trump's attorney.
g.add((ex.Michael_Cohen, ex.attorneyTo, ex.Donald_Trump))
#He pleaded guilty to lying to Congress.
g.add((ex.Michael_Cohen, ex.pleadGuiltyTo, ex.LyingToCongress))
# --- Michael Flynn ---
#Michael Flynn was adviser to Trump.
g.add((ex.Michael_Flynn, ex.adviserTo, ex.Donald_Trump))
#He pleaded guilty to lying to the FBI.
g.add((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI))
# He negotiated a plea agreement.
g.add((ex.Michael_Flynn, ex.negotiated, ex.PleaBargain))
#How can you modify your knowledge graph to account for the different lying?
#Remove these to not have duplicates
g.remove((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.Michael_Flynn, ex.negotiated, ex.PleaBargain))
g.remove((ex.Rick_Gates, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.Rick_Gates, ex.pleadGuiltyTo, ex.Conspiracy))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.ForeignLobbying))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.remove((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))
g.remove((ex.Michael_Cohen, ex.pleadGuiltyTo, ex.LyingToCongress))
# --- Michael Flynn ---
FlynnLying = BNode()
g.add((FlynnLying, ex.crime, ex.LyingToFBI))
g.add((FlynnLying, ex.pleadGuiltyOn, Literal("2017-12-01", datatype=XSD.date)))
g.add((FlynnLying, ex.liedAbout, Literal("His communications with a former Russian ambassador during the presidential transition", datatype=XSD.string)))
g.add((FlynnLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Michael_Flynn, ex.pleadGuiltyTo, FlynnLying))
# --- Rick Gates ---
GatesLying = BNode()
Crimes = BNode()
Charged = BNode()
Collection(g, Crimes, [ex.LyingToFBI, ex.Conspiracy])
Collection(g, Charged, [ex.ForeignLobbying, ex.MoneyLaundering, ex.TaxEvasion])
g.add((GatesLying, ex.crime, Crimes))
g.add((GatesLying, ex.chargedWith, Charged))
g.add((GatesLying, ex.pleadGuiltyOn, Literal("2018-02-23", datatype=XSD.date)))
g.add((GatesLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Rick_Gates, ex.pleadGuiltyTo, GatesLying))
# --- Michael Cohen ---
CohenLying = BNode()
g.add((CohenLying, ex.crime, ex.LyingToCongress))
g.add((CohenLying, ex.liedAbout, ex.TrumpRealEstateDeal))
g.add((CohenLying, ex.prosecutorsAlleged, Literal("In an August 2017 letter Cohen sent to congressional committees investigating Russian election interference, he falsely stated that the project ended in January 2016", datatype=XSD.string)))
g.add((CohenLying, ex.muellerInvestigationAlleged, Literal("Cohen falsely stated that he had never agreed to travel to Russia for the real estate deal and that he did not recall any contact with the Russian government about the project", datatype=XSD.string)))
g.add((CohenLying, ex.pleadGuiltyOn, Literal("2018-11-29", datatype=XSD.date)))
g.add((CohenLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.Michael_Cohen, ex.pleadGuiltyTo, CohenLying))
print(g.serialize(format="ttl"))
#Save (serialize) your graph to a Turtle file.
# g.serialize("lab2.ttl", format="ttl")
#Add a few triples to the Turtle file with more information about Donald Trump.
'''
ex:Donald_Trump ex:address [ ex:city ex:Palm_Beach ;
ex:country ex:United_States ;
ex:postalCode 33480 ;
ex:residence ex:Mar_a_Lago ;
ex:state ex:Florida ;
ex:streetName "1100 S Ocean Blvd"^^xsd:string ] ;
ex:previousAddress [ ex:city ex:Washington_DC ;
ex:country ex:United_States ;
ex:phoneNumber "1 202 456 1414"^^xsd:string ;
ex:postalCode "20500"^^xsd:integer ;
ex:residence ex:The_White_House ;
ex:streetName "1600 Pennsylvania Ave."^^xsd:string ];
ex:marriedTo ex:Melania_Trump;
ex:fatherTo (ex:Ivanka_Trump ex:Donald_Trump_Jr ex:Tiffany_Trump ex:Eric_Trump ex:Barron_Trump).
'''
#Read (parse) the Turtle file back into a Python program, and check that the new triples are there
def serialize_Graph():
    newGraph = Graph()
    newGraph.parse("lab2.ttl")
    print(newGraph.serialize())
# serialize_Graph() #Don't need this to run until after adding the triples above to the ttl file
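# The check can also be done programmatically instead of eyeballing the printout; a small sketch
# (ex:marriedTo is one of the triples listed in the block above):
def check_new_triples():
    newGraph = Graph()
    newGraph.parse("lab2.ttl")
    print((ex.Donald_Trump, ex.marriedTo, ex.Melania_Trump) in newGraph)
# check_new_triples() #Like serialize_Graph(), only run this after adding the triples above to the ttl file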
#Write a method (function) that starts with Donald Trump prints out a graph depth-first to show how the other graph nodes are connected to him
visited_nodes = set()
def create_Tree(model, nodes):
    #Traverse the model breadth-first to create the tree.
    global visited_nodes
    tree = Graph()
    children = set()
    visited_nodes |= set(nodes)
    for s, p, o in model:
        if s in nodes and o not in visited_nodes:
            tree.add((s, p, o))
            visited_nodes.add(o)
            children.add(o)
        if o in nodes and s not in visited_nodes:
            invp = URIRef(f'{p}_inv') #_inv represents inverse of
            tree.add((o, invp, s))
            visited_nodes.add(s)
            children.add(s)
    if len(children) > 0:
        children_tree = create_Tree(model, children)
        for triple in children_tree:
            tree.add(triple)
    return tree
def print_Tree(tree, root, indent=0):
    #Print the tree depth-first.
    print(str(root))
    for s, p, o in tree:
        if s == root:
            print(' '*indent + ' ' + str(p), end=' ')
            print_Tree(tree, o, indent+1)
tree = create_Tree(g, [ex.Donald_Trump])
print_Tree(tree, ex.Donald_Trump)
SPARQL Programming (Lab 4)
NOTE: These tasks were performed on the old dataset; with the new dataset, some of these answers would be different.
from rdflib import Graph, Namespace, RDF, FOAF
from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE
g = Graph()
g.parse("Russia_investigation_kg.ttl")
# ----- RDFLIB -----
ex = Namespace('http://example.org#')
NS = {
'': ex,
'rdf': RDF,
'foaf': FOAF,
}
# Print out a list of all the predicates used in your graph.
task1 = g.query("""
SELECT DISTINCT ?p WHERE{
?s ?p ?o .
}
""", initNs=NS)
print(list(task1))
# Print out a sorted list of all the presidents represented in your graph.
task2 = g.query("""
SELECT DISTINCT ?president WHERE{
?s :president ?president .
}
ORDER BY ?president
""", initNs=NS)
print(list(task2))
# Create dictionary (Python dict) with all the represented presidents as keys. For each key, the value is a list of names of people indicted under that president.
task3_dic = {}
task3 = g.query("""
SELECT ?president ?person WHERE{
?s :president ?president;
:name ?person;
:outcome :indictment.
}
""", initNs=NS)
for president, person in task3:
    if president not in task3_dic:
        task3_dic[president] = [person]
    else:
        task3_dic[president].append(person)
print(task3_dic)
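# Alternatively, collections.defaultdict removes the need for the membership test; an equivalent sketch
# reusing the query result from above:
from collections import defaultdict
task3_alt = defaultdict(list)
for president, person in task3:
    task3_alt[president].append(person)
print(dict(task3_alt))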
# Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people.
# This task is a lot trickier than it needs to be. As far as I'm aware RDFLib has no HAVING support, so a query like this:
task4 = g.query("""
ASK {
SELECT (COUNT(?s) as ?count) WHERE{
?s :pardoned :true;
:president :Bill_Clinton .
}
HAVING (?count > 5)
}
""", initNs=NS)
print(task4.askAnswer)
# ...which works fine in Blazegraph and is valid SPARQL, will always return false in RDFLib because it uses HAVING. Instead you have to use a nested SELECT query like the one below, with FILTER instead of HAVING. Donald Trump has no pardons, so I have used Bill Clinton (who has 13 pardons) instead to check that the query works.
task4 = g.query("""
ASK{
SELECT ?count WHERE{{
SELECT (COUNT(?s) as ?count) WHERE{
?s :pardoned :true;
:president :Bill_Clinton .
}}
FILTER (?count > 5)
}
}
""", initNs=NS)
print(task4.askAnswer)
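# Another way around the HAVING quirk is to do the counting on the Python side; a rough sketch of the same check:
task4_alt = g.query("""
SELECT (COUNT(?s) as ?count) WHERE{
?s :pardoned :true;
:president :Bill_Clinton .
}
""", initNs=NS)
for row in task4_alt:
    print(int(row["count"]) > 5)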
# Use a DESCRIBE query to create a new graph with information about Donald Trump. Print out the graph in Turtle format.
# By all accounts, DESCRIBE queries are yet to be implemented in RDFLib, but there are attempts to implement them: https://github.com/RDFLib/rdflib/pull/2221 (issue and proposed solution raised) & https://github.com/RDFLib/rdflib/commit/2325b4a81724c1ccee3a131067db4fbf9b4e2629 (solution committed to RDFLib). That solution does not work yet, but the query below should work once DESCRIBE is implemented in RDFLib.
# task5 = g.query("""
# DESCRIBE :Donald_Trump
# """, initNs=NS)
# print(task5.serialize())
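# As a stop-gap until DESCRIBE is supported, a describe-like graph can be built by hand from the
# triples that mention Donald Trump (a rough sketch, not a full Concise Bounded Description):
task5_graph = Graph()
task5_graph.bind('', ex)
for p, o in g.predicate_objects(ex.Donald_Trump):
    task5_graph.add((ex.Donald_Trump, p, o))
for s, p in g.subject_predicates(ex.Donald_Trump):
    task5_graph.add((s, p, ex.Donald_Trump))
print(task5_graph.serialize(format='turtle'))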
# ----- SPARQLWrapper -----
namespace = "kb" #Default namespace
sparql = SPARQLWrapper("http://localhost:9999/blazegraph/namespace/"+ namespace + "/sparql") #Replace localhost:9999 with your URL
# The current dates are URIs; we want to change them to literals with datatype xsd:date for tasks 1 & 2
update_str = """
PREFIX ns1: <http://example.org#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
DELETE {
?s ns1:cp_date ?cp;
ns1:investigation_end ?end;
ns1:investigation_start ?start.
}
INSERT{
?s ns1:cp_date ?cpDate;
ns1:investigation_end ?endDate;
ns1:investigation_start ?startDate.
}
WHERE{
?s ns1:cp_date ?cp . #Date conviction was received
BIND (replace(str(?cp), str(ns1:), "") AS ?cpRemoved)
BIND (STRDT(STR(?cpRemoved), xsd:date) AS ?cpDate)
?s ns1:investigation_end ?end . #Investigation End
BIND (replace(str(?end), str(ns1:), "") AS ?endRemoved)
BIND (STRDT(STR(?endRemoved), xsd:date) AS ?endDate)
?s ns1:investigation_start ?start . #Investigation Start
BIND (replace(str(?start), str(ns1:), "") AS ?startRemoved)
BIND (STRDT(STR(?startRemoved), xsd:date) AS ?startDate)
}"""
sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()
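# A quick sanity check that the dates are now typed literals (assumes the update above has run):
sparql.setQuery("""
PREFIX ns1: <http://example.org#>
SELECT ?start (DATATYPE(?start) AS ?dt) WHERE{
?s ns1:investigation_start ?start .
} LIMIT 5
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print(result["start"]["value"], result["dt"]["value"])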
# Ask whether there was an ongoing indictment on the date 1990-01-01.
sparql.setQuery("""
PREFIX ns1: <http://example.org#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
ASK {
SELECT ?end ?start
WHERE{
?s ns1:investigation_end ?end;
ns1:investigation_start ?start;
ns1:outcome ns1:indictment.
FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
}
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(f"Are there any investigation on the 1990-01-01: {results['boolean']}")
# List ongoing indictments on the date 1990-01-01.
sparql.setQuery("""
PREFIX ns1: <http://example.org#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT ?s
WHERE{
?s ns1:investigation_end ?end;
ns1:investigation_start ?start;
ns1:outcome ns1:indictment.
FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print("The ongoing investigations on the 1990-01-01 are:")
for result in results["results"]["bindings"]:
    print(result["s"]["value"])
# Describe investigation number 100 (muellerkg:investigation_100).
sparql.setQuery("""
PREFIX ns1: <http://example.org#>
DESCRIBE ns1:investigation_100
""")
sparql.setReturnFormat(TURTLE)
results = sparql.query().convert()
print(results.serialize())
# Print out a list of all the types used in your graph.
sparql.setQuery("""
PREFIX ns1: <http://example.org#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT DISTINCT ?types
WHERE{
?s rdf:type ?types .
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
rdf_Types = []
for result in results["results"]["bindings"]:
    rdf_Types.append(result["types"]["value"])
print(rdf_Types)
# Update the graph so that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation.
update_str = """
PREFIX ns1: <http://example.org#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
INSERT{
?invest rdf:type ns1:Investigation .
}
WHERE{
?s ns1:investigation ?invest .
}"""
sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()
#To Test
sparql.setQuery("""
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ns1: <http://example.org#>
ASK{
ns1:watergate rdf:type ns1:Investigation.
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])
# Update the graph so that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson.
update_str = """
PREFIX ns1: <http://example.org#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
INSERT{
?person rdf:type ns1:IndictedPerson .
}
WHERE{
?s ns1:person ?person .
}"""
sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()
#To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson
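# For example, the same ASK pattern with the person named in the comment above:
sparql.setQuery("""
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ns1: <http://example.org#>
ASK{
ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson.
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])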
# Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal.
update_str = """
PREFIX ns1: <http://example.org#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
INSERT{
?invest dc:title ?investString.
}
WHERE{
?s ns1:investigation ?invest .
BIND (replace(str(?invest), str(ns1:), "") AS ?investString)
}"""
sparql.setQuery(update_str)
sparql.setMethod(POST)
sparql.query()
#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate"
# Print out a sorted list of all the indicted persons represented in your graph.
sparql.setQuery("""
PREFIX ns1: <http://example.org#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE{
?s ns1:person ?name;
ns1:outcome ns1:indictment.
}
ORDER BY ?name
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
names = []
for result in results["results"]["bindings"]:
    names.append(result["name"]["value"])
print(names)
# Print out the minimum, average and maximum indictment days for all the indictments in the graph.
sparql.setQuery("""
prefix xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ns1: <http://example.org#>
SELECT (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min) WHERE{
?s ns1:indictment_days ?days;
ns1:outcome ns1:indictment.
BIND (replace(str(?days), str(ns1:), "") AS ?daysR)
BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
}
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print(f'The longest an indictment lasted was: {result["max"]["value"]} days')
    print(f'The shortest an indictment lasted was: {result["min"]["value"]} days')
    print(f'The average indictment lasted: {result["avg"]["value"]} days')
# Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation.
sparql.setQuery("""
prefix xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ns1: <http://example.org#>
SELECT ?investigation (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min) WHERE{
?s ns1:indictment_days ?days;
ns1:outcome ns1:indictment;
ns1:investigation ?investigation.
BIND (replace(str(?days), str(ns1:), "") AS ?daysR)
BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
}
GROUP BY ?investigation
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print(f'{result["investigation"]["value"]} - min: {result["min"]["value"]}, max: {result["max"]["value"]}, avg: {result["avg"]["value"]}')