Lab Solutions: Difference between revisions
From info216
No edit summary |
No edit summary |
||
(21 intermediate revisions by 3 users not shown) | |||
Line 1: | Line 1: | ||
Here we will present suggested solutions after each lab. ''The page will be updated as the course progresses'' | |||
<!-- | <!-- | ||
=Getting started (Lab 1)= | |||
<syntaxhighlight> | <syntaxhighlight> | ||
from rdflib import Graph, Namespace | from rdflib import Graph, Namespace | ||
ex = Namespace('http://example.org/') | |||
g = Graph() | g = Graph() | ||
ex | g.bind("ex", ex) | ||
g. | # The Mueller Investigation was lead by Robert Mueller | ||
g.add((ex.MuellerInvestigation, ex.leadBy, ex.RobertMueller)) | |||
# | # It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, Michael Cohen, and Roger Stone. | ||
g.add((ex. | g.add((ex.MuellerInvestigation, ex.involved, ex.PaulManafort)) | ||
g.add((ex.MuellerInvestigation, ex.involved, ex.RickGates)) | |||
g.add((ex.MuellerInvestigation, ex.involved, ex.GeorgePapadopoulos)) | |||
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelFlynn)) | |||
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelCohen)) | |||
g.add((ex.MuellerInvestigation, ex.involved, ex.RogerStone)) | |||
# | # Paul Manafort was business partner of Rick Gates | ||
g.add((ex.PaulManafort, ex.businessPartner, ex.RickGates)) | |||
g.add((ex. | |||
# He was campaign chairman for Donald Trump | |||
g.add((ex.PaulManafort, ex.campaignChairman, ex.DonaldTrump)) | |||
# He was campaign chairman for Trump | |||
g.add((ex. | |||
# He was charged with money laundering, tax evasion, and foreign lobbying. | # He was charged with money laundering, tax evasion, and foreign lobbying. | ||
g.add((ex. | g.add((ex.PaulManafort, ex.chargedWith, ex.MoneyLaundering)) | ||
g.add((ex. | g.add((ex.PaulManafort, ex.chargedWith, ex.TaxEvasion)) | ||
g.add((ex. | g.add((ex.PaulManafort, ex.chargedWith, ex.ForeignLobbying)) | ||
# He was convicted for bank and tax fraud. | # He was convicted for bank and tax fraud. | ||
g.add((ex. | g.add((ex.PaulManafort, ex.convictedOf, ex.BankFraud)) | ||
g.add((ex. | g.add((ex.PaulManafort, ex.convictedOf, ex.TaxFraud)) | ||
# He pleaded guilty to conspiracy. | # He pleaded guilty to conspiracy. | ||
g.add((ex. | g.add((ex.PaulManafort, ex.pleadGuiltyTo, ex.Conspiracy)) | ||
# He was sentenced to prison. | # He was sentenced to prison. | ||
g.add((ex. | g.add((ex.PaulManafort, ex.sentencedTo, ex.Prison)) | ||
# He negotiated a plea agreement. | # He negotiated a plea agreement. | ||
g.add((ex. | g.add((ex.PaulManafort, ex.negotiated, ex.PleaAgreement)) | ||
# Rick Gates was charged with money laundering, tax evasion and foreign lobbying. | |||
#Rick Gates was charged with money laundering, tax evasion and foreign lobbying. | g.add((ex.RickGates, ex.chargedWith, ex.MoneyLaundering)) | ||
g.add((ex. | g.add((ex.RickGates, ex.chargedWith, ex.TaxEvasion)) | ||
g.add((ex. | g.add((ex.RickGates, ex.chargedWith, ex.ForeignLobbying)) | ||
g.add((ex. | |||
#He pleaded guilty to conspiracy and lying to FBI. | # He pleaded guilty to conspiracy and lying to FBI. | ||
g.add((ex. | g.add((ex.RickGates, ex.pleadGuiltyTo, ex.Conspiracy)) | ||
g.add((ex. | g.add((ex.RickGates, ex.pleadGuiltyTo, ex.LyingToFBI)) | ||
#Use the serialize method to write out the model in different formats on screen | # Use the serialize method of rdflib.Graph to write out the model in different formats (on screen or to file) | ||
print(g.serialize(format="ttl")) | print(g.serialize(format="ttl")) # To screen | ||
# g.serialize("lab1.ttl", format="ttl") # | #g.serialize("lab1.ttl", format="ttl") # To file | ||
#Loop through the triples in the model to print out all triples that have pleading guilty as predicate | # Loop through the triples in the model to print out all triples that have pleading guilty as predicate | ||
for subject, object in g[ : ex.pleadGuiltyTo : ]: | for subject, object in g[ : ex.pleadGuiltyTo :]: | ||
print(subject, ex.pleadGuiltyTo, object) | print(subject, ex.pleadGuiltyTo, object) | ||
# --- IF you have more time tasks --- | |||
# Michael Cohen, Michael Flynn and the lying is part of lab 2 and therefore the answer is not provided this week | # Michael Cohen, Michael Flynn and the lying is part of lab 2 and therefore the answer is not provided this week | ||
Line 72: | Line 74: | ||
import shutil | import shutil | ||
def graphToImage( | def graphToImage(graphInput): | ||
data = {"rdf": | data = {"rdf":graphInput, "from":"ttl", "to":"png"} | ||
link = "http://www.ldf.fi/service/rdf-grapher" | link = "http://www.ldf.fi/service/rdf-grapher" | ||
response = requests.get(link, params = data, stream=True) | response = requests.get(link, params = data, stream=True) | ||
# print(response.content) | # print(response.content) | ||
print(response.raw) | print(response.raw) | ||
with open("lab1.png", "wb") as | with open("lab1.png", "wb") as file: | ||
shutil.copyfileobj(response.raw, | shutil.copyfileobj(response.raw, file) | ||
graph = g.serialize(format="ttl") | graph = g.serialize(format="ttl") | ||
graphToImage(graph) | graphToImage(graph) | ||
</syntaxhighlight> | </syntaxhighlight> | ||
=RDF programming with RDFlib (Lab 2)= | |||
<syntaxhighlight> | <syntaxhighlight> | ||
from rdflib import Graph, Namespace, Literal, BNode, XSD, FOAF, RDF, URIRef | |||
from rdflib import Graph | |||
from rdflib.collection import Collection | from rdflib.collection import Collection | ||
g = Graph() | g = Graph() | ||
ex = Namespace( | # Getting the graph created in the first lab | ||
g.parse("lab1.ttl", format="ttl") | |||
ex = Namespace("http://example.org/") | |||
g.bind("ex", ex) | |||
g.bind("foaf", FOAF) | |||
# --- Michael Cohen --- | # --- Michael Cohen --- | ||
#Michael Cohen was Donald Trump's attorney. | # Michael Cohen was Donald Trump's attorney. | ||
g.add((ex. | g.add((ex.MichaelCohen, ex.attorneyTo, ex.DonaldTrump)) | ||
#He pleaded guilty | # He pleaded guilty for lying to Congress. | ||
g.add((ex. | g.add((ex.MichaelCohen, ex.pleadGuiltyTo, ex.LyingToCongress)) | ||
# --- Michael Flynn --- | # --- Michael Flynn --- | ||
#Michael Flynn was adviser to Trump. | # Michael Flynn was adviser to Donald Trump. | ||
g.add((ex. | g.add((ex.MichaelFlynn, ex.adviserTo, ex.DonaldTrump)) | ||
#He pleaded guilty | # He pleaded guilty for lying to the FBI. | ||
g.add((ex. | g.add((ex.MichaelFlynn, ex.pleadGuiltyTo, ex.LyingToFBI)) | ||
# He negotiated a plea agreement. | # He negotiated a plea agreement. | ||
g.add((ex. | g.add((ex.MichaelFlynn, ex.negotiated, ex.PleaAgreement)) | ||
# | # Change your graph so it represents instances of lying as blank nodes. | ||
#Remove | # Remove the triples that will be duplicated | ||
g.remove((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI)) | g.remove((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI)) | ||
g.remove((ex.Michael_Flynn, ex.negoiated, ex.PleaBargain)) | g.remove((ex.Michael_Flynn, ex.negoiated, ex.PleaBargain)) | ||
Line 181: | Line 188: | ||
print(newGraph.serialize()) | print(newGraph.serialize()) | ||
#Don't need this to run until after adding the triples above to the ttl file | |||
# serialize_Graph() | |||
#Write a method (function) that starts with Donald Trump prints out a graph depth-first to show how the other graph nodes are connected to him | #Write a method (function) that starts with Donald Trump prints out a graph depth-first to show how the other graph nodes are connected to him | ||
Line 218: | Line 226: | ||
tree = create_Tree(g, [ex.Donald_Trump]) | tree = create_Tree(g, [ex.Donald_Trump]) | ||
print_Tree(tree, ex.Donald_Trump) | print_Tree(tree, ex.Donald_Trump) | ||
</syntaxhighlight> | </syntaxhighlight> | ||
=SPARQL (Lab 3-4)= | |||
===List all triples=== | ===List all triples=== | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
SELECT ?s ?p ?o | SELECT ?s ?p ?o | ||
WHERE {?s ?p ?o .} | WHERE {?s ?p ?o .} | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 231: | Line 238: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
SELECT ?s ?p ?o | SELECT ?s ?p ?o | ||
WHERE {?s ?p ?o .} | WHERE {?s ?p ?o .} | ||
LIMIT 100 | LIMIT 100 | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 238: | Line 245: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
SELECT (COUNT(*) as ?count) | SELECT (COUNT(*) as ?count) | ||
WHERE {?s ?p ?o .} | WHERE {?s ?p ?o .} | ||
</syntaxhighlight> | </syntaxhighlight> | ||
===Count the number of indictments=== | ===Count the number of indictments=== | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
SELECT (COUNT(?ind) as ?amount) | SELECT (COUNT(?ind) as ?amount) | ||
WHERE { | WHERE { | ||
?s ns1:outcome ?ind; | |||
ns1:outcome ns1:indictment. | |||
} | } | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 254: | Line 261: | ||
===List the names of everyone who pleaded guilty, along with the name of the investigation=== | ===List the names of everyone who pleaded guilty, along with the name of the investigation=== | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
SELECT ?name ?invname | SELECT ?name ?invname | ||
WHERE { | WHERE { | ||
?s ns1:name ?name; | |||
ns1:investigation ?invname; | |||
ns1:outcome ns1:guilty-plea . | |||
} | } | ||
</syntaxhighlight> | </syntaxhighlight> | ||
===List the names of everyone who were convicted, but who had their conviction overturned by which president=== | ===List the names of everyone who were convicted, but who had their conviction overturned by which president=== | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
SELECT ?name ?president | SELECT ?name ?president | ||
WHERE { | WHERE { | ||
?s ns1:name ?name; | |||
ns1:president ?president; | |||
ns1:outcome ns1:conviction; | |||
ns1:overturned ns1:true. | |||
} | } | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 279: | Line 286: | ||
===For each investigation, list the number of indictments made=== | ===For each investigation, list the number of indictments made=== | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
SELECT ?invs (COUNT(?invs) as ?count) | SELECT ?invs (COUNT(?invs) as ?count) | ||
WHERE { | WHERE { | ||
?s ns1:investigation ?invs; | |||
ns1:outcome ns1:indictment . | |||
} | } | ||
GROUP BY ?invs | GROUP BY ?invs | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 291: | Line 298: | ||
===For each investigation with multiple indictments, list the number of indictments made=== | ===For each investigation with multiple indictments, list the number of indictments made=== | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
SELECT ?invs (COUNT(?invs) as ?count) | SELECT ?invs (COUNT(?invs) as ?count) | ||
WHERE { | WHERE { | ||
?s ns1:investigation ?invs; | |||
ns1:outcome ns1:indictment . | |||
} | } | ||
GROUP BY ?invs | GROUP BY ?invs | ||
HAVING(?count > 1) | HAVING(?count > 1) | ||
Line 304: | Line 311: | ||
===For each investigation with multiple indictments, list the number of indictments made, sorted with the most indictments first=== | ===For each investigation with multiple indictments, list the number of indictments made, sorted with the most indictments first=== | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
SELECT ?invs (COUNT(?invs) as ?count) | SELECT ?invs (COUNT(?invs) as ?count) | ||
WHERE { | WHERE { | ||
?s ns1:investigation ?invs; | |||
ns1:outcome ns1:indictment . | |||
} | } | ||
GROUP BY ?invs | GROUP BY ?invs | ||
HAVING(?count > 1) | HAVING(?count > 1) | ||
Line 318: | Line 325: | ||
===For each president, list the numbers of convictions and of pardons made=== | ===For each president, list the numbers of convictions and of pardons made=== | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
SELECT ?president (COUNT(?outcome) as ?conviction) (COUNT(?pardon) as ?pardons) | SELECT ?president (COUNT(?outcome) as ?conviction) (COUNT(?pardon) as | ||
?pardons) | |||
WHERE { | WHERE { | ||
?s ns1:president ?president; | |||
ns1:outcome ?outcome ; | |||
ns1:outcome ns1:conviction. | |||
OPTIONAL{ | |||
?s ns1:pardoned ?pardon . | |||
FILTER (?pardon = true) | |||
} | |||
} | } | ||
GROUP BY ?president | GROUP BY ?president | ||
Line 336: | Line 344: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
DELETE{?s ns1:name ?o} | DELETE{?s ns1:name ?o} | ||
Line 346: | Line 354: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
PREFIX foaf: <http://xmlns.com/foaf/0.1/> | PREFIX foaf: <http://xmlns.com/foaf/0.1/> | ||
Line 352: | Line 360: | ||
INSERT {?person foaf:name ?name} | INSERT {?person foaf:name ?name} | ||
WHERE { | WHERE { | ||
?investigation ns1:person ?person . | |||
BIND(REPLACE(STR(?person), STR(ns1:), "") AS ?name) | |||
} | } | ||
Line 359: | Line 367: | ||
INSERT {?president foaf:name ?name} | INSERT {?president foaf:name ?name} | ||
WHERE { | WHERE { | ||
?investigation ns1:president ?president . | |||
BIND(REPLACE(STR(?president), STR(ns1:), "") AS ?name) | |||
} | } | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 367: | Line 375: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
INSERT DATA { | INSERT DATA { | ||
ns1:George_Papadopoulos ns1:adviserTo ns1:Donald_Trump; | |||
ns1:pleadGuiltyTo ns1:LyingToFBI; | |||
ns1:sentencedTo ns1:Prison. | |||
ns1:Roger_Stone a ns1:Republican; | |||
ns1:adviserTo ns1:Donald_Trump; | |||
ns1:officialTo ns1:Trump_Campaign; | |||
ns1:interactedWith ns1:Wikileaks; | |||
ns1:providedTestimony ns1:House_Intelligence_Committee; | |||
ns1:clearedOf ns1:AllCharges. | |||
} | } | ||
#To test if added | #To test if added | ||
SELECT ?p ?o | SELECT ?p ?o | ||
WHERE {ns1:Roger_Stone ?p ?o .} | WHERE {ns1:Roger_Stone ?p ?o .} | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 390: | Line 398: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
DELETE DATA { | DELETE DATA { | ||
ns1:Roger_Stone ns1:clearedOf ns1:AllCharges . | |||
} | } | ||
INSERT DATA { | INSERT DATA { | ||
ns1:Roger_Stone ns1:indictedFor ns1:ObstructionOfJustice, | |||
ns1:WitnessTampering, | |||
ns1:FalseStatements. | |||
} | } | ||
#The task specifically requested DELETE DATA & INSERT DATA, but below is a more efficient solution | #The task specifically requested DELETE DATA & INSERT DATA, but below is | ||
a more efficient solution | |||
DELETE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.} | DELETE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.} | ||
INSERT{ | INSERT{ | ||
ns1:Roger_Stone ns1:indictedFor ns1:ObstructionOfJustice, | |||
ns1:WitnessTampering, | |||
ns1:FalseStatements. | |||
} | } | ||
WHERE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.} | WHERE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.} | ||
Line 416: | Line 425: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
DESCRIBE ?o | DESCRIBE ?o | ||
WHERE {ns1:Roger_Stone ns1:indictedFor ?o .} | WHERE {ns1:Roger_Stone ns1:indictedFor ?o .} | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 425: | Line 434: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
CONSTRUCT { | CONSTRUCT { | ||
ns1:Roger_Stone ?p ?o. | |||
?s ?p2 ns1:Roger_Stone. | |||
} | } | ||
WHERE { | WHERE { | ||
ns1:Roger_Stone ?p ?o . | |||
?s ?p2 ns1:Roger_Stone | |||
} | } | ||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 440: | Line 449: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
PREFIX dbp: <https://dbpedia.org/page/> | PREFIX dbp: <https://dbpedia.org/page/> | ||
DELETE {?s ns1:person ?o1} | DELETE {?s ns1:person ?o1} | ||
INSERT {?s ns1:person ?o2} | INSERT {?s ns1:person ?o2} | ||
WHERE{ | WHERE{ | ||
?s ns1:person ?o1 . | |||
BIND (IRI(replace(str(?o1), str(ns1:), str(dbp:))) AS ?o2) | |||
} | } | ||
#This update changes the object in triples with ns1:person as the predicate. It changes its prefix of ns1 (which is the "shortcut/shorthand" for example.org) to the prefix dbp (dbpedia.org) | #This update changes the object in triples with ns1:person as the | ||
predicate. It changes its prefix of ns1 (which is the | |||
"shortcut/shorthand" for example.org) to the prefix dbp (dbpedia.org) | |||
</syntaxhighlight> | </syntaxhighlight> | ||
Line 456: | Line 467: | ||
<syntaxhighlight lang="SPARQL"> | <syntaxhighlight lang="SPARQL"> | ||
#Whilst this solution is not exactly what the task asks for, I feel like this is more appropriate given the dataset. The following update | #Whilst this solution is not exactly what the task asks for, I feel like | ||
changes the objects that use the cp_date as predicate from a URI, to a literal with date as its datatype | this is more appropriate given the dataset. The following update | ||
changes the objects that use the cp_date as predicate from a URI, to a | |||
literal with date as its datatype | |||
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
DELETE {?s ns1:cp_date ?o} | DELETE {?s ns1:cp_date ?o} | ||
INSERT{?s ns1:cp_date ?o3} | INSERT{?s ns1:cp_date ?o3} | ||
WHERE{ | WHERE{ | ||
?s ns1:cp_date ?o . | |||
BIND (replace(str(?o), str(ns1:), "") AS ?o2) | |||
BIND (STRDT(STR(?o2), xsd:date) AS ?o3) | |||
} | } | ||
#To test: | #To test: | ||
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
SELECT ?s ?o | SELECT ?s ?o | ||
WHERE{ | WHERE{ | ||
?s ns1:cp_date ?o. | |||
FILTER(datatype(?o) = xsd:date) | |||
} | } | ||
#To change it to an integer, use the following code, and to change it back to date, swap "xsd:integer" to "xsd:date" | #To change it to an integer, use the following code, and to change it | ||
back to date, swap "xsd:integer" to "xsd:date" | |||
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | ||
PREFIX ns1: <http://example.org#> | PREFIX ns1: <http://example.org#> | ||
DELETE {?s ns1:cp_date ?o} | DELETE {?s ns1:cp_date ?o} | ||
INSERT{?s ns1:cp_date ?o2} | INSERT{?s ns1:cp_date ?o2} | ||
WHERE{ | WHERE{ | ||
?s ns1:cp_date ?o . | |||
BIND (STRDT(STR(?o), xsd:integer) AS ?o2) | |||
} | } | ||
</syntaxhighlight> | </syntaxhighlight> | ||
=SPARQL Programming (Lab 5)= | |||
<syntaxhighlight> | <syntaxhighlight> | ||
Line 567: | Line 581: | ||
print(task4.askAnswer) | print(task4.askAnswer) | ||
# Which works fine in Blazegraph and is a valid SPARQL query will always provide false in RDFLib | # Which works fine in Blazegraph and is a valid SPARQL query will always provide false in RDFLib because it uses HAVING. | ||
# Instead you have to use a nested SELECT query like below, where you use FILTER instead of HAVING. Donald Trump has no pardons, | |||
# so I have instead chosen Bill Clinton with 13 to check if the query works. | |||
task4 = g.query(""" | task4 = g.query(""" | ||
Line 585: | Line 601: | ||
# Use a DESCRIBE query to create a new graph with information about Donald Trump. Print out the graph in Turtle format. | # Use a DESCRIBE query to create a new graph with information about Donald Trump. Print out the graph in Turtle format. | ||
# By all accounts, it seems DESCRIBE | # By all accounts, it seems DESCRIBE queries are yet to be implemented in RDFLib, but they are attempting to implement it: | ||
# https://github.com/RDFLib/rdflib/pull/2221 <--- Issue and proposed solution raised | |||
# https://github.com/RDFLib/rdflib/commit/2325b4a81724c1ccee3a131067db4fbf9b4e2629 <--- Solution committed to RDFLib | |||
# This solution does not work. However, this proposed solution should work if DESCRIBE is implemented in RDFLib | |||
# task5 = g.query(""" | # task5 = g.query(""" | ||
Line 595: | Line 614: | ||
# ----- SPARQLWrapper ----- | # ----- SPARQLWrapper ----- | ||
SERVER = 'http://localhost:7200' #Might need to replace this | |||
REPOSITORY = 'Labs' #Replace with your repository name | |||
# | # Query Endpoint | ||
sparql = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}') | |||
# Update Endpoint | |||
sparqlUpdate = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}/statements') | |||
} | |||
# Ask whether there was an ongoing indictment on the date 1990-01-01. | # Ask whether there was an ongoing indictment on the date 1990-01-01. | ||
Line 675: | Line 667: | ||
results = sparql.query().convert() | results = sparql.query().convert() | ||
print(results | print(results) | ||
# Print out a list of all the types used in your graph. | # Print out a list of all the types used in your graph. | ||
Line 710: | Line 702: | ||
}""" | }""" | ||
sparqlUpdate.setQuery(update_str) | |||
sparqlUpdate.setMethod(POST) | |||
sparqlUpdate.query() | |||
#To Test | #To Test | ||
Line 737: | Line 729: | ||
} | } | ||
WHERE{ | WHERE{ | ||
?s ns1: | ?s ns1:name ?person . | ||
}""" | }""" | ||
sparqlUpdate.setQuery(update_str) | |||
sparqlUpdate.setMethod(POST) | |||
sparqlUpdate.query() | |||
#To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson | #To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson | ||
Line 760: | Line 752: | ||
}""" | }""" | ||
sparqlUpdate.setQuery(update_str) | |||
sparqlUpdate.setMethod(POST) | |||
sparqlUpdate.query() | |||
#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate" | #Same test as above, replace it with e.g. ns1:watergate dc:title "watergate" | ||
Line 773: | Line 765: | ||
SELECT ?name | SELECT ?name | ||
WHERE{ | WHERE{ | ||
?s ns1: | ?s ns1:name ?name; | ||
ns1:outcome ns1:indictment. | |||
} | } | ||
ORDER BY ?name | ORDER BY ?name | ||
Line 790: | Line 782: | ||
# Print out the minimum, average and maximum indictment days for all the indictments in the graph. | # Print out the minimum, average and maximum indictment days for all the indictments in the graph. | ||
sparql.setQuery(""" | sparql.setQuery(""" | ||
prefix xsd: <http://www.w3.org/2001/XMLSchema#> | prefix xsd: <http://www.w3.org/2001/XMLSchema#> | ||
Line 812: | Line 805: | ||
# Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation. | # Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation. | ||
sparql.setQuery(""" | sparql.setQuery(""" | ||
prefix xsd: <http://www.w3.org/2001/XMLSchema#> | prefix xsd: <http://www.w3.org/2001/XMLSchema#> | ||
Line 835: | Line 829: | ||
</syntaxhighlight> | </syntaxhighlight> | ||
==CSV | =Wikidata SPARQL (Lab 6)= | ||
<syntaxhighlight> | ===Use a DESCRIBE query to retrieve some triples about your entity=== | ||
<syntaxhighlight lang="SPARQL"> | |||
DESCRIBE wd:Q42 LIMIT 100 | |||
</syntaxhighlight> | |||
===Use a SELECT query to retrieve the first 100 triples about your entity=== | |||
<syntaxhighlight lang="SPARQL"> | |||
SELECT * WHERE { | |||
wd:Q42 ?p ?o . | |||
} LIMIT 100 | |||
</syntaxhighlight> | |||
===Write a local SELECT query that embeds a SERVICE query to retrieve the first 100 triples about your entity to your local machine=== | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
SELECT * WHERE { | |||
SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { | |||
SELECT * WHERE { | |||
wd:Q42 ?p ?o . | |||
} LIMIT 100 | |||
} | |||
} | |||
</syntaxhighlight> | |||
===Change the SELECT query to an INSERT query that adds the Wikidata triples your local repository=== | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
INSERT { | |||
wd:Q42 ?p ?o . | |||
} WHERE { | |||
SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { | |||
SELECT * WHERE { | |||
wd:Q42 ?p ?o . | |||
} LIMIT 100 | |||
} | |||
} | |||
</syntaxhighlight> | |||
===Use a FILTER statement to only SELECT primary triples in this sense.=== | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
SELECT * WHERE { | |||
wd:Q42 ?p ?o . | |||
FILTER (STRSTARTS(STR(?p), STR(wdt:))) | |||
FILTER (STRSTARTS(STR(?o), STR(wd:))) | |||
} LIMIT 100 | |||
</syntaxhighlight> | |||
===Use Wikidata's in-built SERVICE wikibase:label to get labels for all the object resources=== | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
SELECT ?p ?oLabel WHERE { | |||
wd:Q42 ?p ?o . | |||
FILTER (STRSTARTS(STR(?p), STR(wdt:))) | |||
FILTER (STRSTARTS(STR(?o), STR(wd:))) | |||
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } | |||
} LIMIT 100 | |||
</syntaxhighlight> | |||
===Edit your query (by relaxing the FILTER expression) so it also returns triples where the object has DATATYPE xsd:string.=== | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
SELECT ?p ?oLabel ?o WHERE { | |||
wd:Q42 ?p ?o . | |||
FILTER (STRSTARTS(STR(?p), STR(wdt:))) | |||
FILTER ( | |||
STRSTARTS(STR(?o), STR(wd:)) || # comment out this whole line to see only string literals! | |||
DATATYPE(?o) = xsd:string | |||
) | |||
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } | |||
} LIMIT 100 | |||
</syntaxhighlight> | |||
===Relax the FILTER expression again so it also returns triples with these three predicates (rdfs:label, skos:altLabel and schema:description) === | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
SELECT ?p ?oLabel ?o WHERE { | |||
wd:Q42 ?p ?o . | |||
FILTER ( | |||
(STRSTARTS(STR(?p), STR(wdt:)) && # comment out these three lines to see only fingerprint literals! | |||
STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string) | |||
|| | |||
(?p IN (rdfs:label, skos:altLabel, schema:description) && | |||
DATATYPE(?o) = rdf:langString && LANG(?o) = "en") | |||
) | |||
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } | |||
} LIMIT 100 | |||
</syntaxhighlight> | |||
===Try to restrict the FILTER expression again so that, when the predicate is rdfs:label, skos:altLabel and schema:description, the object must have LANG "en" === | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wikibase: <http://wikiba.se/ontology#> | |||
PREFIX bd: <http://www.bigdata.com/rdf#> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
PREFIX wdt: <http://www.wikidata.org/prop/direct/> | |||
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | |||
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> | |||
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> | |||
PREFIX skos: <http://www.w3.org/2004/02/skos/core#> | |||
PREFIX schema: <http://schema.org/> | |||
SELECT * WHERE { | |||
SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { | |||
SELECT ?p ?oLabel ?o WHERE { | |||
wd:Q42 ?p ?o . | |||
FILTER ( | |||
(STRSTARTS(STR(?p), STR(wdt:)) && | |||
STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string) | |||
|| | |||
(?p IN (rdfs:label, skos:altLabel, schema:description) && | |||
DATATYPE(?o) = rdf:langString && LANG(?o) = "en") | |||
) | |||
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } | |||
} LIMIT 100 | |||
} | |||
} | |||
</syntaxhighlight> | |||
===Change the SELECT query to an INSERT query that adds the Wikidata triples your local repository === | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wikibase: <http://wikiba.se/ontology#> | |||
PREFIX bd: <http://www.bigdata.com/rdf#> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
PREFIX wdt: <http://www.wikidata.org/prop/direct/> | |||
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | |||
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> | |||
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> | |||
PREFIX skos: <http://www.w3.org/2004/02/skos/core#> | |||
PREFIX schema: <http://schema.org/> | |||
INSERT { | |||
wd:Q42 ?p ?o . | |||
?o rdfs:label ?oLabel . | |||
} WHERE { | |||
SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { | |||
SELECT ?p ?oLabel ?o WHERE { | |||
wd:Q42 ?p ?o . | |||
FILTER ( | |||
(STRSTARTS(STR(?p), STR(wdt:)) && | |||
STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string) | |||
|| | |||
(?p IN (rdfs:label, skos:altLabel, schema:description) && | |||
DATATYPE(?o) = rdf:langString && LANG(?o) = "en") | |||
) | |||
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } | |||
} LIMIT 500 | |||
} | |||
} | |||
</syntaxhighlight> | |||
==If you have more time == | |||
===You must therefore REPLACE all wdt: prefixes of properties with wd: prefixes and BIND the new URI AS a new variable, for example ?pw. === | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
SELECT ?pwLabel ?oLabel WHERE { | |||
wd:Q42 ?p ?o . | |||
FILTER (STRSTARTS(STR(?p), STR(wdt:))) | |||
FILTER (STRSTARTS(STR(?o), STR(wd:))) | |||
BIND (IRI(REPLACE(STR(?p), STR(wdt:), STR(wd:))) AS ?pw) | |||
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } | |||
} LIMIT 100 | |||
</syntaxhighlight> | |||
===Now you can go back to the SELECT statement that returned primary triples with only resource objects (not literal objects or fingerprints). Extend it so it also includes primary triples "one step out", i.e., triples where the subjects are objects of triples involving your reference entity. === | |||
<syntaxhighlight lang="SPARQL"> | |||
PREFIX wikibase: <http://wikiba.se/ontology#> | |||
PREFIX bd: <http://www.bigdata.com/rdf#> | |||
PREFIX wd: <http://www.wikidata.org/entity/> | |||
PREFIX wdt: <http://www.wikidata.org/prop/direct/> | |||
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> | |||
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> | |||
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> | |||
PREFIX skos: <http://www.w3.org/2004/02/skos/core#> | |||
PREFIX schema: <http://schema.org/> | |||
INSERT { | |||
wd:Q42 ?p1 ?o1 . | |||
?o1 rdfs:label ?o1Label . | |||
?o1 ?p2 ?o2 . | |||
?o2 rdfs:label ?o2Label . | |||
} WHERE { | |||
SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { | |||
SELECT ?p1 ?o1Label ?o1 ?p2 ?o2Label ?o2 WHERE { | |||
wd:Q42 ?p1 ?o1 . | |||
?o1 ?p2 ?o2 . | |||
FILTER ( | |||
STRSTARTS(STR(?p1), STR(wdt:)) && | |||
STRSTARTS(STR(?o1), STR(wd:)) && | |||
STRSTARTS(STR(?p2), STR(wdt:)) && | |||
STRSTARTS(STR(?o2), STR(wd:)) | |||
) | |||
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } | |||
} LIMIT 500 | |||
} | |||
} | |||
</syntaxhighlight> | |||
=CSV to RDF (Lab 7)= | |||
<syntaxhighlight lang="Python"> | |||
#Imports | #Imports | ||
Line 846: | Line 1,081: | ||
SERVER = "https://api.dbpedia-spotlight.org/en/annotate" | SERVER = "https://api.dbpedia-spotlight.org/en/annotate" | ||
# Test around with the confidence, and see how many names changes depending on the confidence. However, be aware that anything lower than this (0.83) it will replace James W. McCord and other names that includes James with LeBron James | # Test around with the confidence, and see how many names changes depending on the confidence. | ||
# However, be aware that anything lower than this (0.83) it will replace James W. McCord and other names that includes James with LeBron James | |||
CONFIDENCE = 0.83 | CONFIDENCE = 0.83 | ||
# This function uses DBpedia Spotlight, which was not a part of the CSV lab this year. | |||
def annotate_entity(entity, filters={'types': 'DBpedia:Person'}): | def annotate_entity(entity, filters={'types': 'DBpedia:Person'}): | ||
annotations = [] | annotations = [] | ||
Line 866: | Line 1,103: | ||
df = df.replace(nan, None) | df = df.replace(nan, None) | ||
#Function that prepares the values to be added to the graph as a URI or Literal | #Function that prepares the values to be added to the graph as a URI (ex infront) or Literal | ||
def prepareValue(row): | def prepareValue(row): | ||
if row == None: #none type | if row == None: #none type | ||
Line 903: | Line 1,140: | ||
#Spotlight Search | #Spotlight Search | ||
name = annotate_entity(str(row['name'])) | name = annotate_entity(str(row['name'])) | ||
president = annotate_entity(str(row['president']).replace(".", "")) | president = annotate_entity(str(row['president']).replace(".", "")) | ||
Line 935: | Line 1,171: | ||
csv_to_rdf(df) | csv_to_rdf(df) | ||
print(g.serialize()) | print(g.serialize()) | ||
g.serialize("lab7.ttl", format="ttl") | |||
</syntaxhighlight> | |||
=JSON-LD (Lab 8)= | |||
== Task 1) Basic JSON-LD == | |||
<syntaxhighlight lang="JSON-LD"> | |||
{ | |||
"@context": { | |||
"@base": "http://example.org/", | |||
"edges": "http://example.org/triple", | |||
"start": "http://example.org/source", | |||
"rel": "http://exaxmple.org/predicate", | |||
"end": "http://example.org/object", | |||
"Person" : "http://example.org/Person", | |||
"birthday" : { | |||
"@id" : "http://example.org/birthday", | |||
"@type" : "xsd:date" | |||
}, | |||
"nameEng" : { | |||
"@id" : "http://example.org/en/name", | |||
"@language" : "en" | |||
}, | |||
"nameFr" : { | |||
"@id" : "http://example.org/fr/name", | |||
"@language" : "fr" | |||
}, | |||
"nameCh" : { | |||
"@id" : "http://example.org/ch/name", | |||
"@language" : "ch" | |||
}, | |||
"age" : { | |||
"@id" : "http://example.org/age", | |||
"@type" : "xsd:int" | |||
}, | |||
"likes" : "http://example.org/games/likes", | |||
"haircolor" : "http://example.org/games/haircolor" | |||
}, | |||
"@graph": [ | |||
{ | |||
"@id": "people/Jeremy", | |||
"@type": "Person", | |||
"birthday" : "1987.1.1", | |||
"nameEng" : "Jeremy", | |||
"age" : 26 | |||
}, | |||
{ | |||
"@id": "people/Tom", | |||
"@type": "Person" | |||
}, | |||
{ | |||
"@id": "people/Ju", | |||
"@type": "Person", | |||
"birthday" : "2001.1.1", | |||
"nameCh" : "Ju", | |||
"age" : 22, | |||
"likes" : "bastketball" | |||
}, | |||
{ | |||
"@id": "people/Louis", | |||
"@type": "Person", | |||
"birthday" : "1978.1.1", | |||
"haircolor" : "Black", | |||
"nameFr" : "Louis", | |||
"age" : 45 | |||
}, | |||
{"edges" : [ | |||
{ | |||
"start" : "people/Jeremy", | |||
"rel" : "knows", | |||
"end" : "people/Tom" | |||
}, | |||
{ | |||
"start" : "people/Tom", | |||
"rel" : "knows", | |||
"end" : "people/Louis" | |||
}, | |||
{ | |||
"start" : "people/Louis", | |||
"rel" : "teaches", | |||
"end" : "people/Ju" | |||
}, | |||
{ | |||
"start" : "people/Ju", | |||
"rel" : "plays", | |||
"end" : "people/Jeremy" | |||
}, | |||
{ | |||
"start" : "people/Ju", | |||
"rel" : "plays", | |||
"end" : "people/Tom" | |||
} | |||
]} | |||
] | |||
} | |||
</syntaxhighlight> | |||
== Task 2 & 3) Retrieving JSON-LD from ConceptNet / Programming JSON-LD in Python == | |||
<syntaxhighlight lang="Python"> | |||
import rdflib | |||
CN_BASE = 'http://api.conceptnet.io/c/en/' | |||
g = rdflib.Graph() | |||
g.parse(CN_BASE+'indictment', format='json-ld') | |||
# To download JSON object: | |||
import json | |||
import requests | |||
json_obj = requests.get(CN_BASE+'indictment').json() | |||
# To change the @context: | |||
context = { | |||
"@base": "http://ex.org/", | |||
"edges": "http://ex.org/triple/", | |||
"start": "http://ex.org/s/", | |||
"rel": "http://ex.org/p/", | |||
"end": "http://ex.org/o/", | |||
"label": "http://ex.org/label" | |||
} | |||
json_obj['@context'] = context | |||
json_str = json.dumps(json_obj) | |||
g = rdflib.Graph() | |||
g.parse(data=json_str, format='json-ld') | |||
# To extract triples (here with labels): | |||
r = g.query(""" | |||
SELECT ?s ?sLabel ?p ?o ?oLabel WHERE { | |||
?edge | |||
<http://ex.org/s/> ?s ; | |||
<http://ex.org/p/> ?p ; | |||
<http://ex.org/o/> ?o . | |||
?s <http://ex.org/label> ?sLabel . | |||
?o <http://ex.org/label> ?oLabel . | |||
} | |||
""", initNs={'cn': CN_BASE}) | |||
print(r.serialize(format='txt').decode()) | |||
# Construct a new graph: | |||
r = g.query(""" | |||
CONSTRUCT { | |||
?s ?p ?o . | |||
?s <http://ex.org/label> ?sLabel . | |||
?o <http://ex.org/label> ?oLabel . | |||
} WHERE { | |||
?edge <http://ex.org/s/> ?s ; | |||
<http://ex.org/p/> ?p ; | |||
<http://ex.org/o/> ?o . | |||
?s <http://ex.org/label> ?sLabel . | |||
?o <http://ex.org/label> ?oLabel . | |||
} | |||
""", initNs={'cn': CN_BASE}) | |||
print(r.graph.serialize(format='ttl')) | |||
</syntaxhighlight> | </syntaxhighlight> | ||
=SHACL (Lab 9)= | |||
<syntaxhighlight> | |||
<syntaxhighlight lang="Python"> | |||
from pyshacl import validate | from pyshacl import validate | ||
Line 945: | Line 1,348: | ||
data_graph = Graph() | data_graph = Graph() | ||
# parses the Turtle | # parses the Turtle example from the task | ||
data_graph.parse("data_graph.ttl") | data_graph.parse("data_graph.ttl") | ||
prefixes = """ | |||
@prefix ex: <http://example.org/> . | @prefix ex: <http://example.org/> . | ||
@prefix foaf: <http://xmlns.com/foaf/0.1/> . | @prefix foaf: <http://xmlns.com/foaf/0.1/> . | ||
Line 955: | Line 1,357: | ||
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> . | @prefix xsd: <http://www.w3.org/2001/XMLSchema#> . | ||
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . | @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . | ||
""" | |||
ex: | shape_graph = """ | ||
ex:PUI_Shape | |||
a sh:NodeShape ; | a sh:NodeShape ; | ||
sh:targetClass ex:PersonUnderInvestigation ; | sh:targetClass ex:PersonUnderInvestigation ; | ||
Line 972: | Line 1,376: | ||
# --- If you have more time tasks --- | # --- If you have more time tasks --- | ||
ex: | ex:User_Shape rdf:type sh:NodeShape; | ||
sh:targetClass ex:Indictment; | sh:targetClass ex:Indictment; | ||
# The only allowed values for ex:american are true, false or unknown. | # The only allowed values for ex:american are true, false or unknown. | ||
sh:property [ | sh:property [ | ||
sh:path ex:american; | sh:path ex:american; | ||
sh:pattern "(true|false|unknown)" ; | sh:pattern "(true|false|unknown)" ; | ||
] ; | ]; | ||
# The value of a property that counts days must be an integer. | # The value of a property that counts days must be an integer. | ||
Line 985: | Line 1,388: | ||
sh:path ex:indictment_days; | sh:path ex:indictment_days; | ||
sh:datatype xsd:integer; | sh:datatype xsd:integer; | ||
] ; | ]; | ||
sh:property [ | sh:property [ | ||
sh:path ex:investigation_days; | sh:path ex:investigation_days; | ||
sh:datatype xsd:integer; | sh:datatype xsd:integer; | ||
] ; | ]; | ||
# The value of a property that indicates a start date must be xsd:date. | # The value of a property that indicates a start date must be xsd:date. | ||
Line 995: | Line 1,398: | ||
sh:path ex:investigation_start; | sh:path ex:investigation_start; | ||
sh:datatype xsd:date; | sh:datatype xsd:date; | ||
] ; | ]; | ||
# The value of a property that indicates an end date must be xsd:date or unknown (tip: you can use sh:or (...) ). | # The value of a property that indicates an end date must be xsd:date or unknown (tip: you can use sh:or (...) ). | ||
Line 1,003: | Line 1,406: | ||
[ sh:datatype xsd:date ] | [ sh:datatype xsd:date ] | ||
[ sh:hasValue "unknown" ] | [ sh:hasValue "unknown" ] | ||
)] ; | )]; | ||
# Every indictment must have exactly one FOAF name for the investigated person. | # Every indictment must have exactly one FOAF name for the investigated person. | ||
Line 1,010: | Line 1,413: | ||
sh:minCount 1; | sh:minCount 1; | ||
sh:maxCount 1; | sh:maxCount 1; | ||
] ; | ]; | ||
# Every indictment must have exactly one investigated person property, and that person must have the type ex:PersonUnderInvestigation. | # Every indictment must have exactly one investigated person property, and that person must have the type ex:PersonUnderInvestigation. | ||
Line 1,031: | Line 1,434: | ||
sh:property [ | sh:property [ | ||
sh:path ex:president ; | sh:path ex:president ; | ||
sh:minCount 1 ; | |||
sh:class ex:President ; | sh:class ex:President ; | ||
sh:nodeKind sh:IRI ; | sh:nodeKind sh:IRI ; | ||
Line 1,037: | Line 1,441: | ||
shacl_graph = Graph() | shacl_graph = Graph() | ||
# parses the contents of a shape_graph made in the | # parses the contents of a shape_graph you made in the previous task | ||
shacl_graph.parse(data=shape_graph) | shacl_graph.parse(data=prefixes+shape_graph) | ||
# uses pySHACL's validate method to apply the shape_graph constraints to the data_graph | # uses pySHACL's validate method to apply the shape_graph constraints to the data_graph | ||
Line 1,059: | Line 1,463: | ||
SELECT DISTINCT ?message WHERE { | SELECT DISTINCT ?message WHERE { | ||
[] sh:result / sh:resultMessage ?message . | |||
} | } | ||
""" | """ | ||
Line 1,075: | Line 1,475: | ||
SELECT ?message (COUNT(?node) AS ?num_messages) WHERE { | SELECT ?message (COUNT(?node) AS ?num_messages) WHERE { | ||
[] sh:result ? | [] sh:result ?result . | ||
? | ?result sh:resultMessage ?message ; | ||
sh:focusNode ?node . | |||
} | } | ||
GROUP BY ?message | GROUP BY ?message | ||
Line 1,085: | Line 1,485: | ||
messages = results_graph.query(count_messages) | messages = results_graph.query(count_messages) | ||
for row in messages: | for row in messages: | ||
print( | print("COUNT MESSAGE") | ||
print(row.num_messages, " ", row.message) | |||
</syntaxhighlight> | </syntaxhighlight> | ||
=RDFS (Lab 10)= | |||
<syntaxhighlight> | |||
<syntaxhighlight lang="Python"> | |||
import owlrl | import owlrl | ||
from rdflib import Graph, RDF, Namespace, FOAF, RDFS | from rdflib import Graph, RDF, Namespace, Literal, XSD, FOAF, RDFS | ||
from rdflib.collection import Collection | |||
g = Graph() | g = Graph() | ||
Line 1,100: | Line 1,504: | ||
g.bind("ex", ex) | g.bind("ex", ex) | ||
g.bind("foaf", FOAF) | g.bind("foaf", FOAF) | ||
NS = { | NS = { | ||
Line 1,110: | Line 1,515: | ||
#Write a small function that computes the RDFS closure on your graph. | #Write a small function that computes the RDFS closure on your graph. | ||
def flush(): | def flush(): | ||
owlrl. | engine = owlrl.RDFSClosure.RDFS_Semantics(g, False, False, False) | ||
engine.closure() | |||
engine.flush_stored_triples() | |||
#Rick Gates was charged with money laundering and tax evasion. | #Rick Gates was charged with money laundering and tax evasion. | ||
Line 1,117: | Line 1,524: | ||
#When one thing that is charged with another thing, | #When one thing that is charged with another thing, | ||
g.add((ex.chargedWith, RDFS.domain, ex.PersonUnderInvestigation)) #the first thing is a person under investigation and | g.add((ex.chargedWith, RDFS.domain, ex.PersonUnderInvestigation)) #the first thing (subject) is a person under investigation and | ||
g.add((ex.chargedWith, RDFS.range, ex.Offense)) #the second thing is an offense. | g.add((ex.chargedWith, RDFS.range, ex.Offense)) #the second thing (object) is an offense. | ||
#Write a SPARQL query that checks the RDF type(s) of Rick Gates and money laundering in your RDF graph. | #Write a SPARQL query that checks the RDF type(s) of Rick Gates and money laundering in your RDF graph. | ||
print( | print(g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer) | ||
print( | print(g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer) | ||
flush() | flush() | ||
print( | print(g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer) | ||
print( | print(g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer) | ||
#A person under investigation is a FOAF person | #A person under investigation is a FOAF person | ||
g.add((ex.PersonUnderInvestigation, RDFS.subClassOf, FOAF.Person)) | g.add((ex.PersonUnderInvestigation, RDFS.subClassOf, FOAF.Person)) | ||
print( | print(g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer) | ||
flush() | flush() | ||
print( | print(g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer) | ||
#Paul Manafort was convicted for tax evasion. | #Paul Manafort was convicted for tax evasion. | ||
Line 1,137: | Line 1,544: | ||
#the first thing is also charged with the second thing | #the first thing is also charged with the second thing | ||
g.add((ex.convictedFor, RDFS.subPropertyOf, ex.chargedWith)) | g.add((ex.convictedFor, RDFS.subPropertyOf, ex.chargedWith)) | ||
flush() | flush() | ||
print( | print(g.query('ASK {ex:Paul_Manafort ex:chargedWith ex:TaxEvasion}', initNs=NS).askAnswer) | ||
print(g.serialize()) | print(g.serialize()) | ||
Line 1,145: | Line 1,551: | ||
</syntaxhighlight> | </syntaxhighlight> | ||
=OWL 1 (Lab 11)= | |||
<syntaxhighlight> | <syntaxhighlight lang="Python"> | ||
from rdflib import Graph, RDFS, Namespace, RDF, FOAF, BNode, OWL, URIRef, Literal, XSD | from rdflib import Graph, RDFS, Namespace, RDF, FOAF, BNode, OWL, URIRef, Literal, XSD | ||
Line 1,243: | Line 1,649: | ||
</syntaxhighlight> | </syntaxhighlight> | ||
=OWL 2 (Lab 12)= | |||
<syntaxhighlight lang="Python"> | |||
<syntaxhighlight> | |||
@prefix : <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> . | @prefix : <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> . | ||
Line 1,302: | Line 1,706: | ||
rdfs:domain io:Investigation ; | rdfs:domain io:Investigation ; | ||
rdfs:range xsd:string . | rdfs:range xsd:string . | ||
Line 1,330: | Line 1,728: | ||
### http://xmlns.com/foaf/0.1/title | |||
foaf:title rdf:type owl:DatatypeProperty ; | |||
rdfs:domain io:Investigation ; | |||
rdfs:range xsd:string . | |||
### http://xmlns.com/foaf/0 | |||
Line 1,539: | Line 1,761: | ||
io:Person rdf:type owl:Class ; | io:Person rdf:type owl:Class ; | ||
rdfs:subClassOf foaf:Person . | rdfs:subClassOf foaf:Person . | ||
Line 1,565: | Line 1,777: | ||
### http://dbpedia.org/resource/Elizabeth_Prelogar | ### http://dbpedia.org/resource/Elizabeth_Prelogar | ||
dbr:Elizabeth_Prelogar rdf:type owl:NamedIndividual | dbr:Elizabeth_Prelogar rdf:type owl:NamedIndividual ; | ||
io:investigating <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ; | io:investigating <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ; | ||
foaf:name "Elizabeth Prelogar" . | foaf:name "Elizabeth Prelogar" . | ||
Line 1,591: | Line 1,802: | ||
dbr:Roger_Stone rdf:type owl:NamedIndividual ; | dbr:Roger_Stone rdf:type owl:NamedIndividual ; | ||
foaf:name "Roger Stone" . | foaf:name "Roger Stone" . | ||
### http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019) | ### http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019) | ||
<http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> rdf:type owl:NamedIndividual ; | <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> rdf:type owl:NamedIndividual ; | ||
foaf:title "Mueller Investigation" . | |||
Line 1,623: | Line 1,820: | ||
dbr:Robert_Mueller | dbr:Robert_Mueller | ||
dbr:Roger_Stone | dbr:Roger_Stone | ||
) | ) | ||
] . | ] . | ||
Line 1,632: | Line 1,828: | ||
</syntaxhighlight> | </syntaxhighlight> | ||
=Using Graph Embeddings (Lab 13)= | |||
https://colab.research.google.com/drive/ | https://colab.research.google.com/drive/1WkRJUeUBVF5yVv7o0pOKfsd4pqG6369k | ||
=Training Graph Embeddings (Lab 14)= | |||
https://colab.research.google.com/drive/1jKpzlQ7gYTVzgphJsrK5iuMpFhkrY96q | https://colab.research.google.com/drive/1jKpzlQ7gYTVzgphJsrK5iuMpFhkrY96q | ||
--> | --> |
Latest revision as of 10:56, 20 January 2025
Here we will present suggested solutions after each lab. The page will be updated as the course progresses