Lab Solutions

Here we will present suggested solutions after each lab. ''The page will be updated as the course progresses.'' Most solutions use Python's RDFlib; other libraries are introduced in later labs.


=Getting started (Lab 1)=

<syntaxhighlight>
from rdflib import Graph, Namespace

ex = Namespace('http://example.org/')

g = Graph()
g.bind("ex", ex)

# The Mueller Investigation was led by Robert Mueller
g.add((ex.MuellerInvestigation, ex.leadBy, ex.RobertMueller))

# It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, Michael Cohen, and Roger Stone.
g.add((ex.MuellerInvestigation, ex.involved, ex.PaulManafort))
g.add((ex.MuellerInvestigation, ex.involved, ex.RickGates))
g.add((ex.MuellerInvestigation, ex.involved, ex.GeorgePapadopoulos))
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelFlynn))
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelCohen))
g.add((ex.MuellerInvestigation, ex.involved, ex.RogerStone))

# Paul Manafort was business partner of Rick Gates
g.add((ex.PaulManafort, ex.businessPartner, ex.RickGates))

# He was campaign chairman for Donald Trump
g.add((ex.PaulManafort, ex.campaignChairman, ex.DonaldTrump))

# He was charged with money laundering, tax evasion, and foreign lobbying.
g.add((ex.PaulManafort, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.PaulManafort, ex.chargedWith, ex.TaxEvasion))
g.add((ex.PaulManafort, ex.chargedWith, ex.ForeignLobbying))

# He was convicted for bank and tax fraud.
g.add((ex.PaulManafort, ex.convictedOf, ex.BankFraud))
g.add((ex.PaulManafort, ex.convictedOf, ex.TaxFraud))

# He pleaded guilty to conspiracy.
g.add((ex.PaulManafort, ex.pleadGuiltyTo, ex.Conspiracy))

# He was sentenced to prison.
g.add((ex.PaulManafort, ex.sentencedTo, ex.Prison))

# He negotiated a plea agreement.
g.add((ex.PaulManafort, ex.negotiated, ex.PleaAgreement))

# Rick Gates was charged with money laundering, tax evasion and foreign lobbying.
g.add((ex.RickGates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.RickGates, ex.chargedWith, ex.TaxEvasion))
g.add((ex.RickGates, ex.chargedWith, ex.ForeignLobbying))

# He pleaded guilty to conspiracy and lying to FBI.
g.add((ex.RickGates, ex.pleadGuiltyTo, ex.Conspiracy))
g.add((ex.RickGates, ex.pleadGuiltyTo, ex.LyingToFBI))

# Use the serialize method of rdflib.Graph to write out the model in different formats (on screen or to file)
print(g.serialize(format="ttl")) # To screen
#g.serialize("lab1.ttl", format="ttl") # To file

# Loop through the triples in the model to print out all triples that have pleading guilty as predicate
for subject, object in g[ : ex.pleadGuiltyTo :]:
    print(subject, ex.pleadGuiltyTo, object)

# --- IF you have more time tasks ---

# Michael Cohen, Michael Flynn and the lying are part of lab 2, so the answer is not provided this week

# Write a method (function) that submits your model for rendering and saves the returned image to file.
import requests
import shutil

def graphToImage(graphInput):
    data = {"rdf":graphInput, "from":"ttl", "to":"png"}
    link = "http://www.ldf.fi/service/rdf-grapher"
    response = requests.get(link, params = data, stream=True)
    # print(response.content)
    print(response.raw)
    with open("lab1.png", "wb") as file:
        shutil.copyfileobj(response.raw, file)

graph = g.serialize(format="ttl")
graphToImage(graph)
</syntaxhighlight>
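The slice expression g[ : ex.pleadGuiltyTo :] above is compact but easy to misread. The same loop can also be written with the triples() method; a minimal sketch, reusing the graph g from the solution above:

<syntaxhighlight>
# Equivalent to the slice-based loop above, using Graph.triples() with a triple pattern
for s, p, o in g.triples((None, ex.pleadGuiltyTo, None)):
    print(s, p, o)
</syntaxhighlight>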


=RDF programming with RDFlib (Lab 2)=

<syntaxhighlight>
from rdflib import Graph, Namespace, Literal, BNode, XSD, FOAF, RDF, URIRef
from rdflib.collection import Collection

g = Graph()

# Getting the graph created in the first lab
g.parse("lab1.ttl", format="ttl")

ex = Namespace('http://example.org/')
g.bind("ex", ex)

# --- Michael Cohen ---
# Michael Cohen was Donald Trump's attorney.
g.add((ex.MichaelCohen, ex.attorneyTo, ex.DonaldTrump))
# He pleaded guilty for lying to Congress.
g.add((ex.MichaelCohen, ex.pleadGuiltyTo, ex.LyingToCongress))
 
# --- Michael Flynn ---
# Michael Flynn was adviser to Donald Trump.
g.add((ex.MichaelFlynn, ex.adviserTo, ex.DonaldTrump))
# He pleaded guilty for lying to the FBI.
g.add((ex.MichaelFlynn, ex.pleadGuiltyTo, ex.LyingToFBI))
# He negotiated a plea agreement.
g.add((ex.MichaelFlynn, ex.negotiated, ex.PleaAgreement))
 
# Change your graph so it represents instances of lying as blank nodes.
# Remove the triples that will be duplicated
g.remove((ex.MichaelFlynn, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.MichaelFlynn, ex.negotiated, ex.PleaAgreement))
g.remove((ex.RickGates, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.RickGates, ex.pleadGuiltyTo, ex.Conspiracy))
g.remove((ex.RickGates, ex.chargedWith, ex.ForeignLobbying))
g.remove((ex.RickGates, ex.chargedWith, ex.MoneyLaundering))
g.remove((ex.RickGates, ex.chargedWith, ex.TaxEvasion))
g.remove((ex.MichaelCohen, ex.pleadGuiltyTo, ex.LyingToCongress))
 
# --- Michael Flynn ---
FlynnLying = BNode()
g.add((FlynnLying, ex.crime, ex.LyingToFBI))
g.add((FlynnLying, ex.pleadGuiltyOn, Literal("2017-12-01", datatype=XSD.date)))
g.add((FlynnLying, ex.liedAbout, Literal("His communications with a former Russian ambassador during the presidential transition", datatype=XSD.string)))
g.add((FlynnLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.MichaelFlynn, ex.pleadGuiltyTo, FlynnLying))
 
# --- Rick Gates ---
GatesLying = BNode()
Crimes = BNode()
Charged = BNode()
Collection(g, Crimes, [ex.LyingToFBI, ex.Conspiracy])
Collection(g, Charged, [ex.ForeignLobbying, ex.MoneyLaundering, ex.TaxEvasion])
g.add((GatesLying, ex.crime, Crimes))
g.add((GatesLying, ex.chargedWith, Charged))
g.add((GatesLying, ex.pleadGuiltyOn, Literal("2018-02-23", datatype=XSD.date)))
g.add((GatesLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.RickGates, ex.pleadGuiltyTo, GatesLying))
 
# --- Michael Cohen ---
CohenLying = BNode()
g.add((CohenLying, ex.crime, ex.LyingToCongress))
g.add((CohenLying, ex.liedAbout, ex.TrumpRealEstateDeal))
g.add((CohenLying, ex.prosecutorsAlleged, Literal("In an August 2017 letter Cohen sent to congressional committees investigating Russian election interference, he falsely stated that the project ended in January 2016", datatype=XSD.string)))
g.add((CohenLying, ex.mullerInvestigationAlleged, Literal("Cohen falsely stated that he had never agreed to travel to Russia for the real estate deal and that he did not recall any contact with the Russian government about the project", datatype=XSD.string)))
g.add((CohenLying, ex.pleadGuiltyOn, Literal("2018-11-29", datatype=XSD.date)))
g.add((CohenLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.MichaelCohen, ex.pleadGuiltyTo, CohenLying))
 
print(g.serialize(format="ttl"))


#Save (serialize) your graph to a Turtle file.
# g.serialize("lab2.ttl", format="ttl")

#Add a few triples to the Turtle file with more information about Donald Trump.
'''
ex:Donald_Trump ex:address [ ex:city ex:Palm_Beach ;
            ex:country ex:United_States ;
            ex:postalCode 33480 ;
            ex:residence ex:Mar_a_Lago ;
            ex:state ex:Florida ;
            ex:streetName "1100 S Ocean Blvd"^^xsd:string ] ;
    ex:previousAddress [ ex:city ex:Washington_DC ;
            ex:country ex:United_States ;
            ex:phoneNumber "1 202 456 1414"^^xsd:integer ;
            ex:postalCode "20500"^^xsd:integer ;
            ex:residence ex:The_White_House ;
            ex:streetName "1600 Pennsylvania Ave."^^xsd:string ];
    ex:marriedTo ex:Melania_Trump;
    ex:fatherTo (ex:Ivanka_Trump ex:Donald_Trump_Jr ex:Tiffany_Trump ex:Eric_Trump ex:Barron_Trump).
'''

#Read (parse) the Turtle file back into a Python program, and check that the new triples are there
def serialize_Graph():
    newGraph = Graph()
    newGraph.parse("lab2.ttl")
    print(newGraph.serialize())

#Don't need this to run until after adding the triples above to the ttl file
# serialize_Graph()

#Write a method (function) that starts with Donald Trump prints out a graph depth-first to show how the other graph nodes are connected to him
visited_nodes = set()

def create_Tree(model, nodes):
    #Traverse the model breadth-first to create the tree.
    global visited_nodes
    tree = Graph()
    children = set()
    visited_nodes |= set(nodes)
    for s, p, o in model:
        if s in nodes and o not in visited_nodes:
            tree.add((s, p, o))
            visited_nodes.add(o)
            children.add(o)
        if o in nodes and s not in visited_nodes:
            invp = URIRef(f'{p}_inv') #_inv represents inverse of
            tree.add((o, invp, s))
            visited_nodes.add(s)
            children.add(s)
    if len(children) > 0:
        children_tree = create_Tree(model, children)
        for triple in children_tree:
            tree.add(triple)
    return tree

def print_Tree(tree, root, indent=0):
    #Print the tree depth-first.
    print(str(root))
    for s, p, o in tree:
        if s==root:
            print('   '*indent + '  ' + str(p), end=' ')
            print_Tree(tree, o, indent+1)

tree = create_Tree(g, [ex.Donald_Trump])
print_Tree(tree, ex.Donald_Trump)
</syntaxhighlight>
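The serialize_Graph() function above simply prints the whole file. If you want a more direct check that the new Donald Trump triples survived the round trip, something like the following sketch works; it assumes you added the Turtle snippet above to lab2.ttl, so ex:Donald_Trump is the node to look for:

<syntaxhighlight>
from rdflib import Graph, Namespace

ex = Namespace('http://example.org/')

check = Graph()
check.parse("lab2.ttl", format="ttl")

# True if at least one triple has Donald Trump as its subject
print((ex.Donald_Trump, None, None) in check)
# Number of triples that have Donald Trump as their subject
print(len(list(check.triples((ex.Donald_Trump, None, None)))))
</syntaxhighlight>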
 
=SPARQL (Lab 3-4)=
===List all triples===
<syntaxhighlight lang="SPARQL">
SELECT ?s ?p ?o
WHERE {?s ?p ?o .}
</syntaxhighlight>
 
===List the first 100 triples===
<syntaxhighlight lang="SPARQL">
SELECT ?s ?p ?o
WHERE {?s ?p ?o .}
LIMIT 100
</syntaxhighlight>


===Count the number of triples===
<syntaxhighlight lang="SPARQL">
SELECT (COUNT(*) as ?count)
WHERE {?s ?p ?o .}
</syntaxhighlight>

===Count the number of indictments===
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>

SELECT (COUNT(?ind) as ?amount)
WHERE {
  ?s ns1:outcome ?ind;
      ns1:outcome ns1:indictment.
}
</syntaxhighlight>


===List the names of everyone who pleaded guilty, along with the name of the investigation===
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>

SELECT ?name ?invname
WHERE {
  ?s ns1:name ?name;
      ns1:investigation ?invname;
      ns1:outcome ns1:guilty-plea .
}
</syntaxhighlight>


===List the names of everyone who were convicted, but who had their conviction overturned by which president===
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>


SELECT ?name ?president
WHERE {
  ?s ns1:name ?name;
      ns1:president ?president;
      ns1:outcome ns1:conviction;
      ns1:overturned ns1:true.
}
</syntaxhighlight>
 
===For each investigation, list the number of indictments made===
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>
 
SELECT ?invs (COUNT(?invs) as ?count)
WHERE {
  ?s ns1:investigation ?invs;
      ns1:outcome ns1:indictment .
}
GROUP BY ?invs
</syntaxhighlight>
 
===For each investigation with multiple indictments, list the number of indictments made===
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>
 
SELECT ?invs (COUNT(?invs) as ?count)
WHERE {
  ?s ns1:investigation ?invs;
      ns1:outcome ns1:indictment .
}
GROUP BY ?invs
HAVING(?count > 1)
</syntaxhighlight>
 
===For each investigation with multiple indictments, list the number of indictments made, sorted with the most indictments first===
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>
 
SELECT ?invs (COUNT(?invs) as ?count)
WHERE {
  ?s ns1:investigation ?invs;
      ns1:outcome ns1:indictment .
}
GROUP BY ?invs
HAVING(?count > 1)
ORDER BY DESC(?count)
</syntaxhighlight>
 
===For each president, list the numbers of convictions and of pardons made===
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>
 
SELECT ?president (COUNT(?outcome) as ?conviction) (COUNT(?pardon) as ?pardons)
WHERE {
  ?s ns1:president ?president;
      ns1:outcome ?outcome ;
      ns1:outcome ns1:conviction.
      OPTIONAL{
        ?s ns1:pardoned ?pardon .
        FILTER (?pardon = ns1:true)
      }
}
GROUP BY ?president
</syntaxhighlight>
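These queries are written for a triplestore such as Blazegraph, but most of them can also be tested locally with RDFlib before you load anything into a server. A minimal sketch, assuming the investigation data is available as the Turtle file Russia_investigation_kg.ttl used in Lab 5 (adjust the filename to your own copy):

<syntaxhighlight>
from rdflib import Graph

g = Graph()
g.parse("Russia_investigation_kg.ttl", format="ttl")

# Same counting query as above, run locally
results = g.query("""
    PREFIX ns1: <http://example.org#>
    SELECT (COUNT(?ind) as ?amount)
    WHERE {
        ?s ns1:outcome ?ind;
            ns1:outcome ns1:indictment.
    }
""")
for row in results:
    print(row.amount)
</syntaxhighlight>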
 
===Rename muellerkg:name to something like muellerkg:person===
 
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>
 
DELETE{?s ns1:name ?o}
INSERT{?s ns1:person ?o}
WHERE {?s ns1:name ?o}
</syntaxhighlight>
 
===Update the graph so all the investigated person and president nodes become the subjects in foaf:name triples with the corresponding strings===
 
<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>


#Persons
INSERT {?person foaf:name ?name}
WHERE {
      ?investigation ns1:person ?person .
      BIND(REPLACE(STR(?person), STR(ns1:), "") AS ?name)
}

#Presidents
INSERT {?president foaf:name ?name}
WHERE {
      ?investigation ns1:president ?president .
      BIND(REPLACE(STR(?president), STR(ns1:), "") AS ?name)
}
</syntaxhighlight>


===Use INSERT DATA updates to add these triples===

<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>

INSERT DATA {
    ns1:George_Papadopoulos ns1:adviserTo ns1:Donald_Trump;
        ns1:pleadGuiltyTo ns1:LyingToFBI;
        ns1:sentencedTo ns1:Prison.

    ns1:Roger_Stone a ns1:Republican;
        ns1:adviserTo ns1:Donald_Trump;
        ns1:officialTo ns1:Trump_Campaign;
        ns1:interactedWith ns1:Wikileaks;
        ns1:providedTestimony ns1:House_Intelligence_Committee;
        ns1:clearedOf ns1:AllCharges.
}

#To test if added
SELECT ?p ?o
WHERE {ns1:Roger_Stone ?p ?o .}
</syntaxhighlight>
===Use DELETE DATA and then INSERT DATA updates to correct that Roger Stone was cleared of all charges===

<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>

DELETE DATA {
      ns1:Roger_Stone ns1:clearedOf ns1:AllCharges .
}

INSERT DATA {
      ns1:Roger_Stone ns1:indictedFor ns1:ObstructionOfJustice,
                                      ns1:WitnessTampering,
                                      ns1:FalseStatements.
}

#The task specifically requested DELETE DATA & INSERT DATA, but below is a more efficient solution

DELETE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.}
INSERT{
  ns1:Roger_Stone ns1:indictedFor ns1:ObstructionOfJustice,
                                  ns1:WitnessTampering,
                                  ns1:FalseStatements.
}
WHERE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.}
</syntaxhighlight>

===Use a DESCRIBE query to show the updated information about Roger Stone===

<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>

DESCRIBE ?o
WHERE {ns1:Roger_Stone ns1:indictedFor ?o .}
</syntaxhighlight>

===Use a CONSTRUCT query to create a new RDF group with triples only about Roger Stone===

<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>

CONSTRUCT {
  ns1:Roger_Stone ?p ?o.
  ?s ?p2 ns1:Roger_Stone.
}
WHERE {
  ns1:Roger_Stone ?p ?o .
  ?s ?p2 ns1:Roger_Stone
}
</syntaxhighlight>
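The updates in this section can also be sent from a Python script with SPARQLWrapper instead of being pasted into the Blazegraph user interface. A minimal sketch; the endpoint URL is only a placeholder, so point it at the update endpoint of your own repository:

<syntaxhighlight>
from SPARQLWrapper import SPARQLWrapper, POST

# Placeholder endpoint - replace with the update endpoint of your own repository
sparql = SPARQLWrapper("http://localhost:9999/blazegraph/namespace/kb/sparql")
sparql.setMethod(POST)
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    DELETE DATA { ns1:Roger_Stone ns1:clearedOf ns1:AllCharges . }
""")
sparql.query()
</syntaxhighlight>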
===Write a DELETE/INSERT statement to change one of the prefixes in your graph===

<syntaxhighlight lang="SPARQL">
PREFIX ns1: <http://example.org#>
PREFIX dbp: <https://dbpedia.org/page/>

DELETE {?s ns1:person ?o1}
INSERT {?s ns1:person ?o2}
WHERE{
  ?s ns1:person ?o1 .
  BIND (IRI(replace(str(?o1), str(ns1:), str(dbp:))) AS ?o2)
}

#This update changes the object in triples with ns1:person as the predicate. It changes its prefix of ns1 (which is the "shortcut/shorthand" for example.org) to the prefix dbp (dbpedia.org)
</syntaxhighlight>

===Write an INSERT statement to add at least one significant date to the Mueller investigation, with literal type xsd:date. Write a DELETE/INSERT statement to change the date to a string, and a new DELETE/INSERT statement to change it back to xsd:date.===

<syntaxhighlight lang="SPARQL">
#Whilst this solution is not exactly what the task asks for, I feel like this is more appropriate given the dataset. The following update changes the objects that use cp_date as predicate from a URI to a literal with date as its datatype

PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ns1: <http://example.org#>

DELETE {?s ns1:cp_date ?o}
INSERT{?s ns1:cp_date ?o3}
WHERE{
  ?s ns1:cp_date ?o .
  BIND (replace(str(?o), str(ns1:), "")  AS ?o2)
  BIND (STRDT(STR(?o2), xsd:date) AS ?o3)
}

#To test:

PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ns1: <http://example.org#>

SELECT ?s ?o
WHERE{
  ?s ns1:cp_date ?o.
  FILTER(datatype(?o) = xsd:date)
}

#To change it to an integer, use the following code, and to change it back to date, swap "xsd:integer" to "xsd:date"

PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ns1: <http://example.org#>

DELETE {?s ns1:cp_date ?o}
INSERT{?s ns1:cp_date ?o2}
WHERE{
  ?s ns1:cp_date ?o .
  BIND (STRDT(STR(?o), xsd:integer) AS ?o2)
}
</syntaxhighlight>
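If your graph lives in RDFlib rather than in a triplestore, the same kind of update can be applied with Graph.update(). A minimal sketch, assuming the investigation data has been parsed from a local Turtle file (the filename is a placeholder):

<syntaxhighlight>
from rdflib import Graph

g = Graph()
g.parse("Russia_investigation_kg.ttl", format="ttl")  # placeholder filename

# Retype the cp_date values as xsd:date, as in the update above
g.update("""
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>
    DELETE {?s ns1:cp_date ?o}
    INSERT {?s ns1:cp_date ?o3}
    WHERE {
        ?s ns1:cp_date ?o .
        BIND (replace(str(?o), str(ns1:), "") AS ?o2)
        BIND (STRDT(STR(?o2), xsd:date) AS ?o3)
    }
""")
</syntaxhighlight>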


=SPARQL Programming (Lab 5)=

<syntaxhighlight>
from rdflib import Graph, Namespace, RDF, FOAF
from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE

g = Graph()
g.parse("Russia_investigation_kg.ttl")

# ----- RDFLIB -----
ex = Namespace('http://example.org#')

NS = {
    '': ex,
    'rdf': RDF,
    'foaf': FOAF,
}

# Print out a list of all the predicates used in your graph.
task1 = g.query("""
SELECT DISTINCT ?p WHERE{
    ?s ?p ?o .
}
""", initNs=NS)

print(list(task1))

# Print out a sorted list of all the presidents represented in your graph.
task2 = g.query("""
SELECT DISTINCT ?president WHERE{
    ?s :president ?president .
}
ORDER BY ?president
""", initNs=NS)

print(list(task2))

# Create dictionary (Python dict) with all the represented presidents as keys. For each key, the value is a list of names of people indicted under that president.
task3_dic = {}

task3 = g.query("""
SELECT ?president ?person WHERE{
    ?s :president ?president;
      :name ?person;
      :outcome :indictment.
}
""", initNs=NS)

for president, person in task3:
    if president not in task3_dic:
        task3_dic[president] = [person]
    else:
        task3_dic[president].append(person)

print(task3_dic)

# Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people.

# This task is a lot trickier than it needs to be. As far as I'm aware RDFLib has no HAVING support, so a query like this:
task4 = g.query("""
ASK {
  SELECT (COUNT(?s) as ?count) WHERE{
    ?s :pardoned :true;
    :president :Bill_Clinton  .
    }
    HAVING (?count > 5)
}
""", initNs=NS)

print(task4.askAnswer)

# The query above is valid SPARQL and works fine in Blazegraph, but it will always return False in RDFLib because it uses HAVING.
# Instead you have to use a nested SELECT query like below, where you use FILTER instead of HAVING. Donald Trump has no pardons,
# so I have instead chosen Bill Clinton with 13 to check if the query works.

task4 = g.query("""
    ASK{
        SELECT ?count WHERE{{
          SELECT (COUNT(?s) as ?count) WHERE{
            ?s :pardoned :true;
                  :president :Bill_Clinton  .
                }}
        FILTER (?count > 5)
        }
    }
""", initNs=NS)

print(task4.askAnswer)

# Use a DESCRIBE query to create a new graph with information about Donald Trump. Print out the graph in Turtle format.
# By all accounts, it seems DESCRIBE queries are yet to be implemented in RDFLib, but they are attempting to implement it:
# https://github.com/RDFLib/rdflib/pull/2221 <--- Issue and proposed solution raised
# https://github.com/RDFLib/rdflib/commit/2325b4a81724c1ccee3a131067db4fbf9b4e2629 <--- Solution committed to RDFLib
# This solution does not work yet. However, this proposed solution should work once DESCRIBE is implemented in RDFLib:

# task5 = g.query("""
# DESCRIBE :Donald_Trump
# """, initNs=NS)
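# A rough manual stand-in for DESCRIBE while RDFLib support is pending (an extra sketch,
# not part of the original solution): collect every triple that mentions Donald Trump.
trump_graph = Graph()
for triple in g.triples((ex.Donald_Trump, None, None)):
    trump_graph.add(triple)
for triple in g.triples((None, None, ex.Donald_Trump)):
    trump_graph.add(triple)
print(trump_graph.serialize(format="turtle"))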


=More examples from past semesters that may be useful=
# print(task5.serialize())


# ----- SPARQLWrapper -----


===Printing the triples of the Graph in a readable way===
SERVER = 'http://localhost:7200' #Might need to replace this
<syntaxhighlight>
REPOSITORY = 'Labs' #Replace with your repository name
# The Turtle format is meant to be more readable for humans.
print(g.serialize(format="turtle"))
</syntaxhighlight>


===Coding Tasks Lab 1===
# Query Endpoint
<syntaxhighlight>
sparql = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}')
from rdflib import Graph, Namespace, URIRef, BNode, Literal
# Update Endpoint
from rdflib.namespace import RDF, FOAF, XSD
sparqlUpdate = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}/statements')


g = Graph()
# Ask whether there was an ongoing indictment on the date 1990-01-01.
ex = Namespace("http://example.org/")
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    ASK {
        SELECT ?end ?start
        WHERE{
            ?s ns1:investigation_end ?end;
              ns1:investigation_start ?start;
              ns1:outcome ns1:indictment.
            FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
    }
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(f"Are there any investigation on the 1990-01-01: {results['boolean']}")


g.add((ex.Cade, ex.married, ex.Mary))
# List ongoing indictments on that date 1990-01-01.
g.add((ex.France, ex.capital, ex.Paris))
sparql.setQuery("""
g.add((ex.Cade, ex.age, Literal("27", datatype=XSD.integer)))
    PREFIX ns1: <http://example.org#>
g.add((ex.Mary, ex.age, Literal("26", datatype=XSD.integer)))
    SELECT ?s
g.add((ex.Mary, ex.interest, ex.Hiking))
    WHERE{
g.add((ex.Mary, ex.interest, ex.Chocolate))
        ?s ns1:investigation_end ?end;
g.add((ex.Mary, ex.interest, ex.Biology))
          ns1:investigation_start ?start;
g.add((ex.Mary, RDF.type, ex.Student))
          ns1:outcome ns1:indictment.
g.add((ex.Paris, RDF.type, ex.City))
        FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)  
g.add((ex.Paris, ex.locatedIn, ex.France))
    }
g.add((ex.Cade, ex.characteristic, ex.Kind))
""")
g.add((ex.Mary, ex.characteristic, ex.Kind))
g.add((ex.Mary, RDF.type, FOAF.Person))
g.add((ex.Cade, RDF.type, FOAF.Person))


# OR
sparql.setReturnFormat(JSON)
results = sparql.query().convert()


g = Graph()
print("The ongoing investigations on the 1990-01-01 are:")
for result in results["results"]["bindings"]:
    print(result["s"]["value"])


ex = Namespace('http://example.org/')
# Describe investigation number 100 (muellerkg:investigation_100).
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    DESCRIBE ns1:investigation_100
""")


g.add((ex.Cade, FOAF.name, Literal("Cade", datatype=XSD.string)))
sparql.setReturnFormat(TURTLE)
g.add((ex.Mary, FOAF.name, Literal("Mary", datatype=XSD.string)))
results = sparql.query().convert()
g.add((ex.Cade, RDF.type, FOAF.Person))
g.add((ex.Mary, RDF.type, FOAF.Person))
g.add((ex.Mary, RDF.type, ex.Student))
g.add((ex.Cade, ex.Married, ex.Mary))
g.add((ex.Cade, FOAF.age, Literal('27', datatype=XSD.int)))
g.add((ex.Mary, FOAF.age, Literal('26', datatype=XSD.int)))
g.add((ex.Paris, RDF.type, ex.City))
g.add((ex.France, ex.Capital, ex.Paris))
g.add((ex.Mary, FOAF.interest, ex.hiking))
g.add((ex.Mary, FOAF.interest, ex.Chocolate))
g.add((ex.Mary, FOAF.interest, ex.biology))
g.add((ex.France, ex.City, ex.Paris))
g.add((ex.Mary, ex.characteristic, ex.kind))
g.add((ex.Cade, ex.characteristic, ex.kind))
g.add((ex.France, RDF.type, ex.Country))


print(results)


print(g.serialize(format="turtle"))
# Print out a list of all the types used in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>


</syntaxhighlight>
    SELECT DISTINCT ?types
    WHERE{
        ?s rdf:type ?types .
    }
""")
 
sparql.setReturnFormat(JSON)
results = sparql.query().convert()


==Basic RDF programming==
rdf_Types = []


===Different ways to create an address===
for result in results["results"]["bindings"]:
    rdf_Types.append(result["types"]["value"])


<syntaxhighlight>
print(rdf_Types)


from rdflib import Graph, Namespace, URIRef, BNode, Literal
# Update the graph to that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation.
from rdflib.namespace import RDF, FOAF, XSD
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>


g = Graph()
    INSERT{
ex = Namespace("http://example.org/")
        ?invest rdf:type ns1:Investigation .
    }
    WHERE{
        ?s ns1:investigation ?invest .
}"""


sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()


# How to represent the address of Cade Tracey. From probably the worst solution to the best.
#To Test
sparql.setQuery("""
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>


# Solution 1 -
    ASK{
# Make the entire address into one Literal. However, Generally we want to separate each part of an address into their own triples. This is useful for instance if we want to find only the streets where people live.  
        ns1:watergate rdf:type ns1:Investigation.
    }
""")


g.add((ex.Cade_Tracey, ex.livesIn, Literal("1516_Henry_Street, Berkeley, California 94709, USA")))
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])


# Update the graph to that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>


# Solution 2 -
    INSERT{
# Separate the different pieces of information into their own triples
        ?person rdf:type ns1:IndictedPerson .
    }
    WHERE{
        ?s ns1:name ?person .
}"""


g.add((ex.Cade_tracey, ex.street, Literal("1516_Henry_Street")))
sparqlUpdate.setQuery(update_str)
g.add((ex.Cade_tracey, ex.city, Literal("Berkeley")))
sparqlUpdate.setMethod(POST)
g.add((ex.Cade_tracey, ex.state, Literal("California")))
sparqlUpdate.query()
g.add((ex.Cade_tracey, ex.zipcode, Literal("94709")))
g.add((ex.Cade_tracey, ex.country, Literal("USA")))


#To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson


# Solution 3 - Some parts of the addresses can make more sense to be resources than Literals.
# Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal.
# Larger concepts like a city or state are typically represented as resources rather than Literals, but this is not necessarily a requirement if you don't intend to say more about them.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>


g.add((ex.Cade_tracey, ex.street, Literal("1516_Henry_Street")))
    INSERT{
g.add((ex.Cade_tracey, ex.city, ex.Berkeley))
        ?invest dc:title ?investString.
g.add((ex.Cade_tracey, ex.state, ex.California))
    }
g.add((ex.Cade_tracey, ex.zipcode, Literal("94709")))
    WHERE{
g.add((ex.Cade_tracey, ex.country, ex.USA))
        ?s ns1:investigation ?invest .
        BIND (replace(str(?invest), str(ns1:), "") AS ?investString)
}"""


sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()


# Solution 4
#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate"
# Grouping of the information into an Address. We can Represent the address concept with its own URI OR with a Blank Node.
# One advantage of this is that we can easily remove the entire address, instead of removing each individual part of the address.
# Solution 4 or 5 is how I would recommend to make addresses. Here, ex.CadeAddress could also be called something like ex.address1 or so on, if you want to give each address a unique ID.  


# Address URI - CadeAdress
# Print out a sorted list of all the indicted persons represented in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX foaf: <http://xmlns.com/foaf/0.1/>


g.add((ex.Cade_Tracey, ex.address, ex.CadeAddress))
    SELECT ?name
g.add((ex.CadeAddress, RDF.type, ex.Address))
    WHERE{
g.add((ex.CadeAddress, ex.street, Literal("1516 Henry Street")))
    ?s  ns1:name ?name;
g.add((ex.CadeAddress, ex.city, ex.Berkeley))
            ns1:outcome ns1:indictment.
g.add((ex.CadeAddress, ex.state, ex.California))
    }
g.add((ex.CadeAddress, ex.postalCode, Literal("94709")))
    ORDER BY ?name
g.add((ex.CadeAddress, ex.country, ex.USA))
""")


# OR
sparql.setReturnFormat(JSON)
results = sparql.query().convert()


# Blank node for Address. 
names = []
address = BNode()
g.add((ex.Cade_Tracey, ex.address, address))
g.add((address, RDF.type, ex.Address))
g.add((address, ex.street, Literal("1516 Henry Street", datatype=XSD.string)))
g.add((address, ex.city, ex.Berkeley))
g.add((address, ex.state, ex.California))
g.add((address, ex.postalCode, Literal("94709", datatype=XSD.string)))
g.add((address, ex.country, ex.USA))


for result in results["results"]["bindings"]:
    names.append(result["name"]["value"])


# Solution 5 using existing vocabularies for address
print(names)


# (in this case https://schema.org/PostalAddress from schema.org).
# Print out the minimum, average and maximum indictment days for all the indictments in the graph.
# Also using existing ontology for places like California. (like http://dbpedia.org/resource/California from dbpedia.org)


schema = Namespace("https://schema.org/")
sparql.setQuery("""
dbp = Namespace("https://dpbedia.org/resource/")
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>


g.add((ex.Cade_Tracey, schema.address, ex.CadeAddress))
    SELECT (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min) WHERE{
g.add((ex.CadeAddress, RDF.type, schema.PostalAddress))
        ?s  ns1:indictment_days ?days;
g.add((ex.CadeAddress, schema.streetAddress, Literal("1516 Henry Street")))
            ns1:outcome ns1:indictment.
g.add((ex.CadeAddress, schema.addresCity, dbp.Berkeley))
   
g.add((ex.CadeAddress, schema.addressRegion, dbp.California))
    BIND (replace(str(?days), str(ns1:), "") AS ?daysR)
g.add((ex.CadeAddress, schema.postalCode, Literal("94709")))
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
g.add((ex.CadeAddress, schema.addressCountry, dbp.United_States))
}
""")


</syntaxhighlight>
sparql.setReturnFormat(JSON)
results = sparql.query().convert()


===Typed Literals===
for result in results["results"]["bindings"]:
<syntaxhighlight>
    print(f'The longest an investigation lasted was: {result["max"]["value"]}')
from rdflib import Graph, Literal, Namespace
    print(f'The shortest an investigation lasted was: {result["min"]["value"]}')
from rdflib.namespace import XSD
    print(f'The average investigation lasted: {result["avg"]["value"]}')
g = Graph()
ex = Namespace("http://example.org/")


g.add((ex.Cade, ex.age, Literal(27, datatype=XSD.integer)))
# Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation.
g.add((ex.Cade, ex.gpa, Literal(3.3, datatype=XSD.float)))
g.add((ex.Cade, FOAF.name, Literal("Cade Tracey", datatype=XSD.string)))
g.add((ex.Cade, ex.birthday, Literal("2006-01-01", datatype=XSD.date)))
</syntaxhighlight>


sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>


===Writing and reading graphs/files===
    SELECT ?investigation (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
    ?s  ns1:indictment_days ?days;
        ns1:outcome ns1:indictment;
        ns1:investigation ?investigation.
   
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
    }
    GROUP BY ?investigation
""")


<syntaxhighlight>
sparql.setReturnFormat(JSON)
  # Writing the graph to a file on your system. Possible formats = turtle, n3, xml, nt.
results = sparql.query().convert()
g.serialize(destination="triples.txt", format="turtle")


  # Parsing a local file
for result in results["results"]["bindings"]:
parsed_graph = g.parse(location="triples.txt", format="turtle")
    print(f'{result["investigation"]["value"]} - min: {result["min"]["value"]}, max: {result["max"]["value"]}, avg: {result["avg"]["value"]}')


  # Parsing a remote endpoint like Dbpedia
dbpedia_graph = g.parse("http://dbpedia.org/resource/Pluto")
</syntaxhighlight>
</syntaxhighlight>


===Graph Binding===
=Wikidata SPARQL (Lab 6)=
<syntaxhighlight>
===Use a DESCRIBE query to retrieve some triples about your entity===
#Graph Binding is useful for at least two reasons:
#(1) We no longer need to specify prefixes with SPARQL queries if they are already bound to the graph.
#(2) When serializing the graph, the serialization will show the correct expected prefix
# instead of default namespace names ns1, ns2 etc.


g = Graph()
<syntaxhighlight lang="SPARQL">
DESCRIBE wd:Q42 LIMIT 100
</syntaxhighlight>
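The Wikidata queries in this lab can also be run from Python rather than the query web interface. A minimal sketch with SPARQLWrapper against the public endpoint (the agent string is just an example value; Wikidata asks clients to identify themselves):

<syntaxhighlight>
from SPARQLWrapper import SPARQLWrapper, JSON

sparql = SPARQLWrapper("https://query.wikidata.org/sparql", agent="INFO216-lab-example")
sparql.setReturnFormat(JSON)
sparql.setQuery("""
    PREFIX wd: <http://www.wikidata.org/entity/>
    SELECT * WHERE {
        wd:Q42 ?p ?o .
    } LIMIT 10
""")
for binding in sparql.query().convert()["results"]["bindings"]:
    print(binding["p"]["value"], binding["o"]["value"])
</syntaxhighlight>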


ex = Namespace("http://example.org/")
===Use a SELECT query to retrieve the first 100 triples about your entity===
dbp = Namespace("http://dbpedia.org/resource/")
schema = Namespace("https://schema.org/")


g.bind("ex", ex)
<syntaxhighlight lang="SPARQL">
g.bind("dbp", dbp)
SELECT * WHERE {
g.bind("schema", schema)
  wd:Q42 ?p ?o .
} LIMIT 100
</syntaxhighlight>
</syntaxhighlight>


===Collection Example===
===Write a local SELECT query that embeds a SERVICE query to retrieve the first 100 triples about your entity to your local machine===


<syntaxhighlight>
<syntaxhighlight lang="SPARQL">
from rdflib import Graph, Namespace
PREFIX wd: <http://www.wikidata.org/entity/>
from rdflib.collection import Collection
 
SELECT * WHERE {
    SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
        SELECT * WHERE {
            wd:Q42 ?p ?o .
        } LIMIT 100
    }
}
</syntaxhighlight>


===Change the SELECT query to an INSERT query that adds the Wikidata triples your local repository===


# Sometimes we want to add many objects or subjects for the same predicate at once.
<syntaxhighlight lang="SPARQL">
# In these cases we can use Collection() to save some time.
PREFIX wd: <http://www.wikidata.org/entity/>
# In this case I want to add all countries that Emma has visited at once.


b = BNode()
INSERT {
g.add((ex.Emma, ex.visit, b))
    wd:Q42 ?p ?o .
Collection(g, b,
} WHERE {
     [ex.Portugal, ex.Italy, ex.France, ex.Germany, ex.Denmark, ex.Sweden])
     SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
        SELECT * WHERE {
            wd:Q42 ?p ?o .
        } LIMIT 100
    }
}
</syntaxhighlight>


# OR
===Use a FILTER statement to only SELECT primary triples in this sense.===


g.add((ex.Emma, ex.visit, ex.EmmaVisits))
<syntaxhighlight lang="SPARQL">
Collection(g, ex.EmmaVisits,
PREFIX wd: <http://www.wikidata.org/entity/>
    [ex.Portugal, ex.Italy, ex.France, ex.Germany, ex.Denmark, ex.Sweden])


SELECT * WHERE {
    wd:Q42 ?p ?o .
    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (STRSTARTS(STR(?o), STR(wd:)))
} LIMIT 100
</syntaxhighlight>
</syntaxhighlight>


==SPARQL==
===Use Wikidata's in-built SERVICE wikibase:label to get labels for all the object resources===


Also see the [[SPARQL Examples]] page!
<syntaxhighlight lang="SPARQL">
PREFIX wd: <http://www.wikidata.org/entity/>


===Querying a local ("in memory") graph===
SELECT ?p ?oLabel WHERE {
    wd:Q42 ?p ?o .
    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (STRSTARTS(STR(?o), STR(wd:)))
    SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
} LIMIT 100
</syntaxhighlight>


Example contents of the file family.ttl:
===Edit your query (by relaxing the FILTER expression) so it also returns triples where the object has DATATYPE xsd:string.===

<syntaxhighlight lang="SPARQL">
PREFIX wd: <http://www.wikidata.org/entity/>

SELECT ?p ?oLabel ?o WHERE {
    wd:Q42 ?p ?o .

    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (
      STRSTARTS(STR(?o), STR(wd:)) || # comment out this whole line to see only string literals!
      DATATYPE(?o) = xsd:string
    )

    SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

} LIMIT 100
</syntaxhighlight>

===Relax the FILTER expression again so it also returns triples with these three predicates (rdfs:label, skos:altLabel and schema:description) ===

<syntaxhighlight lang="SPARQL">
PREFIX wd: <http://www.wikidata.org/entity/>

SELECT ?p ?oLabel ?o WHERE {
    wd:Q42 ?p ?o .

    FILTER (
      (STRSTARTS(STR(?p), STR(wdt:)) && # comment out these three lines to see only fingerprint literals!
      STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string)
      ||
      (?p IN (rdfs:label, skos:altLabel, schema:description) &&
      DATATYPE(?o) = rdf:langString && LANG(?o) = "en")
    )

    SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

} LIMIT 100
</syntaxhighlight>

===Try to restrict the FILTER expression again so that, when the predicate is rdfs:label, skos:altLabel and schema:description, the object must have LANG "en" ===

<syntaxhighlight lang="SPARQL">
PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX bd: <http://www.bigdata.com/rdf#>
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX schema: <http://schema.org/>

SELECT * WHERE {
  SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
    SELECT ?p ?oLabel ?o WHERE {
        wd:Q42 ?p ?o .

        FILTER (
          (STRSTARTS(STR(?p), STR(wdt:)) &&
          STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string)
          ||
          (?p IN (rdfs:label, skos:altLabel, schema:description) &&
          DATATYPE(?o) = rdf:langString && LANG(?o) = "en")
        )

        SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

    } LIMIT 100
  }
}
</syntaxhighlight>

===Change the SELECT query to an INSERT query that adds the Wikidata triples to your local repository ===

<syntaxhighlight lang="SPARQL">
PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX bd: <http://www.bigdata.com/rdf#>
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX schema: <http://schema.org/>

INSERT {
  wd:Q42 ?p ?o .
  ?o rdfs:label ?oLabel .
} WHERE {
  SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
    SELECT ?p ?oLabel ?o WHERE {
        wd:Q42 ?p ?o .
        FILTER (
          (STRSTARTS(STR(?p), STR(wdt:)) &&
          STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string)
          ||
          (?p IN (rdfs:label, skos:altLabel, schema:description) &&
          DATATYPE(?o) = rdf:langString && LANG(?o) = "en")
        )
        SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

    } LIMIT 500
  }
}
</syntaxhighlight>

==If you have more time ==

===You must therefore REPLACE all wdt: prefixes of properties with wd: prefixes and BIND the new URI AS a new variable, for example ?pw. ===

<syntaxhighlight lang="SPARQL">
PREFIX wd: <http://www.wikidata.org/entity/>

SELECT ?pwLabel ?oLabel WHERE {
    wd:Q42 ?p ?o .
    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (STRSTARTS(STR(?o), STR(wd:)))
    BIND (IRI(REPLACE(STR(?p), STR(wdt:), STR(wd:))) AS ?pw)

    SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

} LIMIT 100
</syntaxhighlight>

===Now you can go back to the SELECT statement that returned primary triples with only resource objects (not literal objects or fingerprints). Extend it so it also includes primary triples "one step out", i.e., triples where the subjects are objects of triples involving your reference entity. ===

<syntaxhighlight lang="SPARQL">
PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX bd: <http://www.bigdata.com/rdf#>
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX schema: <http://schema.org/>

INSERT {
  wd:Q42 ?p1 ?o1 .
  ?o1 rdfs:label ?o1Label .
  ?o1 ?p2 ?o2 .
  ?o2 rdfs:label ?o2Label .
} WHERE {
  SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
    SELECT ?p1 ?o1Label ?o1 ?p2 ?o2Label ?o2 WHERE {
        wd:Q42 ?p1 ?o1 .
        ?o1 ?p2 ?o2 .

        FILTER (
          STRSTARTS(STR(?p1), STR(wdt:)) &&
          STRSTARTS(STR(?o1), STR(wd:)) &&
          STRSTARTS(STR(?p2), STR(wdt:)) &&
          STRSTARTS(STR(?o2), STR(wd:))
        )

        SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

    } LIMIT 500
  }
}
</syntaxhighlight>
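The queries above can be pasted into the Wikidata Query Service (https://query.wikidata.org/), but they can also be sent from Python with SPARQLWrapper. The snippet below is only a sketch and not part of the lab solution; the endpoint, query and user-agent string are examples you would adapt to your own setup:

<syntaxhighlight lang="Python">
from SPARQLWrapper import SPARQLWrapper, JSON

# Sketch: run one of the SELECT queries above against the public Wikidata endpoint.
# Wikidata asks clients to send a descriptive User-Agent string.
sparql = SPARQLWrapper("https://query.wikidata.org/sparql", agent="info216-lab-example")
sparql.setQuery("""
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>

SELECT ?p ?o WHERE {
    wd:Q42 ?p ?o .
    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (STRSTARTS(STR(?o), STR(wd:)))
} LIMIT 10
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(result["p"]["value"], result["o"]["value"])
</syntaxhighlight>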


=CSV to RDF (Lab 7)=

<syntaxhighlight lang="Python">
#Imports
import re
from pandas import *
from numpy import nan
from rdflib import Graph, Namespace, URIRef, Literal, RDF, XSD, FOAF
from spotlight import SpotlightException, annotate

SERVER = "https://api.dbpedia-spotlight.org/en/annotate"
# Test around with the confidence, and see how many names change depending on the confidence.
# However, be aware that anything lower than this (0.83) will replace James W. McCord and other names that include James with LeBron James
CONFIDENCE = 0.83

# This function uses DBpedia Spotlight, which was not a part of the CSV lab this year.
def annotate_entity(entity, filters={'types': 'DBpedia:Person'}):
    annotations = []
    try:
        annotations = annotate(address=SERVER, text=entity, confidence=CONFIDENCE, filters=filters)
    except SpotlightException as e:
        print(e)
    return annotations

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

#Pandas' read_csv function to load russia-investigation.csv
df = read_csv("russia-investigation.csv")
#Replaces all instances of nan with None, using numpy's nan
df = df.replace(nan, None)

#Function that prepares the values to be added to the graph as a URI (with the ex prefix) or Literal
def prepareValue(row):
    if row == None: #none type
        value = Literal(row)
    elif isinstance(row, str) and re.match(r'\d{4}-\d{2}-\d{2}', row): #date
        value = Literal(row, datatype=XSD.date)
    elif isinstance(row, bool): #boolean value (true / false)
        value = Literal(row, datatype=XSD.boolean)
    elif isinstance(row, int): #integer
        value = Literal(row, datatype=XSD.integer)
    elif isinstance(row, str): #string
        value = URIRef(ex + row.replace('"', '').replace(" ", "_").replace(",", "").replace("-", "_"))
    elif isinstance(row, float): #float
        value = Literal(row, datatype=XSD.float)

    return value

#Convert the non-semantic CSV dataset into a semantic RDF graph
def csv_to_rdf(df):
    for index, row in df.iterrows():
        id = URIRef(ex + "Investigation_" + str(index))
        investigation = prepareValue(row["investigation"])
        investigation_start = prepareValue(row["investigation-start"])
        investigation_end = prepareValue(row["investigation-end"])
        investigation_days = prepareValue(row["investigation-days"])
        indictment_days = prepareValue(row["indictment-days "])
        cp_date = prepareValue(row["cp-date"])
        cp_days = prepareValue(row["cp-days"])
        overturned = prepareValue(row["overturned"])
        pardoned = prepareValue(row["pardoned"])
        american = prepareValue(row["american"])
        outcome = prepareValue(row["type"])
        name_ex = prepareValue(row["name"])
        president_ex = prepareValue(row["president"])

        #Spotlight search
        name = annotate_entity(str(row['name']))
        president = annotate_entity(str(row['president']).replace(".", ""))

        #Adds the triples to the graph
        g.add((id, RDF.type, ex.Investigation))
        g.add((id, ex.investigation, investigation))
        g.add((id, ex.investigation_start, investigation_start))
        g.add((id, ex.investigation_end, investigation_end))
        g.add((id, ex.investigation_days, investigation_days))
        g.add((id, ex.indictment_days, indictment_days))
        g.add((id, ex.cp_date, cp_date))
        g.add((id, ex.cp_days, cp_days))
        g.add((id, ex.overturned, overturned))
        g.add((id, ex.pardoned, pardoned))
        g.add((id, ex.american, american))
        g.add((id, ex.outcome, outcome))

        #Name (the Spotlight URI if one was found, otherwise the prepared value)
        try:
            g.add((id, ex.person, URIRef(name[0]["URI"])))
        except:
            g.add((id, ex.person, name_ex))

        #President
        try:
            g.add((id, ex.president, URIRef(president[0]["URI"])))
        except:
            g.add((id, ex.president, president_ex))

csv_to_rdf(df)
print(g.serialize())
g.serialize("lab7.ttl", format="ttl")
</syntaxhighlight>
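To sanity-check the lifted data, the serialized file can be parsed back into rdflib and queried. This is only a small sketch (it assumes the lab7.ttl file written above and the ex: namespace used in it):

<syntaxhighlight lang="Python">
from rdflib import Graph

# Sketch: load the lifted data again and count investigations per president.
g = Graph()
g.parse("lab7.ttl", format="ttl")

query = """
PREFIX ex: <http://example.org/>
SELECT ?president (COUNT(?inv) AS ?investigations) WHERE {
    ?inv a ex:Investigation ;
         ex:president ?president .
}
GROUP BY ?president
ORDER BY DESC(?investigations)
"""
for row in g.query(query):
    print(row.president, row.investigations)
</syntaxhighlight>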
=JSON-LD (Lab 8)=

== Task 1) Basic JSON-LD ==

<syntaxhighlight lang="JSON-LD">
{
    "@context": {
        "@base": "http://example.org/",
        "edges": "http://example.org/triple",
        "start": "http://example.org/source",
        "rel": "http://example.org/predicate",
        "end": "http://example.org/object",
        "Person" : "http://example.org/Person",
        "birthday" : {
            "@id" : "http://example.org/birthday",
            "@type" : "xsd:date"
        },
        "nameEng" : {
            "@id" : "http://example.org/en/name",
            "@language" : "en"
        },
        "nameFr" : {
            "@id" : "http://example.org/fr/name",
            "@language" : "fr"
        },
        "nameCh" : {
            "@id" : "http://example.org/ch/name",
            "@language" : "ch"
        },
        "age" : {
            "@id" : "http://example.org/age",
            "@type" : "xsd:int"
        },
        "likes" : "http://example.org/games/likes",
        "haircolor" : "http://example.org/games/haircolor"
    },
    "@graph": [
        {
            "@id": "people/Jeremy",
            "@type": "Person",
            "birthday" : "1987.1.1",
            "nameEng" : "Jeremy",
            "age" : 26
        },
        {
            "@id": "people/Tom",
            "@type": "Person"
        },
        {
            "@id": "people/Ju",
            "@type": "Person",
            "birthday" : "2001.1.1",
            "nameCh" : "Ju",
            "age" : 22,
            "likes" : "basketball"
        },
        {
            "@id": "people/Louis",
            "@type": "Person",
            "birthday" : "1978.1.1",
            "haircolor" : "Black",
            "nameFr" : "Louis",
            "age" : 45
        },
        {"edges" : [
        {
            "start" : "people/Jeremy",
            "rel" : "knows",
            "end" : "people/Tom"
        },
        {
            "start" : "people/Tom",
            "rel" : "knows",
            "end" : "people/Louis"
        },
        {
            "start" : "people/Louis",
            "rel" : "teaches",
            "end" : "people/Ju"
        },
        {
            "start" : "people/Ju",
            "rel" : "plays",
            "end" : "people/Jeremy"
        },
        {
            "start" : "people/Ju",
            "rel" : "plays",
            "end" : "people/Tom"
        }
        ]}
    ]
}
</syntaxhighlight>
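To check what the JSON-LD above actually says, it can be parsed with rdflib, which understands the json-ld format, and written back out as Turtle. A minimal sketch, assuming the document has been saved as task1.jsonld (the file name is just an example):

<syntaxhighlight lang="Python">
from rdflib import Graph

# Sketch: parse the JSON-LD document above and print the triples rdflib extracts from it.
g = Graph()
g.parse("task1.jsonld", format="json-ld")

print(g.serialize(format="turtle"))
</syntaxhighlight>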


== Task 2 & 3) Retrieving JSON-LD from ConceptNet / Programming JSON-LD in Python ==

<syntaxhighlight lang="Python">
import rdflib

CN_BASE = 'http://api.conceptnet.io/c/en/'

g = rdflib.Graph()
g.parse(CN_BASE+'indictment', format='json-ld')

# To download JSON object:

import json
import requests

json_obj = requests.get(CN_BASE+'indictment').json()

# To change the @context:

context = {
    "@base": "http://ex.org/",
    "edges": "http://ex.org/triple/",
    "start": "http://ex.org/s/",
    "rel": "http://ex.org/p/",
    "end": "http://ex.org/o/",
    "label": "http://ex.org/label"
}
json_obj['@context'] = context
json_str = json.dumps(json_obj)

g = rdflib.Graph()
g.parse(data=json_str, format='json-ld')

# To extract triples (here with labels):

r = g.query("""
        SELECT ?s ?sLabel ?p ?o ?oLabel WHERE {
            ?edge
                <http://ex.org/s/> ?s ;
                <http://ex.org/p/> ?p ;
                <http://ex.org/o/> ?o .
            ?s <http://ex.org/label> ?sLabel .
            ?o <http://ex.org/label> ?oLabel .
        }
        """, initNs={'cn': CN_BASE})
print(r.serialize(format='txt').decode())

# Construct a new graph:

r = g.query("""
        CONSTRUCT {
            ?s ?p ?o .
            ?s <http://ex.org/label> ?sLabel .
            ?o <http://ex.org/label> ?oLabel .
        } WHERE {
            ?edge <http://ex.org/s/> ?s ;
                  <http://ex.org/p/> ?p ;
                  <http://ex.org/o/> ?o .
            ?s <http://ex.org/label> ?sLabel .
            ?o <http://ex.org/label> ?oLabel .
        }
        """, initNs={'cn': CN_BASE})

print(r.graph.serialize(format='ttl'))
</syntaxhighlight>
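Continuing the script above, the rewritten graph can also be serialized back to compact JSON-LD by handing the same context object to the serializer. This is a sketch of one possible final step, reusing g and context from the block above:

<syntaxhighlight lang="Python">
# Sketch: compact the rewritten ConceptNet graph back to JSON-LD using the same context.
compacted = g.serialize(format='json-ld', context=context)
print(compacted)
</syntaxhighlight>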


=SHACL (Lab 9)=

<syntaxhighlight lang="Python">
from pyshacl import validate
from rdflib import Graph

data_graph = Graph()
# parses the Turtle example from the task
data_graph.parse("data_graph.ttl")

prefixes = """
@prefix ex: <http://example.org/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
"""

shape_graph = """
ex:PUI_Shape
    a sh:NodeShape ;
    sh:targetClass ex:PersonUnderInvestigation ;
    sh:property [
        sh:path foaf:name ;
        sh:minCount 1 ; #Every person under investigation has exactly one name.
        sh:maxCount 1 ; #Every person under investigation has exactly one name.
        sh:datatype rdf:langString ; #All person names must be language-tagged
    ] ;
    sh:property [
        sh:path ex:chargedWith ;
        sh:nodeKind sh:IRI ; #The object of a charged with property must be a URI.
        sh:class ex:Offense ; #The object of a charged with property must be an offense.
    ] .

# --- If you have more time tasks ---
ex:User_Shape rdf:type sh:NodeShape;
    sh:targetClass ex:Indictment;

    # The only allowed values for ex:american are true, false or unknown.
    sh:property [
        sh:path ex:american;
        sh:pattern "(true|false|unknown)" ;
    ];

    # The value of a property that counts days must be an integer.
    sh:property [
        sh:path ex:indictment_days;
        sh:datatype xsd:integer;
    ];
    sh:property [
        sh:path ex:investigation_days;
        sh:datatype xsd:integer;
    ];

    # The value of a property that indicates a start date must be xsd:date.
    sh:property [
        sh:path ex:investigation_start;
        sh:datatype xsd:date;
    ];

    # The value of a property that indicates an end date must be xsd:date or unknown (tip: you can use sh:or (...) ).
    sh:property [
        sh:path ex:investigation_end;
        sh:or (
            [ sh:datatype xsd:date ]
            [ sh:hasValue "unknown" ]
        )];

    # Every indictment must have exactly one FOAF name for the investigated person.
    sh:property [
        sh:path foaf:name;
        sh:minCount 1;
        sh:maxCount 1;
    ];

    # Every indictment must have exactly one investigated person property, and that person must have the type ex:PersonUnderInvestigation.
    sh:property [
        sh:path ex:investigatedPerson ;
        sh:minCount 1 ;
        sh:maxCount 1 ;
        sh:class ex:PersonUnderInvestigation ;
        sh:nodeKind sh:IRI ;
    ] ;

    # No URI-s can contain hyphens ('-').
    sh:property [
        sh:path ex:outcome ;
        sh:nodeKind sh:IRI ;
        sh:pattern "^[^-]*$" ;
    ] ;

    # Presidents must be identified with URIs.
    sh:property [
        sh:path ex:president ;
        sh:minCount 1 ;
        sh:class ex:President ;
        sh:nodeKind sh:IRI ;
    ] .
"""

shacl_graph = Graph()
# parses the contents of the shape_graph you made in the previous task
shacl_graph.parse(data=prefixes+shape_graph)

# uses pySHACL's validate method to apply the shape_graph constraints to the data_graph
results = validate(
    data_graph,
    shacl_graph=shacl_graph,
    inference='both'
)

# prints out the validation result
boolean_value, results_graph, results_text = results

# print(boolean_value)
print(results_graph.serialize(format='ttl'))
# print(results_text)

#Write a SPARQL query to print out each distinct sh:resultMessage in the results_graph
distinct_messages = """
PREFIX sh: <http://www.w3.org/ns/shacl#>

SELECT DISTINCT ?message WHERE {
    [] sh:result / sh:resultMessage ?message .
}
"""
messages = results_graph.query(distinct_messages)
for row in messages:
    print(row.message)

#Print each sh:resultMessage in the results_graph once, along with the number of times that message has been repeated in the results
count_messages = """
PREFIX sh: <http://www.w3.org/ns/shacl#>

SELECT ?message (COUNT(?node) AS ?num_messages) WHERE {
    [] sh:result ?result .
    ?result sh:resultMessage ?message ;
            sh:focusNode ?node .
}
GROUP BY ?message
ORDER BY DESC(?num_messages) ?message
"""
messages = results_graph.query(count_messages)
print("COUNT    MESSAGE")
for row in messages:
    print(row.num_messages, "      ", row.message)
</syntaxhighlight>
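The shapes are easier to understand if you also try them on a tiny graph you control. The sketch below builds a deliberately invalid data graph (the person and the literal values are made up purely for illustration) and validates it against the shacl_graph defined above:

<syntaxhighlight lang="Python">
from pyshacl import validate
from rdflib import Graph

# Sketch: a tiny, deliberately invalid data graph (made-up example data)
# to see the shape constraints above in action.
test_data = """
@prefix ex: <http://example.org/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .

ex:Jane_Doe a ex:PersonUnderInvestigation ;
    foaf:name "Jane Doe" ;            # plain literal, not language-tagged
    ex:chargedWith "tax evasion" .    # literal where an ex:Offense IRI is expected
"""

test_graph = Graph()
test_graph.parse(data=test_data, format="turtle")

# shacl_graph is the shape graph parsed earlier in this section.
conforms, report_graph, report_text = validate(
    test_graph,
    shacl_graph=shacl_graph,
    inference='rdfs'
)
print(conforms)
print(report_text)
</syntaxhighlight>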
=RDFS (Lab 10)=

<syntaxhighlight lang="Python">
import owlrl
from rdflib import Graph, RDF, Namespace, Literal, XSD, FOAF, RDFS

g = Graph()
ex = Namespace('http://example.org/')
g.bind("ex", ex)
g.bind("foaf", FOAF)

NS = {
    'ex': ex,
    'rdf': RDF,
    'rdfs': RDFS,
    'foaf': FOAF,
}

#Write a small function that computes the RDFS closure on your graph.
def flush():
    engine = owlrl.RDFSClosure.RDFS_Semantics(g, False, False, False)
    engine.closure()
    engine.flush_stored_triples()

#Rick Gates was charged with money laundering and tax evasion.
g.add((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))

#When one thing is charged with another thing,
g.add((ex.chargedWith, RDFS.domain, ex.PersonUnderInvestigation))  #the first thing (subject) is a person under investigation and
g.add((ex.chargedWith, RDFS.range, ex.Offense))  #the second thing (object) is an offense.

#Write a SPARQL query that checks the RDF type(s) of Rick Gates and money laundering in your RDF graph.
print(g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer)
print(g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer)
flush()
print(g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer)
print(g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer)

#A person under investigation is a FOAF person
g.add((ex.PersonUnderInvestigation, RDFS.subClassOf, FOAF.Person))
print(g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer)
flush()
print(g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer)

#Paul Manafort was convicted for tax evasion.
g.add((ex.Paul_Manafort, ex.convictedFor, ex.TaxEvasion))
#When one thing is convicted for another thing, the first thing is also charged with the second thing
g.add((ex.convictedFor, RDFS.subPropertyOf, ex.chargedWith))
flush()
print(g.query('ASK {ex:Paul_Manafort ex:chargedWith ex:TaxEvasion}', initNs=NS).askAnswer)

print(g.serialize(format="turtle"))
</syntaxhighlight>
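Instead of asking for one type at a time, you can also list every type the closure has added for a resource. A short sketch that reuses g, ex, RDF and flush() from the script above:

<syntaxhighlight lang="Python">
# Sketch: after flush(), list all RDF types the RDFS closure has inferred for Rick Gates.
flush()
for rdf_type in g.objects(ex.Rick_Gates, RDF.type):
    print(rdf_type)
</syntaxhighlight>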
=OWL 1 (Lab 11)=

<syntaxhighlight lang="Python">
from rdflib import Graph, RDFS, Namespace, RDF, FOAF, BNode, OWL, URIRef, Literal, XSD
from rdflib.collection import Collection
import owlrl

g = Graph()
ex = Namespace('http://example.org/')
schema = Namespace('http://schema.org/')
dbr = Namespace('https://dbpedia.org/page/')

g.bind("ex", ex)
# g.bind("schema", schema)
g.bind("foaf", FOAF)

# Donald Trump and Robert Mueller are two different persons.
g.add((ex.Donald_Trump, OWL.differentFrom, ex.Robert_Mueller))

# Actually, all the names mentioned in connection with the Mueller investigation refer to different people.
b1 = BNode()
b2 = BNode()
Collection(g, b2, [ex.Robert_Mueller, ex.Paul_Manafort, ex.Rick_Gates, ex.George_Papadopoulos, ex.Michael_Flynn, ex.Michael_Cohen, ex.Roger_Stone, ex.Donald_Trump])
g.add((b1, RDF.type, OWL.AllDifferent))
g.add((b1, OWL.distinctMembers, b2))

# All these people are foaf:Persons as well as schema:Persons
g.add((FOAF.Person, OWL.equivalentClass, schema.Person))

# Tax evasion is a kind of bank and tax fraud.
g.add((ex.TaxEvasion, RDFS.subClassOf, ex.BankFraud))
g.add((ex.TaxEvasion, RDFS.subClassOf, ex.TaxFraud))

# The Donald Trump involved in the Mueller investigation is dbpedia:Donald_Trump and not dbpedia:Donald_Trump_Jr.
g.add((ex.Donald_Trump, OWL.sameAs, dbr.Donald_Trump))
g.add((ex.Donald_Trump, OWL.differentFrom, URIRef(dbr + "Donald_Trump_Jr.")))

# Congress, FBI and the Mueller investigation are foaf:Organizations.
g.add((ex.Congress, RDF.type, FOAF.Organization))
g.add((ex.FBI, RDF.type, FOAF.Organization))
g.add((ex.Mueller_Investigation, RDF.type, FOAF.Organization))

# Nothing can be both a person and an organization.
g.add((FOAF.Person, OWL.disjointWith, FOAF.Organization))

# Leading an organization is a way of being involved in an organization.
g.add((ex.leading, RDFS.subPropertyOf, ex.involved))

# Being a campaign manager or an advisor for is a way of supporting someone.
g.add((ex.campaignManagerTo, RDFS.subPropertyOf, ex.supports))
g.add((ex.advisorTo, RDFS.subPropertyOf, ex.supports))

# Donald Trump is a politician and a Republican.
g.add((ex.Donald_Trump, RDF.type, ex.Politician))
g.add((ex.Donald_Trump, RDF.type, ex.Republican))

# A Republican politician is both a politician and a Republican.
g.add((ex.RepublicanPolitician, RDFS.subClassOf, ex.Politician))
g.add((ex.RepublicanPolitician, RDFS.subClassOf, ex.Republican))

#hasBusinessPartner
g.add((ex.Paul_Manafort, ex.hasBusinessPartner, ex.Rick_Gates))
g.add((ex.hasBusinessPartner, RDF.type, OWL.SymmetricProperty))
g.add((ex.hasBusinessPartner, RDF.type, OWL.IrreflexiveProperty))

#adviserTo
g.add((ex.Michael_Flynn, ex.adviserTo, ex.Donald_Trump))
g.add((ex.adviserTo, RDF.type, OWL.IrreflexiveProperty))
# Not necessarily asymmetric as it's not a given that they couldn't be advisors to each other

#wasLyingTo
g.add((ex.Rick_Gates_Lying, ex.wasLyingTo, ex.FBI))
g.add((ex.wasLyingTo, RDF.type, OWL.IrreflexiveProperty))
# Not asymmetric as the subject and object could lie to each other; also in this context, the FBI can lie to you

#presidentOf
g.add((ex.Donald_Trump, ex.presidentOf, ex.USA))
g.add((ex.presidentOf, RDF.type, OWL.AsymmetricProperty))
g.add((ex.presidentOf, RDF.type, OWL.IrreflexiveProperty))
g.add((ex.presidentOf, RDF.type, OWL.FunctionalProperty)) #can only be president of one country
#not inversefunctionalproperty as Bosnia has 3 presidents https://www.culturalworld.org/do-any-countries-have-more-than-one-president.htm

#hasPresident
g.add((ex.USA, ex.hasPresident, ex.Donald_Trump))
g.add((ex.hasPresident, RDF.type, OWL.AsymmetricProperty))
g.add((ex.hasPresident, RDF.type, OWL.IrreflexiveProperty))
g.add((ex.hasPresident, RDF.type, OWL.InverseFunctionalProperty)) #countries do not share their president with another
#not functionalproperty as a country (Bosnia) can have more than one president

#Closure
owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(g)

#Serialization
print(g.serialize(format="ttl"))
# g.serialize("lab8.xml", format="xml") #serializes to XML file
</syntaxhighlight>
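After the closure has been computed, the OWL property and sameAs axioms above should show up as new triples in g. Two example checks that could be appended to the script are sketched below; they reuse g, ex, dbr and RDF from above, and both should print True once OWL-RL reasoning has been applied:

<syntaxhighlight lang="Python">
# Sketch: check a few triples that OWL RL reasoning should have added.
# Symmetry of ex:hasBusinessPartner:
print((ex.Rick_Gates, ex.hasBusinessPartner, ex.Paul_Manafort) in g)
# owl:sameAs should have copied ex:Donald_Trump's types over to dbr:Donald_Trump:
print((dbr.Donald_Trump, RDF.type, ex.Politician) in g)
</syntaxhighlight>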
=OWL 2 (Lab 12)=

<syntaxhighlight lang="Turtle">
@prefix : <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> .
@prefix dc: <http://purl.org/dc/terms#> .
@prefix io: <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> .
@prefix dbr: <http://dbpedia.org/resource/> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@base <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> .

<http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology> rdf:type owl:Ontology .

#################################################################
#    Object Properties
#################################################################

###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#indictedIn
io:indictedIn rdf:type owl:ObjectProperty ;
              rdfs:subPropertyOf io:involvedIn ;
              rdfs:domain io:InvestigatedPerson ;
              rdfs:range io:Investigation .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#investigating
io:investigating rdf:type owl:ObjectProperty ;
                 rdfs:subPropertyOf io:involvedIn ;
                 rdfs:domain io:Investigator ;
                 rdfs:range io:Investigation .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#involvedIn
io:involvedIn rdf:type owl:ObjectProperty ;
              rdfs:domain foaf:Person ;
              rdfs:range io:Investigation .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#leading
io:leading rdf:type owl:ObjectProperty ;
           rdfs:subPropertyOf io:investigating ;
           rdfs:domain io:InvestigationLeader ;
           rdfs:range io:Investigation .


#################################################################
#    Data properties
#################################################################

###  http://purl.org/dc/elements/1.1/description
<http://purl.org/dc/elements/1.1/description> rdf:type owl:DatatypeProperty ;
                                              rdfs:domain io:Investigation ;
                                              rdfs:range xsd:string .


###  http://www.w3.org/ns/prov#endedAtTime
prov:endedAtTime rdf:type owl:DatatypeProperty ,
                          owl:FunctionalProperty ;
                 rdfs:domain io:Investigation ;
                 rdfs:range xsd:dateTime .


###  http://www.w3.org/ns/prov#startedAtTime
prov:startedAtTime rdf:type owl:DatatypeProperty ,
                            owl:FunctionalProperty ;
                   rdfs:domain io:Investigation ;
                   rdfs:range xsd:dateTime .


###  http://xmlns.com/foaf/0.1/name
foaf:name rdf:type owl:DatatypeProperty ;
          rdfs:domain foaf:Person ;
          rdfs:range xsd:string .


###  http://xmlns.com/foaf/0.1/title
foaf:title rdf:type owl:DatatypeProperty ;
           rdfs:domain io:Investigation ;
           rdfs:range xsd:string .


#################################################################
#    Classes
#################################################################

###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#InvestigatedPerson
io:InvestigatedPerson rdf:type owl:Class ;
                      rdfs:subClassOf io:Person ;
                      owl:disjointWith io:Investigator .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Investigation
io:Investigation rdf:type owl:Class .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#InvestigationLeader
io:InvestigationLeader rdf:type owl:Class ;
                       rdfs:subClassOf io:Investigator .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Investigator
io:Investigator rdf:type owl:Class ;
                rdfs:subClassOf io:Person .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Person
io:Person rdf:type owl:Class ;
          rdfs:subClassOf foaf:Person .


###  http://xmlns.com/foaf/0.1/Person
foaf:Person rdf:type owl:Class .


#################################################################
#    Individuals
#################################################################

###  http://dbpedia.org/resource/Donald_Trump
dbr:Donald_Trump rdf:type owl:NamedIndividual ;
                 foaf:name "Donald Trump" .


###  http://dbpedia.org/resource/Elizabeth_Prelogar
dbr:Elizabeth_Prelogar rdf:type owl:NamedIndividual ;
                       io:investigating <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ;
                       foaf:name "Elizabeth Prelogar" .


###  http://dbpedia.org/resource/Michael_Flynn
dbr:Michael_Flynn rdf:type owl:NamedIndividual ;
                  foaf:name "Michael Flynn" .


###  http://dbpedia.org/resource/Paul_Manafort
dbr:Paul_Manafort rdf:type owl:NamedIndividual ;
                  io:indictedIn <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ;
                  foaf:name "Paul Manafort" .


###  http://dbpedia.org/resource/Robert_Mueller
dbr:Robert_Mueller rdf:type owl:NamedIndividual ;
                   io:leading <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ;
                   foaf:name "Robert Mueller" .


###  http://dbpedia.org/resource/Roger_Stone
dbr:Roger_Stone rdf:type owl:NamedIndividual ;
                foaf:name "Roger Stone" .


###  http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)
<http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> rdf:type owl:NamedIndividual ;
                                                                        foaf:title "Mueller Investigation" .


#################################################################
#    General axioms
#################################################################

[ rdf:type owl:AllDifferent ;
  owl:distinctMembers ( dbr:Donald_Trump
                        dbr:Elizabeth_Prelogar
                        dbr:Michael_Flynn
                        dbr:Paul_Manafort
                        dbr:Robert_Mueller
                        dbr:Roger_Stone
                      )
] .


###  Generated by the OWL API (version 4.5.25.2023-02-15T19:15:49Z) https://github.com/owlcs/owlapi
</syntaxhighlight>
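The ontology above can also be checked programmatically by loading it into rdflib and computing the OWL RL closure, for example to confirm that io:leading entails io:involvedIn through the subproperty chain. A sketch, assuming the Turtle has been saved as investigation_ontology.ttl (the file name is just an example):

<syntaxhighlight lang="Python">
import owlrl
from rdflib import Graph, Namespace, URIRef

io = Namespace("http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#")
investigation = URIRef("http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)")
mueller = URIRef("http://dbpedia.org/resource/Robert_Mueller")

# Sketch: load the ontology and compute the OWL RL closure.
g = Graph()
g.parse("investigation_ontology.ttl", format="turtle")
owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(g)

# io:leading is a subproperty of io:investigating, which is a subproperty of io:involvedIn,
# so Robert Mueller should now also be involved in the investigation.
print((mueller, io.involvedIn, investigation) in g)
</syntaxhighlight>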


=Using Graph Embeddings (Lab 13)=

https://colab.research.google.com/drive/1WkRJUeUBVF5yVv7o0pOKfsd4pqG6369k
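The notebook linked above contains the full solution. As a rough illustration of the underlying idea, an RDF2Vec-style recipe is: generate random walks over the graph, treat each walk as a "sentence" of URIs, and train a word-embedding model on those sentences. The sketch below does this with rdflib and gensim; it is a toy illustration only, not the approach or data used in the notebook, and the input file is just an example:

<syntaxhighlight lang="Python">
import random
from rdflib import Graph, URIRef
from gensim.models import Word2Vec

# Sketch: toy RDF2Vec-style embeddings from random walks over an rdflib graph.
g = Graph()
g.parse("lab7.ttl", format="ttl")   # any RDF file will do; lab7.ttl is just an example

def random_walks(graph, num_walks=10, walk_length=4):
    walks = []
    entities = {s for s in graph.subjects() if isinstance(s, URIRef)}
    for entity in entities:
        for _ in range(num_walks):
            walk, node = [str(entity)], entity
            for _ in range(walk_length):
                neighbours = [(p, o) for p, o in graph.predicate_objects(node)
                              if isinstance(o, URIRef)]
                if not neighbours:
                    break
                p, o = random.choice(neighbours)
                walk.extend([str(p), str(o)])
                node = o
            walks.append(walk)
    return walks

# Each walk becomes a "sentence"; Word2Vec then assigns every URI a vector.
model = Word2Vec(random_walks(g), vector_size=64, window=5, min_count=1, sg=1)

some_uri = model.wv.index_to_key[0]
print(some_uri, model.wv.most_similar(some_uri, topn=5))
</syntaxhighlight>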

Latest revision as of 10:30, 10 May 2024

Here we will present suggested solutions after each lab. The page will be updated as the course progresses

Getting started (Lab 1)

from rdflib import Graph, Namespace

ex = Namespace('http://example.org/')

g = Graph()

g.bind("ex", ex)

# The Mueller Investigation was lead by Robert Mueller
g.add((ex.MuellerInvestigation, ex.leadBy, ex.RobertMueller))

# It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, Michael Cohen, and Roger Stone.
g.add((ex.MuellerInvestigation, ex.involved, ex.PaulManafort))
g.add((ex.MuellerInvestigation, ex.involved, ex.RickGates))
g.add((ex.MuellerInvestigation, ex.involved, ex.GeorgePapadopoulos))
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelFlynn))
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelCohen))
g.add((ex.MuellerInvestigation, ex.involved, ex.RogerStone))

# Paul Manafort was business partner of Rick Gates
g.add((ex.PaulManafort, ex.businessPartner, ex.RickGates))

# He was campaign chairman for Donald Trump
g.add((ex.PaulManafort, ex.campaignChairman, ex.DonaldTrump))

# He was charged with money laundering, tax evasion, and foreign lobbying.
g.add((ex.PaulManafort, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.PaulManafort, ex.chargedWith, ex.TaxEvasion))
g.add((ex.PaulManafort, ex.chargedWith, ex.ForeignLobbying))

# He was convicted for bank and tax fraud.
g.add((ex.PaulManafort, ex.convictedOf, ex.BankFraud))
g.add((ex.PaulManafort, ex.convictedOf, ex.TaxFraud))

# He pleaded guilty to conspiracy.
g.add((ex.PaulManafort, ex.pleadGuiltyTo, ex.Conspiracy))

# He was sentenced to prison.
g.add((ex.PaulManafort, ex.sentencedTo, ex.Prison))

# He negotiated a plea agreement.
g.add((ex.PaulManafort, ex.negotiated, ex.PleaAgreement))

# Rick Gates was charged with money laundering, tax evasion and foreign lobbying.
g.add((ex.RickGates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.RickGates, ex.chargedWith, ex.TaxEvasion))
g.add((ex.RickGates, ex.chargedWith, ex.ForeignLobbying))

# He pleaded guilty to conspiracy and lying to FBI.
g.add((ex.RickGates, ex.pleadGuiltyTo, ex.Conspiracy))
g.add((ex.RickGates, ex.pleadGuiltyTo, ex.LyingToFBI))

# Use the serialize method of rdflib.Graph to write out the model in different formats (on screen or to file)
print(g.serialize(format="ttl")) # To screen
#g.serialize("lab1.ttl", format="ttl") # To file

# Loop through the triples in the model to print out all triples that have pleading guilty as predicate
for subject, object in g[ : ex.pleadGuiltyTo :]:
    print(subject, ex.pleadGuiltyTo, object)

# --- IF you have more time tasks ---

# Michael Cohen, Michael Flynn and the lying are part of lab 2, and therefore the answer is not provided this week

#Write a method (function) that submits your model for rendering and saves the returned image to file.
import requests
import shutil

def graphToImage(graphInput):
    data = {"rdf":graphInput, "from":"ttl", "to":"png"}
    link = "http://www.ldf.fi/service/rdf-grapher"
    response = requests.get(link, params = data, stream=True)
    # print(response.content)
    print(response.raw)
    with open("lab1.png", "wb") as file:
        shutil.copyfileobj(response.raw, file)

graph = g.serialize(format="ttl")
graphToImage(graph)

RDF programming with RDFlib (Lab 2)

from rdflib import Graph, Namespace, Literal, BNode, XSD, FOAF, RDF, URIRef
from rdflib.collection import Collection

g = Graph()

# Getting the graph created in the first lab
g.parse("lab1.ttl", format="ttl")

ex = Namespace("http://example.org/")

g.bind("ex", ex)
g.bind("foaf", FOAF)

# --- Michael Cohen ---
# Michael Cohen was Donald Trump's attorney.
g.add((ex.MichaelCohen, ex.attorneyTo, ex.DonaldTrump))
# He pleaded guilty for lying to Congress.
g.add((ex.MichaelCohen, ex.pleadGuiltyTo, ex.LyingToCongress))

# --- Michael Flynn ---
# Michael Flynn was adviser to Donald Trump.
g.add((ex.MichaelFlynn, ex.adviserTo, ex.DonaldTrump))
# He pleaded guilty for lying to the FBI.
g.add((ex.MichaelFlynn, ex.pleadGuiltyTo, ex.LyingToFBI))
# He negotiated a plea agreement.
g.add((ex.MichaelFlynn, ex.negotiated, ex.PleaAgreement))

# Change your graph so it represents instances of lying as blank nodes.
# Remove the triples that will be duplicated
g.remove((ex.MichaelFlynn, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.MichaelFlynn, ex.negotiated, ex.PleaAgreement))
g.remove((ex.RickGates, ex.pleadGuiltyTo, ex.LyingToFBI))
g.remove((ex.RickGates, ex.pleadGuiltyTo, ex.Conspiracy))
g.remove((ex.RickGates, ex.chargedWith, ex.ForeignLobbying))
g.remove((ex.RickGates, ex.chargedWith, ex.MoneyLaundering))
g.remove((ex.RickGates, ex.chargedWith, ex.TaxEvasion))
g.remove((ex.MichaelCohen, ex.pleadGuiltyTo, ex.LyingToCongress))

# --- Michael Flynn ---
FlynnLying = BNode() 
g.add((FlynnLying, ex.crime, ex.LyingToFBI))
g.add((FlynnLying, ex.pleadGuiltyOn, Literal("2017-12-01", datatype=XSD.date)))
g.add((FlynnLying, ex.liedAbout, Literal("His communications with a former Russian ambassador during the presidential transition", datatype=XSD.string)))
g.add((FlynnLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.MichaelFlynn, ex.pleadGuiltyTo, FlynnLying))

# --- Rick Gates ---
GatesLying = BNode()
Crimes = BNode()
Charged = BNode()
Collection(g, Crimes, [ex.LyingToFBI, ex.Conspiracy])
Collection(g, Charged, [ex.ForeignLobbying, ex.MoneyLaundering, ex.TaxEvasion])
g.add((GatesLying, ex.crime, Crimes))
g.add((GatesLying, ex.chargedWith, Charged))
g.add((GatesLying, ex.pleadGuiltyOn, Literal("2018-02-23", datatype=XSD.date)))
g.add((GatesLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.RickGates, ex.pleadGuiltyTo, GatesLying))

# --- Michael Cohen ---
CohenLying = BNode()
g.add((CohenLying, ex.crime, ex.LyingToCongress))
g.add((CohenLying, ex.liedAbout, ex.TrumpRealEstateDeal))
g.add((CohenLying, ex.prosecutorsAlleged, Literal("In an August 2017 letter Cohen sent to congressional committees investigating Russian election interference, he falsely stated that the project ended in January 2016", datatype=XSD.string)))
g.add((CohenLying, ex.mullerInvestigationAlleged, Literal("Cohen falsely stated that he had never agreed to travel to Russia for the real estate deal and that he did not recall any contact with the Russian government about the project", datatype=XSD.string)))
g.add((CohenLying, ex.pleadGuiltyOn, Literal("2018-11-29", datatype=XSD.date)))
g.add((CohenLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean)))
g.add((ex.MichaelCohen, ex.pleadGuiltyTo, CohenLying))

print(g.serialize(format="ttl"))

#Save (serialize) your graph to a Turtle file.
# g.serialize("lab2.ttl", format="ttl")

#Add a few triples to the Turtle file with more information about Donald Trump.
'''
ex:Donald_Trump ex:address [ ex:city ex:Palm_Beach ;
            ex:country ex:United_States ;
            ex:postalCode 33480 ;
            ex:residence ex:Mar_a_Lago ;
            ex:state ex:Florida ;
            ex:streetName "1100 S Ocean Blvd"^^xsd:string ] ;
    ex:previousAddress [ ex:city ex:Washington_DC ;
            ex:country ex:United_States ;
            ex:phoneNumber "1 202 456 1414"^^xsd:integer ;
            ex:postalCode "20500"^^xsd:integer ;
            ex:residence ex:The_White_House ;
            ex:streetName "1600 Pennsylvania Ave."^^xsd:string ];
    ex:marriedTo ex:Melania_Trump;
    ex:fatherTo (ex:Ivanka_Trump ex:Donald_Trump_Jr ex:Tiffany_Trump ex:Eric_Trump ex:Barron_Trump).
'''

#Read (parse) the Turtle file back into a Python program, and check that the new triples are there
def serialize_Graph():
    newGraph = Graph()
    newGraph.parse("lab2.ttl")
    print(newGraph.serialize())

#Don't need this to run until after adding the triples above to the ttl file
# serialize_Graph() 
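
# A minimal hedged check (assuming the Donald Trump triples above have been added
# to lab2.ttl and that ex: is bound to http://example.org/): parse the file back in
# and ASK for one of the new triples.
def check_new_triples():
    check_graph = Graph()
    check_graph.parse("lab2.ttl")
    return check_graph.query("""
        PREFIX ex: <http://example.org/>
        ASK { ex:Donald_Trump ex:marriedTo ex:Melania_Trump }
    """).askAnswer

# check_new_triples() # run after the new triples have been added to lab2.ttl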

#Write a method (function) that starts with Donald Trump and prints out the graph depth-first to show how the other graph nodes are connected to him
visited_nodes = set()

def create_Tree(model, nodes):
    #Traverse the model breadth-first to create the tree.
    global visited_nodes
    tree = Graph()
    children = set()
    visited_nodes |= set(nodes)
    for s, p, o in model:
        if s in nodes and o not in visited_nodes:
            tree.add((s, p, o))
            visited_nodes.add(o)
            children.add(o)
        if o in nodes and s not in visited_nodes:
            invp = URIRef(f'{p}_inv') #_inv represents inverse of
            tree.add((o, invp, s))
            visited_nodes.add(s)
            children.add(s)
    if len(children) > 0:
        children_tree = create_Tree(model, children)
        for triple in children_tree:
            tree.add(triple)
    return tree

def print_Tree(tree, root, indent=0):
    #Print the tree depth-first.
    print(str(root))
    for s, p, o in tree:
        if s==root:
            print('    '*indent + '  ' + str(p), end=' ')
            print_Tree(tree, o, indent+1)
    
tree = create_Tree(g, [ex.Donald_Trump])
print_Tree(tree, ex.Donald_Trump)

=SPARQL (Lab 3-4)=

List all triples

SELECT ?s ?p ?o
WHERE {?s ?p ?o .}

List the first 100 triples

SELECT ?s ?p ?o
WHERE {?s ?p ?o .}
LIMIT 100

Count the number of triples

SELECT (COUNT(*) as ?count)
WHERE {?s ?p ?o .}

Count the number of indictments

PREFIX ns1: <http://example.org#>

SELECT (COUNT(?ind) as ?amount)
WHERE {
   ?s ns1:outcome ?ind;
      ns1:outcome ns1:indictment.
}

List the names of everyone who pleaded guilty, along with the name of the investigation

PREFIX ns1: <http://example.org#>

SELECT ?name ?invname
WHERE {
   ?s ns1:name ?name;
      ns1:investigation ?invname;
      ns1:outcome ns1:guilty-plea .
}

List the names of everyone who were convicted, but who had their conviction overturned by which president

PREFIX ns1: <http://example.org#>

SELECT ?name ?president
WHERE {
   ?s ns1:name ?name;
      ns1:president ?president;
      ns1:outcome ns1:conviction;
      ns1:overturned ns1:true.
}

For each investigation, list the number of indictments made

PREFIX ns1: <http://example.org#>

SELECT ?invs (COUNT(?invs) as ?count)
WHERE {
   ?s ns1:investigation ?invs;
      ns1:outcome ns1:indictment .
}
GROUP BY ?invs

For each investigation with multiple indictments, list the number of indictments made

PREFIX ns1: <http://example.org#>

SELECT ?invs (COUNT(?invs) as ?count)
WHERE {
   ?s ns1:investigation ?invs;
      ns1:outcome ns1:indictment .
}
GROUP BY ?invs
HAVING(?count > 1)

For each investigation with multiple indictments, list the number of indictments made, sorted with the most indictments first

PREFIX ns1: <http://example.org#>

SELECT ?invs (COUNT(?invs) as ?count)
WHERE {
   ?s ns1:investigation ?invs;
      ns1:outcome ns1:indictment .
}
GROUP BY ?invs
HAVING(?count > 1)
ORDER BY DESC(?count)

For each president, list the numbers of convictions and of pardons made

PREFIX ns1: <http://example.org#>

SELECT ?president (COUNT(?outcome) as ?conviction) (COUNT(?pardon) as ?pardons)
WHERE {
   ?s ns1:president ?president;
      ns1:outcome ?outcome ;
      ns1:outcome ns1:conviction.
      OPTIONAL{
         ?s ns1:pardoned ?pardon .
         FILTER (?pardon = ns1:true)
      }
}
GROUP BY ?president

Rename muellerkg:name to something like muellerkg:person

PREFIX ns1: <http://example.org#>

DELETE{?s ns1:name ?o}
INSERT{?s ns1:person ?o}
WHERE {?s ns1:name ?o}

Update the graph so all the investigated person and president nodes become the subjects in foaf:name triples with the corresponding strings

PREFIX ns1: <http://example.org#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>

#Persons
INSERT {?person foaf:name ?name}
WHERE {
      ?investigation ns1:person ?person .
      BIND(REPLACE(STR(?person), STR(ns1:), "") AS ?name)
}

#Presidents
INSERT {?president foaf:name ?name}
WHERE {
      ?investigation ns1:president ?president .
      BIND(REPLACE(STR(?president), STR(ns1:), "") AS ?name)
}

Use INSERT DATA updates to add these triples

PREFIX ns1: <http://example.org#>

INSERT DATA {
     ns1:George_Papadopoulos ns1:adviserTo ns1:Donald_Trump;
         ns1:pleadGuiltyTo ns1:LyingToFBI;
         ns1:sentencedTo ns1:Prison.

     ns1:Roger_Stone a ns1:Republican;
         ns1:adviserTo ns1:Donald_Trump;
         ns1:officialTo ns1:Trump_Campaign;
         ns1:interactedWith ns1:Wikileaks;
         ns1:providedTestimony ns1:House_Intelligence_Committee;
         ns1:clearedOf ns1:AllCharges.
}

#To test if added
SELECT ?p ?o
WHERE {ns1:Roger_Stone ?p ?o .}

Use DELETE DATA and then INSERT DATA updates to correct that Roger Stone was cleared of all charges

PREFIX ns1: <http://example.org#>

DELETE DATA {
      ns1:Roger_Stone ns1:clearedOf ns1:AllCharges .
}

INSERT DATA {
      ns1:Roger_Stone ns1:indictedFor ns1:ObstructionOfJustice,
                                      ns1:WitnessTampering,
                                      ns1:FalseStatements.
}

#The task specifically requested DELETE DATA & INSERT DATA, but below is a more efficient solution

DELETE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.}
INSERT{
   ns1:Roger_Stone ns1:indictedFor ns1:ObstructionOfJustice,
                                   ns1:WitnessTampering,
                                   ns1:FalseStatements.
}
WHERE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.}

Use a DESCRIBE query to show the updated information about Roger Stone

PREFIX ns1: <http://example.org#>

DESCRIBE ?o
WHERE {ns1:Roger_Stone ns1:indictedFor ?o .}

Use a CONSTRUCT query to create a new RDF graph with triples only about Roger Stone

PREFIX ns1: <http://example.org#>

CONSTRUCT {
   ns1:Roger_Stone ?p ?o.
   ?s ?p2 ns1:Roger_Stone.
}
WHERE {
   ns1:Roger_Stone ?p ?o .
   ?s ?p2 ns1:Roger_Stone
}

Write a DELETE/INSERT statement to change one of the prefixes in your graph

PREFIX ns1: <http://example.org#>
PREFIX dbp: <https://dbpedia.org/page/>

DELETE {?s ns1:person ?o1}
INSERT {?s ns1:person ?o2}
WHERE{
   ?s ns1:person ?o1 .
   BIND (IRI(replace(str(?o1), str(ns1:), str(dbp:)))  AS ?o2)
}

#This update changes the object in triples with ns1:person as the predicate.
#It changes its prefix from ns1 (which is the "shortcut/shorthand" for example.org) to the prefix dbp (dbpedia.org).

Write an INSERT statement to add at least one significant date to the Mueller investigation, with literal type xsd:date. Write a DELETE/INSERT statement to change the date to a string, and a new DELETE/INSERT statement to change it back to xsd:date.

#Whilst this solution is not exactly what the task asks for, I feel it is more appropriate given the dataset.
#The following update changes the objects that use cp_date as predicate from a URI to a literal with date as its datatype.

PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ns1: <http://example.org#>

DELETE {?s ns1:cp_date ?o}
INSERT{?s ns1:cp_date ?o3}
WHERE{
   ?s ns1:cp_date ?o .
   BIND (replace(str(?o), str(ns1:), "")  AS ?o2)
   BIND (STRDT(STR(?o2), xsd:date) AS ?o3)
}

#To test:

PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ns1: <http://example.org#>

SELECT ?s ?o
WHERE{
   ?s ns1:cp_date ?o.
   FILTER(datatype(?o) = xsd:date)
}

#To change it to an integer, use the following code; to change it back to a date, swap "xsd:integer" for "xsd:date"

PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ns1: <http://example.org#>

DELETE {?s ns1:cp_date ?o}
INSERT{?s ns1:cp_date ?o2}
WHERE{
   ?s ns1:cp_date ?o .
   BIND (STRDT(STR(?o), xsd:integer) AS ?o2)
}

=SPARQL Programming (Lab 5)=

from rdflib import Graph, Namespace, RDF, FOAF
from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE

g = Graph()
g.parse("Russia_investigation_kg.ttl")

# ----- RDFLIB -----
ex = Namespace('http://example.org#')

NS = {
    '': ex,
    'rdf': RDF,
    'foaf': FOAF,
}

# Print out a list of all the predicates used in your graph.
task1 = g.query("""
SELECT DISTINCT ?p WHERE{
    ?s ?p ?o .
}
""", initNs=NS)

print(list(task1))

# Print out a sorted list of all the presidents represented in your graph.
task2 = g.query("""
SELECT DISTINCT ?president WHERE{
    ?s :president ?president .
}
ORDER BY ?president
""", initNs=NS)

print(list(task2))

# Create dictionary (Python dict) with all the represented presidents as keys. For each key, the value is a list of names of people indicted under that president.
task3_dic = {}

task3 = g.query("""
SELECT ?president ?person WHERE{
    ?s :president ?president;
       :name ?person;
       :outcome :indictment.
}
""", initNs=NS)

for president, person in task3:
    if president not in task3_dic:
        task3_dic[president] = [person]
    else:
        task3_dic[president].append(person)

print(task3_dic)

# Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people.

# This task is a lot trickier than it needs to be. As far as I'm aware RDFLib has no HAVING support, so a query like this:
task4 = g.query("""
ASK {
  	SELECT (COUNT(?s) as ?count) WHERE{
    	?s :pardoned :true;
   	   :president :Bill_Clinton  .
    }
    HAVING (?count > 5)
}
""", initNs=NS)

print(task4.askAnswer)

# This query, which works fine in Blazegraph and is valid SPARQL, will always return false in RDFLib because it uses HAVING.
# Instead you have to use a nested SELECT query like the one below, where you use FILTER instead of HAVING. Donald Trump has no pardons,
# so I have instead chosen Bill Clinton, with 13 pardons, to check that the query works.

task4 = g.query("""
    ASK{
        SELECT ?count WHERE{{
  	        SELECT (COUNT(?s) as ?count) WHERE{
    	        ?s :pardoned :true;
                   :president :Bill_Clinton  .
                }}
        FILTER (?count > 5) 
        }
    }
""", initNs=NS)

print(task4.askAnswer)

# Use a DESCRIBE query to create a new graph with information about Donald Trump. Print out the graph in Turtle format.

# By all accounts, DESCRIBE queries are yet to be implemented in RDFLib, but there is ongoing work to add them:
# https://github.com/RDFLib/rdflib/pull/2221 <--- Issue and proposed solution raised
# https://github.com/RDFLib/rdflib/commit/2325b4a81724c1ccee3a131067db4fbf9b4e2629 <--- Solution committed to RDFLib
# The commented-out query below does not work yet, but it should once DESCRIBE support lands in RDFLib.

# task5 = g.query(""" 
# DESCRIBE :Donald_Trump
# """, initNs=NS)

# print(task5.serialize())
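
# Until then, a hedged workaround is to emulate DESCRIBE with a CONSTRUCT query that
# collects every triple where Donald Trump appears as subject or object, and then
# serialize the resulting graph in Turtle.
task5_workaround = g.query("""
CONSTRUCT {
    :Donald_Trump ?p ?o .
    ?s ?p2 :Donald_Trump .
} WHERE {
    { :Donald_Trump ?p ?o . }
    UNION
    { ?s ?p2 :Donald_Trump . }
}
""", initNs=NS)

print(task5_workaround.graph.serialize(format="ttl"))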

# ----- SPARQLWrapper -----

SERVER = 'http://localhost:7200' #Might need to replace this
REPOSITORY = 'Labs' #Replace with your repository name

# Query Endpoint
sparql = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}') 
# Update Endpoint
sparqlUpdate = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}/statements')

# Ask whether there was an ongoing indictment on the date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    ASK {
        SELECT ?end ?start
        WHERE{
            ?s ns1:investigation_end ?end;
               ns1:investigation_start ?start;
               ns1:outcome ns1:indictment.
            FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date) 
	    }
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(f"Are there any investigation on the 1990-01-01: {results['boolean']}")

# List ongoing indictments on that date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    SELECT ?s
    WHERE{
        ?s ns1:investigation_end ?end;
           ns1:investigation_start ?start;
           ns1:outcome ns1:indictment.
        FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date) 
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

print("The ongoing investigations on the 1990-01-01 are:")
for result in results["results"]["bindings"]:
    print(result["s"]["value"])

# Describe investigation number 100 (muellerkg:investigation_100).
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    DESCRIBE ns1:investigation_100
""")

sparql.setReturnFormat(TURTLE)
results = sparql.query().convert()

print(results)

# Print out a list of all the types used in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    SELECT DISTINCT ?types
    WHERE{
        ?s rdf:type ?types . 
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

rdf_Types = []

for result in results["results"]["bindings"]:
    rdf_Types.append(result["types"]["value"])

print(rdf_Types)

# Update the graph so that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?invest rdf:type ns1:Investigation .
    }
    WHERE{
        ?s ns1:investigation ?invest .
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#To Test
sparql.setQuery("""
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>

    ASK{
        ns1:watergate rdf:type ns1:Investigation.
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])

# Update the graph so that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?person rdf:type ns1:IndictedPerson .
    }
    WHERE{
        ?s ns1:name ?person .
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson
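
# The same test written out as code, a hedged sketch (it assumes ns1:Deborah_Gore_Dean
# exists in the repository, as suggested above):
sparql.setQuery("""
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>

    ASK{
        ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson.
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])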

# Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>

    INSERT{
        ?invest dc:title ?investString.
    }
    WHERE{
        ?s ns1:investigation ?invest .
        BIND (replace(str(?invest), str(ns1:), "")  AS ?investString)
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate"
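
# The same test written out as code, a hedged sketch (it assumes the dc:title update
# above has already been applied to the repository):
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>

    ASK{
        ns1:watergate dc:title "watergate".
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])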

# Print out a sorted list of all the indicted persons represented in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX foaf: <http://xmlns.com/foaf/0.1/>

    SELECT ?name
    WHERE{
    ?s  ns1:name ?name;
            ns1:outcome ns1:indictment.
    }
    ORDER BY ?name
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

names = []

for result in results["results"]["bindings"]:
    names.append(result["name"]["value"])

print(names)

# Print out the minimum, average and maximum indictment days for all the indictments in the graph.

sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>

    SELECT (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
        ?s  ns1:indictment_days ?days;
            ns1:outcome ns1:indictment.
    
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
}
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(f'The longest indictment lasted: {result["max"]["value"]} days')
    print(f'The shortest indictment lasted: {result["min"]["value"]} days')
    print(f'The average indictment lasted: {result["avg"]["value"]} days')

# Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation.

sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>

    SELECT ?investigation (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
    ?s  ns1:indictment_days ?days;
        ns1:outcome ns1:indictment;
        ns1:investigation ?investigation.
    
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
    }
    GROUP BY ?investigation
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(f'{result["investigation"]["value"]} - min: {result["min"]["value"]}, max: {result["max"]["value"]}, avg: {result["avg"]["value"]}')

=Wikidata SPARQL (Lab 6)=

Use a DESCRIBE query to retrieve some triples about your entity

DESCRIBE wd:Q42 LIMIT 100

Use a SELECT query to retrieve the first 100 triples about your entity

SELECT * WHERE {
  wd:Q42 ?p ?o .
} LIMIT 100

Write a local SELECT query that embeds a SERVICE query to retrieve the first 100 triples about your entity to your local machine

PREFIX wd: <http://www.wikidata.org/entity/>

SELECT * WHERE {
    SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
        SELECT * WHERE {
            wd:Q42 ?p ?o .
        } LIMIT 100
    }
}

Change the SELECT query to an INSERT query that adds the Wikidata triples to your local repository

PREFIX wd: <http://www.wikidata.org/entity/>

INSERT {
    wd:Q42 ?p ?o .
} WHERE {
    SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
        SELECT * WHERE {
            wd:Q42 ?p ?o .
        } LIMIT 100
    }
}

Use a FILTER statement to only SELECT primary triples in this sense.

PREFIX wd: <http://www.wikidata.org/entity/>

SELECT * WHERE {
    wd:Q42 ?p ?o .
 
    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (STRSTARTS(STR(?o), STR(wd:)))
} LIMIT 100

Use Wikidata's in-built SERVICE wikibase:label to get labels for all the object resources

PREFIX wd: <http://www.wikidata.org/entity/>

SELECT ?p ?oLabel WHERE {
    wd:Q42 ?p ?o .
 
    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (STRSTARTS(STR(?o), STR(wd:)))
 
    SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
 
} LIMIT 100

Edit your query (by relaxing the FILTER expression) so it also returns triples where the object has DATATYPE xsd:string.

PREFIX wd: <http://www.wikidata.org/entity/>

SELECT ?p ?oLabel ?o WHERE {
    wd:Q42 ?p ?o .
 
    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (
      STRSTARTS(STR(?o), STR(wd:)) ||  # comment out this whole line to see only string literals!
      DATATYPE(?o) = xsd:string
    )
 
    SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
 
} LIMIT 100

Relax the FILTER expression again so it also returns triples with these three predicates (rdfs:label, skos:altLabel and schema:description)

PREFIX wd: <http://www.wikidata.org/entity/>

SELECT ?p ?oLabel ?o WHERE {
    wd:Q42 ?p ?o .
 
    FILTER (
      (STRSTARTS(STR(?p), STR(wdt:)) &&  # comment out these three lines to see only fingerprint literals!
       STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string)
      ||
      (?p IN (rdfs:label, skos:altLabel, schema:description) &&
       DATATYPE(?o) = rdf:langString && LANG(?o) = "en")
    )
 
    SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
 
} LIMIT 100

Try to restrict the FILTER expression again so that, when the predicate is rdfs:label, skos:altLabel and schema:description, the object must have LANG "en"

PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX bd: <http://www.bigdata.com/rdf#>
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX schema: <http://schema.org/>

SELECT * WHERE {
  SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
    SELECT ?p ?oLabel ?o WHERE {
        wd:Q42 ?p ?o .

        FILTER (
          (STRSTARTS(STR(?p), STR(wdt:)) &&
           STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string)
          ||
          (?p IN (rdfs:label, skos:altLabel, schema:description) &&
           DATATYPE(?o) = rdf:langString && LANG(?o) = "en")
        )

        SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

    } LIMIT 100
  }
}

Change the SELECT query to an INSERT query that adds the Wikidata triples to your local repository

PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX bd: <http://www.bigdata.com/rdf#>
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX schema: <http://schema.org/>

INSERT {
  wd:Q42 ?p ?o .
  ?o rdfs:label ?oLabel .
} WHERE {
  SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
    SELECT ?p ?oLabel ?o WHERE {
        wd:Q42 ?p ?o .

        FILTER (
          (STRSTARTS(STR(?p), STR(wdt:)) &&
           STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string)
          ||
          (?p IN (rdfs:label, skos:altLabel, schema:description) &&
           DATATYPE(?o) = rdf:langString && LANG(?o) = "en")
        )

        SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

    } LIMIT 500
  }
}

If you have more time

You must therefore REPLACE all wdt: prefixes of properties with wd: prefixes and BIND the new URI AS a new variable, for example ?pw.

PREFIX wd: <http://www.wikidata.org/entity/>

SELECT ?pwLabel ?oLabel WHERE {
    wd:Q42 ?p ?o .
 
    FILTER (STRSTARTS(STR(?p), STR(wdt:)))
    FILTER (STRSTARTS(STR(?o), STR(wd:)))
 
    BIND (IRI(REPLACE(STR(?p), STR(wdt:), STR(wd:))) AS ?pw)

    SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
 
} LIMIT 100

Now you can go back to the SELECT statement that returned primary triples with only resource objects (not literal objects or fingerprints). Extend it so it also includes primary triples "one step out", i.e., triples where the subjects are objects of triples involving your reference entity.

PREFIX wikibase: <http://wikiba.se/ontology#>
PREFIX bd: <http://www.bigdata.com/rdf#>
PREFIX wd: <http://www.wikidata.org/entity/>
PREFIX wdt: <http://www.wikidata.org/prop/direct/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX schema: <http://schema.org/>

INSERT {
  wd:Q42 ?p1 ?o1 .
  ?o1 rdfs:label ?o1Label .
  ?o1 ?p2 ?o2 .
  ?o2 rdfs:label ?o2Label .
} WHERE {
  SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
    SELECT ?p1 ?o1Label ?o1 ?p2 ?o2Label ?o2 WHERE {
        wd:Q42 ?p1 ?o1 .
        ?o1 ?p2 ?o2 .

        FILTER (
           STRSTARTS(STR(?p1), STR(wdt:)) &&
           STRSTARTS(STR(?o1), STR(wd:)) &&
           STRSTARTS(STR(?p2), STR(wdt:)) &&
           STRSTARTS(STR(?o2), STR(wd:))
        )

        SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }

    } LIMIT 500
  }
}

=CSV to RDF (Lab 7)=

#Imports
import re
from pandas import *
from numpy import nan
from rdflib import Graph, Namespace, URIRef, Literal, RDF, XSD, FOAF
from spotlight import SpotlightException, annotate

SERVER = "https://api.dbpedia-spotlight.org/en/annotate"
# Test around with the confidence, and see how many names change depending on the confidence.
# However, be aware that with anything lower than this (0.83) it will replace James W. McCord and other names that include James with LeBron James
CONFIDENCE = 0.83 

# This function uses DBpedia Spotlight, which was not a part of the CSV lab this year.  
def annotate_entity(entity, filters={'types': 'DBpedia:Person'}):
	annotations = []
	try:
		annotations = annotate(address=SERVER, text=entity, confidence=CONFIDENCE, filters=filters)
	except SpotlightException as e:
		print(e)
	return annotations
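
# Hedged usage sketch: Spotlight returns a list of annotation dicts, and the code
# further down only relies on the 'URI' key of the first one. Uncomment to try it
# against the live endpoint (requires network access):
# for annotation in annotate_entity("James W. McCord"):
# 	print(annotation["URI"])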

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

#Pandas' read_csv function to load russia-investigation.csv
df = read_csv("russia-investigation.csv")
#Replaces all instances of numpy's nan with the None type
df = df.replace(nan, None)

#Function that prepares the values to be added to the graph as a URI (with the ex prefix in front) or a Literal
def prepareValue(row):
	if row == None: #none type
		value = Literal(row)
	elif isinstance(row, str) and re.match(r'\d{4}-\d{2}-\d{2}', row): #date
		value = Literal(row, datatype=XSD.date)
	elif isinstance(row, bool): #boolean value (true / false)
		value = Literal(row, datatype=XSD.boolean)
	elif isinstance(row, int): #integer
		value = Literal(row, datatype=XSD.integer)
	elif isinstance(row, str): #string
		value = URIRef(ex + row.replace('"', '').replace(" ", "_").replace(",","").replace("-", "_"))
	elif isinstance(row, float): #float
		value = Literal(row, datatype=XSD.float)

	return value

#Convert the non-semantic CSV dataset into a semantic RDF 
def csv_to_rdf(df):
	for index, row in df.iterrows():
		id = URIRef(ex + "Investigation_" + str(index))
		investigation = prepareValue(row["investigation"])
		investigation_start = prepareValue(row["investigation-start"])
		investigation_end = prepareValue(row["investigation-end"])
		investigation_days = prepareValue(row["investigation-days"])
		indictment_days = prepareValue(row["indictment-days "])
		cp_date = prepareValue(row["cp-date"])
		cp_days = prepareValue(row["cp-days"])
		overturned = prepareValue(row["overturned"])
		pardoned = prepareValue(row["pardoned"])
		american = prepareValue(row["american"])
		outcome = prepareValue(row["type"])
		name_ex = prepareValue(row["name"])
		president_ex = prepareValue(row["president"])

		#Spotlight Search
		name = annotate_entity(str(row['name']))
		president = annotate_entity(str(row['president']).replace(".", ""))
		
		#Adds the triples to the graph
		g.add((id, RDF.type, ex.Investigation))
		g.add((id, ex.investigation, investigation))
		g.add((id, ex.investigation_start, investigation_start))
		g.add((id, ex.investigation_end, investigation_end))
		g.add((id, ex.investigation_days, investigation_days))
		g.add((id, ex.indictment_days, indictment_days))
		g.add((id, ex.cp_date, cp_date))
		g.add((id, ex.cp_days, cp_days))
		g.add((id, ex.overturned, overturned))
		g.add((id, ex.pardoned, pardoned))
		g.add((id, ex.american, american))
		g.add((id, ex.outcome, outcome))

		#Spotlight search
		#Name
		try:
			g.add((id, ex.person, URIRef(name[0]["URI"])))
		except:
			g.add((id, ex.person, name_ex))

		#President
		try:
			g.add((id, ex.president, URIRef(president[0]["URI"])))
		except:
			g.add((id, ex.president, president_ex))

csv_to_rdf(df)
print(g.serialize())
g.serialize("lab7.ttl", format="ttl")

=JSON-LD (Lab 8)=

==Task 1) Basic JSON-LD==

{
    "@context": {
        "@base": "http://example.org/",
        "edges": "http://example.org/triple",
        "start": "http://example.org/source",
        "rel": "http://exaxmple.org/predicate",
        "end": "http://example.org/object",
        "Person" : "http://example.org/Person",
        "birthday" : {
            "@id" : "http://example.org/birthday",
            "@type" : "xsd:date"
        },
        "nameEng" : {
            "@id" : "http://example.org/en/name",
            "@language" : "en"
        },
        "nameFr" : {
            "@id" : "http://example.org/fr/name",
            "@language" : "fr"
        },
        "nameCh" : {
            "@id" : "http://example.org/ch/name",
            "@language" : "ch"
        },
        "age" : {
            "@id" : "http://example.org/age",
            "@type" : "xsd:int"
        },
        "likes" : "http://example.org/games/likes",
        "haircolor" : "http://example.org/games/haircolor"
    },
    "@graph": [
        {
            "@id": "people/Jeremy",
            "@type": "Person",
            "birthday" : "1987.1.1",
            "nameEng" : "Jeremy",
            "age" : 26
        },
        {
            "@id": "people/Tom",
            "@type": "Person"
        },
        {
            "@id": "people/Ju",
            "@type": "Person",
            "birthday" : "2001.1.1",
            "nameCh" : "Ju",
            "age" : 22,
            "likes" : "bastketball"
        },
        {
            "@id": "people/Louis",
            "@type": "Person",
            "birthday" : "1978.1.1",
            "haircolor" : "Black",
            "nameFr" : "Louis",
            "age" : 45
        },
        {"edges" : [
        {
            "start" : "people/Jeremy",
            "rel" : "knows",
            "end" : "people/Tom"
        },
        {
            "start" : "people/Tom",
            "rel" : "knows",
            "end" : "people/Louis"
        },
        {
            "start" : "people/Louis",
            "rel" : "teaches",
            "end" : "people/Ju"
        },
        {
            "start" : "people/Ju",
            "rel" : "plays",
            "end" : "people/Jeremy"
        },
        {
            "start" : "people/Ju",
            "rel" : "plays",
            "end" : "people/Tom"
        }
        ]}
    ]
}
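
One quick way to sanity-check the JSON-LD document above is to parse it with RDFLib's JSON-LD support and print the resulting triples as Turtle. This is only a hedged sketch: it assumes the document has been saved to a file named task1.jsonld.

import rdflib

g = rdflib.Graph()
g.parse("task1.jsonld", format="json-ld")  # task1.jsonld is an assumed file name
print(g.serialize(format="ttl"))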

==Task 2 & 3) Retrieving JSON-LD from ConceptNet / Programming JSON-LD in Python==

import rdflib

CN_BASE = 'http://api.conceptnet.io/c/en/'

g = rdflib.Graph()
g.parse(CN_BASE+'indictment', format='json-ld')

# To download JSON object:

import json
import requests

json_obj = requests.get(CN_BASE+'indictment').json()

# To change the @context:

context = {
     "@base": "http://ex.org/",
     "edges": "http://ex.org/triple/",
     "start": "http://ex.org/s/",
     "rel": "http://ex.org/p/",
     "end": "http://ex.org/o/",
     "label": "http://ex.org/label"
}
json_obj['@context'] = context
json_str = json.dumps(json_obj)

g = rdflib.Graph()
g.parse(data=json_str, format='json-ld')

# To extract triples (here with labels):

r = g.query("""
         SELECT ?s ?sLabel ?p ?o ?oLabel WHERE {
             ?edge
                 <http://ex.org/s/> ?s ;
                 <http://ex.org/p/> ?p ;
                 <http://ex.org/o/> ?o .
             ?s <http://ex.org/label> ?sLabel .
             ?o <http://ex.org/label> ?oLabel .
}
         """, initNs={'cn': CN_BASE})
print(r.serialize(format='txt').decode())

# Construct a new graph:

r = g.query("""
         CONSTRUCT {
             ?s ?p ?o .
             ?s <http://ex.org/label> ?sLabel .
             ?o <http://ex.org/label> ?oLabel .
         } WHERE {
             ?edge <http://ex.org/s/> ?s ;
                   <http://ex.org/p/> ?p ;
                   <http://ex.org/o/> ?o .
             ?s <http://ex.org/label> ?sLabel .
             ?o <http://ex.org/label> ?oLabel .
}
         """, initNs={'cn': CN_BASE})

print(r.graph.serialize(format='ttl'))

=SHACL (Lab 9)=

from pyshacl import validate
from rdflib import Graph

data_graph = Graph()
# parses the Turtle example from the task
data_graph.parse("data_graph.ttl")

prefixes = """
@prefix ex: <http://example.org/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix sh: <http://www.w3.org/ns/shacl#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
"""

shape_graph = """
ex:PUI_Shape
    a sh:NodeShape ;
    sh:targetClass ex:PersonUnderInvestigation ;
    sh:property [
        sh:path foaf:name ;
        sh:minCount 1 ; #Every person under investigation has exactly one name. 
        sh:maxCount 1 ; #Every person under investigation has exactly one name.
        sh:datatype rdf:langString ; #All person names must be language-tagged
    ] ;
    sh:property [
        sh:path ex:chargedWith ;
        sh:nodeKind sh:IRI ; #The object of a charged with property must be a URI.
        sh:class ex:Offense ; #The object of a charged with property must be an offense.
    ] .

# --- If you have more time tasks ---
ex:User_Shape rdf:type sh:NodeShape;
    sh:targetClass ex:Indictment;
    # The only allowed values for ex:american are true, false or unknown.
    sh:property [
        sh:path ex:american;
        sh:pattern "(true|false|unknown)" ;
    ];
    
    # The value of a property that counts days must be an integer.
    sh:property [
        sh:path ex:indictment_days;
        sh:datatype xsd:integer;
    ];   
    sh:property [
        sh:path ex:investigation_days;
        sh:datatype xsd:integer;
    ];
    
    # The value of a property that indicates a start date must be xsd:date.
    sh:property [
        sh:path ex:investigation_start;
        sh:datatype xsd:date;
    ];

    # The value of a property that indicates an end date must be xsd:date or unknown (tip: you can use sh:or (...) ).
    sh:property [
        sh:path ex:investigation_end;
        sh:or (
         [ sh:datatype xsd:date ]
         [ sh:hasValue "unknown" ]
    )];
    
    # Every indictment must have exactly one FOAF name for the investigated person.
    sh:property [
        sh:path foaf:name;
        sh:minCount 1;
        sh:maxCount 1;
    ];
    
    # Every indictment must have exactly one investigated person property, and that person must have the type ex:PersonUnderInvestigation.
    sh:property [
        sh:path ex:investigatedPerson ;
        sh:minCount 1 ;
        sh:maxCount 1 ;
        sh:class ex:PersonUnderInvestigation ;
        sh:nodeKind sh:IRI ;
    ] ;

    # No URI-s can contain hyphens ('-').
    sh:property [
        sh:path ex:outcome ;
        sh:nodeKind sh:IRI ;
        sh:pattern "^[^-]*$" ;
    ] ;

    # Presidents must be identified with URIs.
    sh:property [
        sh:path ex:president ;
        sh:minCount 1 ;
        sh:class ex:President ;
        sh:nodeKind sh:IRI ;
    ] .
"""

shacl_graph = Graph()
# parses the contents of a shape_graph you made in the previous task
shacl_graph.parse(data=prefixes+shape_graph)

# uses pySHACL's validate method to apply the shape_graph constraints to the data_graph
results = validate(
    data_graph,
    shacl_graph=shacl_graph,
    inference='both'
)

# prints out the validation result
boolean_value, results_graph, results_text = results

# print(boolean_value)
print(results_graph.serialize(format='ttl'))
# print(results_text)

#Write a SPARQL query to print out each distinct sh:resultMessage in the results_graph
distinct_messages = """
PREFIX sh: <http://www.w3.org/ns/shacl#> 

SELECT DISTINCT ?message WHERE {
    [] sh:result / sh:resultMessage ?message .
}
"""
messages = results_graph.query(distinct_messages)
for row in messages:
    print(row.message)

#each sh:resultMessage in the results_graph once, along with the number of times that message has been repeated in the results
count_messages = """
PREFIX sh: <http://www.w3.org/ns/shacl#> 

SELECT ?message (COUNT(?node) AS ?num_messages) WHERE {
    [] sh:result ?result .
    ?result sh:resultMessage ?message ;
            sh:focusNode ?node .
}
GROUP BY ?message
ORDER BY DESC(?num_messages) ?message
"""

messages = results_graph.query(count_messages)
print("COUNT    MESSAGE")
for row in messages:
    print(row.num_messages, "      ", row.message)

=RDFS (Lab 10)=

import owlrl
from rdflib import Graph, RDF, Namespace, Literal, XSD, FOAF, RDFS
from rdflib.collection import Collection

g = Graph()
ex = Namespace('http://example.org/')

g.bind("ex", ex)
g.bind("foaf", FOAF)


NS = {
    'ex': ex,
    'rdf': RDF,
    'rdfs': RDFS,
    'foaf': FOAF,
}

#Write a small function that computes the RDFS closure on your graph.
def flush():
    engine = owlrl.RDFSClosure.RDFS_Semantics(g, False, False, False)
    engine.closure()
    engine.flush_stored_triples()

#Rick Gates was charged with money laundering and tax evasion.
g.add((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion))

#When one thing that is charged with another thing,
g.add((ex.chargedWith, RDFS.domain, ex.PersonUnderInvestigation))  #the first thing (subject) is a person under investigation and
g.add((ex.chargedWith, RDFS.range, ex.Offense))  #the second thing (object) is an offense.

#Write a SPARQL query that checks the RDF type(s) of Rick Gates and money laundering in your RDF graph.
print(g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer)
print(g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer)
flush()
print(g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer)
print(g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer)

#A person under investigation is a FOAF person
g.add((ex.PersonUnderInvestigation, RDFS.subClassOf, FOAF.Person))
print(g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer)
flush()
print(g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer)

#Paul Manafort was convicted for tax evasion.
g.add((ex.Paul_Manafort, ex.convictedFor, ex.TaxEvasion))
#the first thing is also charged with the second thing
g.add((ex.convictedFor, RDFS.subPropertyOf, ex.chargedWith)) 
flush()
print(g.query('ASK {ex:Paul_Manafort ex:chargedWith ex:TaxEvasion}', initNs=NS).askAnswer)

print(g.serialize())

=OWL 1 (Lab 11)=

from rdflib import Graph, RDFS, Namespace, RDF, FOAF, BNode, OWL, URIRef, Literal, XSD
from rdflib.collection import Collection
import owlrl

g = Graph()
ex = Namespace('http://example.org/')
schema = Namespace('http://schema.org/')
dbr = Namespace('https://dbpedia.org/page/')

g.bind("ex", ex)
# g.bind("schema", schema)
g.bind("foaf", FOAF)

# Donald Trump and Robert Mueller are two different persons.
g.add((ex.Donald_Trump, OWL.differentFrom, ex.Robert_Mueller))

# Actually, all the names mentioned in connection with the Mueller investigation refer to different people.
b1 = BNode()
b2 = BNode()
Collection(g, b2, [ex.Robert_Mueller, ex.Paul_Manafort, ex.Rick_Gates, ex.George_Papadopoulos, ex.Michael_Flynn, ex.Michael_Cohen, ex.Roger_Stone, ex.Donald_Trump])
g.add((b1, RDF.type, OWL.AllDifferent))
g.add((b1, OWL.distinctMembers, b2))

# All these people are foaf:Persons as well as schema:Persons
g.add((FOAF.Person, OWL.equivalentClass, schema.Person))

# Tax evasion is a kind of bank and tax fraud.
g.add((ex.TaxEvasion, RDFS.subClassOf, ex.BankFraud))
g.add((ex.TaxEvasion, RDFS.subClassOf, ex.TaxFraud))

# The Donald Trump involved in the Mueller investigation is dbpedia:Donald_Trump and not dbpedia:Donald_Trump_Jr.
g.add((ex.Donald_Trump, OWL.sameAs, dbr.Donald_Trump))
g.add((ex.Donald_Trump, OWL.differentFrom, URIRef(dbr + "Donald_Trump_Jr.")))

# Congress, FBI and the Mueller investigation are foaf:Organizations.
g.add((ex.Congress, RDF.type, FOAF.Organization))
g.add((ex.FBI, RDF.type, FOAF.Organization))
g.add((ex.Mueller_Investigation, RDF.type, FOAF.Organization))

# Nothing can be both a person and an organization.
g.add((FOAF.Person, OWL.disjointWith, FOAF.Organization))

# Leading an organization is a way of being involved in an organization.
g.add((ex.leading, RDFS.subPropertyOf, ex.involved))

# Being a campaign manager or an advisor for is a way of supporting someone.
g.add((ex.campaignManagerTo, RDFS.subPropertyOf, ex.supports))
g.add((ex.advisorTo, RDFS.subPropertyOf, ex.supports))

# Donald Trump is a politician and a Republican.
g.add((ex.Donald_Trump, RDF.type, ex.Politician))
g.add((ex.Donald_Trump, RDF.type, ex.Republican))

# A Republican politician is both a politician and a Republican.
g.add((ex.RepublicanPolitician, RDFS.subClassOf, ex.Politician))
g.add((ex.RepublicanPolitician, RDFS.subClassOf, ex.Republican))

#hasBusinessPartner
g.add((ex.Paul_Manafort, ex.hasBusinessPartner, ex.Rick_Gates))
g.add((ex.hasBusinessPartner, RDF.type, OWL.SymmetricProperty))
g.add((ex.hasBusinessPartner, RDF.type, OWL.IrreflexiveProperty))

#adviserTo
g.add((ex.Michael_Flynn, ex.adviserTo, ex.Donald_Trump))
g.add((ex.adviserTo, RDF.type, OWL.IrreflexiveProperty))
# Not necessarily asymmetric as it's not a given that they couldn't be advisors to each other  

#wasLyingTo
g.add((ex.Rick_Gates_Lying, ex.wasLyingTo, ex.FBI))
g.add((ex.wasLyingTo, RDF.type, OWL.IrreflexiveProperty))
# Not asymmetric as the subject and object could lie to each other; also in this context, the FBI can lie to you

#presidentOf
g.add((ex.Donald_Trump, ex.presidentOf, ex.USA))
g.add((ex.presidentOf, RDF.type, OWL.AsymmetricProperty))
g.add((ex.presidentOf, RDF.type, OWL.IrreflexiveProperty))
g.add((ex.presidentOf, RDF.type, OWL.FunctionalProperty)) #can only be president of one country
#not inversefunctionalproperty as Bosnia has 3 presidents https://www.culturalworld.org/do-any-countries-have-more-than-one-president.htm

#hasPresident
g.add((ex.USA, ex.hasPresident, ex.Donald_Trump))
g.add((ex.hasPresident, RDF.type, OWL.AsymmetricProperty))
g.add((ex.hasPresident, RDF.type, OWL.IrreflexiveProperty))
g.add((ex.hasPresident, RDF.type, OWL.InverseFunctionalProperty)) #countries do not share their president with another
#not functionalproperty as a country (Bosnia) can have more than one president

#Closure
owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(g)

#Serialization
print(g.serialize(format="ttl"))
# g.serialize("lab8.xml", format="xml") #serializes to XML file

=OWL 2 (Lab 12)=

@prefix : <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> .
@prefix dc: <http://purl.org/dc/terms#> .
@prefix io: <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> .
@prefix dbr: <http://dbpedia.org/resource/> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@base <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> .

<http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology> rdf:type owl:Ontology .

#################################################################
#    Object Properties
#################################################################

###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#indictedIn
io:indictedIn rdf:type owl:ObjectProperty ;
              rdfs:subPropertyOf io:involvedIn ;
              rdfs:domain io:InvestigatedPerson ;
              rdfs:range io:Investigation .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#investigating
io:investigating rdf:type owl:ObjectProperty ;
                 rdfs:subPropertyOf io:involvedIn ;
                 rdfs:domain io:Investigator ;
                 rdfs:range io:Investigation .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#involvedIn
io:involvedIn rdf:type owl:ObjectProperty ;
              rdfs:domain foaf:Person ;
              rdfs:range io:Investigation .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#leading
io:leading rdf:type owl:ObjectProperty ;
           rdfs:subPropertyOf io:investigating ;
           rdfs:domain io:InvestigationLeader ;
           rdfs:range io:Investigation .


#################################################################
#    Data properties
#################################################################

###  http://purl.org/dc/elements/1.1/description
<http://purl.org/dc/elements/1.1/description> rdf:type owl:DatatypeProperty ;
                                              rdfs:domain io:Investigation ;
                                              rdfs:range xsd:string .


###  http://www.w3.org/ns/prov#endedAtTime
prov:endedAtTime rdf:type owl:DatatypeProperty ,
                          owl:FunctionalProperty ;
                 rdfs:domain io:Investigation ;
                 rdfs:range xsd:dateTime .


###  http://www.w3.org/ns/prov#startedAtTime
prov:startedAtTime rdf:type owl:DatatypeProperty ,
                            owl:FunctionalProperty ;
                   rdfs:domain io:Investigation ;
                   rdfs:range xsd:dateTime .


###  http://xmlns.com/foaf/0.1/name
foaf:name rdf:type owl:DatatypeProperty ;
          rdfs:domain foaf:Person ;
          rdfs:range xsd:string .


###  http://xmlns.com/foaf/0.1/title
foaf:title rdf:type owl:DatatypeProperty ;
           rdfs:domain io:Investigation ;
           rdfs:range xsd:string .


#################################################################
#    Classes
#################################################################

###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#InvestigatedPerson
io:InvestigatedPerson rdf:type owl:Class ;
                      rdfs:subClassOf io:Person ;
                      owl:disjointWith io:Investigator .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Investigation
io:Investigation rdf:type owl:Class .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#InvestigationLeader
io:InvestigationLeader rdf:type owl:Class ;
                       rdfs:subClassOf io:Investigator .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Investigator
io:Investigator rdf:type owl:Class ;
                rdfs:subClassOf io:Person .


###  http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Person
io:Person rdf:type owl:Class ;
          rdfs:subClassOf foaf:Person .


###  http://xmlns.com/foaf/0.1/Person
foaf:Person rdf:type owl:Class .


#################################################################
#    Individuals
#################################################################

###  http://dbpedia.org/resource/Donald_Trump
dbr:Donald_Trump rdf:type owl:NamedIndividual ;
                 foaf:name "Donald Trump" .


###  http://dbpedia.org/resource/Elizabeth_Prelogar
dbr:Elizabeth_Prelogar rdf:type owl:NamedIndividual ;
                       io:investigating <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ;
                       foaf:name "Elizabeth Prelogar" .


###  http://dbpedia.org/resource/Michael_Flynn
dbr:Michael_Flynn rdf:type owl:NamedIndividual ;
                  foaf:name "Michael Flynn" .


###  http://dbpedia.org/resource/Paul_Manafort
dbr:Paul_Manafort rdf:type owl:NamedIndividual ;
                  io:indictedIn <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ;
                  foaf:name "Paul Manafort" .


###  http://dbpedia.org/resource/Robert_Mueller
dbr:Robert_Mueller rdf:type owl:NamedIndividual ;
                   io:leading <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ;
                   foaf:name "Robert Mueller" .


###  http://dbpedia.org/resource/Roger_Stone
dbr:Roger_Stone rdf:type owl:NamedIndividual ;
                foaf:name "Roger Stone" .


###  http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)
<http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> rdf:type owl:NamedIndividual ;
                                                                        foaf:title "Mueller Investigation" .


#################################################################
#    General axioms
#################################################################

[ rdf:type owl:AllDifferent ;
  owl:distinctMembers ( dbr:Donald_Trump
                        dbr:Elizabeth_Prelogar
                        dbr:Michael_Flynn
                        dbr:Paul_Manafort
                        dbr:Robert_Mueller
                        dbr:Roger_Stone
                      )
] .


###  Generated by the OWL API (version 4.5.25.2023-02-15T19:15:49Z) https://github.com/owlcs/owlapi

=Using Graph Embeddings (Lab 13)=

https://colab.research.google.com/drive/1WkRJUeUBVF5yVv7o0pOKfsd4pqG6369k