Here we will present suggested solutions after each lab. ''The page will be updated as the course progresses.''

=[[/info216.wiki.uib.no/Lab: Getting started with VSCode, Python and RDFlib|1 Lab: Getting started with VSCode, Python and RDFlib]] =

<syntaxhighlight>
import requests
import shutil

from rdflib import Graph, Namespace

g = Graph()
ex = Namespace('http://example.org/')
g.bind("ex", ex)

# The Mueller Investigation was led by Robert Mueller.
g.add((ex.MuellerInvestigation, ex.leadBy, ex.RobertMueller))

# It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, Michael Cohen, and Roger Stone.
g.add((ex.MuellerInvestigation, ex.involved, ex.PaulManafort))
g.add((ex.MuellerInvestigation, ex.involved, ex.RickGates))
g.add((ex.MuellerInvestigation, ex.involved, ex.GeorgePapadopoulos))
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelFlynn))
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelCohen))
g.add((ex.MuellerInvestigation, ex.involved, ex.RogerStone))

# Paul Manafort was business partner of Rick Gates.
g.add((ex.PaulManafort, ex.businessPartner, ex.RickGates))
# He was campaign chairman for Donald Trump.
g.add((ex.PaulManafort, ex.campaignChairman, ex.DonaldTrump))
# He was charged with money laundering, tax evasion, and foreign lobbying.
g.add((ex.PaulManafort, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.PaulManafort, ex.chargedWith, ex.TaxEvasion))
g.add((ex.PaulManafort, ex.chargedWith, ex.ForeignLobbying))
# He was convicted for bank and tax fraud.
g.add((ex.PaulManafort, ex.convictedOf, ex.BankFraud))
g.add((ex.PaulManafort, ex.convictedOf, ex.TaxFraud))
# He pleaded guilty to conspiracy.
g.add((ex.PaulManafort, ex.pleadGuiltyTo, ex.Conspiracy))
# He was sentenced to prison.
g.add((ex.PaulManafort, ex.sentencedTo, ex.Prison))
# He negotiated a plea agreement.
g.add((ex.PaulManafort, ex.negotiated, ex.PleaAgreement))

# Rick Gates was charged with money laundering, tax evasion and foreign lobbying.
g.add((ex.RickGates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.RickGates, ex.chargedWith, ex.TaxEvasion))
g.add((ex.RickGates, ex.chargedWith, ex.ForeignLobbying))
# He pleaded guilty to conspiracy and lying to the FBI.
g.add((ex.RickGates, ex.pleadGuiltyTo, ex.Conspiracy))
g.add((ex.RickGates, ex.pleadGuiltyTo, ex.LyingToFBI))

# Use the serialize method of rdflib.Graph to write out the model in different formats (on screen or to file).
print(g.serialize(format="ttl")) # To screen
#g.serialize("lab1.ttl", format="ttl") # To file

# Loop through the triples in the model to print out all triples that have pleading guilty as predicate.
for subject, object in g[ : ex.pleadGuiltyTo :]:
    print(subject, ex.pleadGuiltyTo, object)

# --- IF you have more time tasks ---

# Michael Cohen, Michael Flynn and the lying is part of lab 2 and therefore the answer is not provided this week.

# Write a method (function) that submits your model for rendering and saves the returned image to file.
def graphToImage(graphInput):
    data = {"rdf": graphInput, "from": "ttl", "to": "png"}
    link = "http://www.ldf.fi/service/rdf-grapher"
    response = requests.get(link, params=data, stream=True)
    # print(response.content)
    print(response.raw)
    with open("lab1.png", "wb") as file:
        shutil.copyfileobj(response.raw, file)

graph = g.serialize(format="ttl")
graphToImage(graph)
</syntaxhighlight>

=Example lab solutions=

This section collects further Python examples related to the lectures and labs. The first examples use Python's RDFlib; other relevant libraries are introduced later.

==Getting started==

<syntaxhighlight>


from rdflib import Graph, Namespace, Literal, URIRef
from rdflib.namespace import RDF, FOAF, XSD
from rdflib.collection import Collection
 
g = Graph()
EX = Namespace('http://EXample.org/')
RL = Namespace('http://purl.org/vocab/relationship/')
DBO = Namespace('https://dbpedia.org/ontology/')
DBR = Namespace('https://dbpedia.org/page/')
 
g.namespace_manager.bind('exampleURI', EX)
g.namespace_manager.bind('relationship', RL)
g.namespace_manager.bind('dbpediaOntology', DBO)
g.namespace_manager.bind('dbpediaPage', DBR)
 
g.add((EX.Cade, RDF.type, FOAF.Person))
g.add((EX.Mary, RDF.type, FOAF.Person))
g.add((EX.Cade, RL.spouseOf, EX.Mary)) # a symmetrical relation from an established namespace
g.add((DBR.France, DBO.capital, DBR.Paris))
g.add((EX.Cade, FOAF.age, Literal(27)))
g.add((EX.Mary, FOAF.age, Literal('26', datatype=XSD.int)))
Collection(g, EX.MaryInterests, [EX.hiking, EX.chocolate, EX.biology])
g.add((EX.Mary, EX.hasInterest, EX.MaryInterests))
g.add((EX.Mary, RDF.type, EX.student))
g.add((DBO.capital, EX.range, EX.city))
g.add((EX.Mary, RDF.type, EX.kind))
g.add((EX.Cade, RDF.type, EX.kindPerson))
 
#hobbies = ['hiking', 'chocolate', 'biology']
#for i in hobbies:
#    g.add((EX.Mary, FOAF.interest, EX[i]))
 
print(g.serialize(format="turtle"))
</syntaxhighlight>
 
==RDFlib==
<syntaxhighlight>
 
from rdflib.namespace import RDF, XSD, FOAF
from rdflib import Graph, Namespace, Literal, BNode
from rdflib.collection import Collection
 


g = Graph()
ex = Namespace('http://example.org/')
schema = Namespace("https://schema.org/")
dbp = Namespace("https://dbpedia.org/resource/")
g.bind("ex", ex)
g.bind("dbp", dbp)
g.bind("schema", schema)
address = BNode()
degree = BNode()
# from lab 1
g.add((ex.Cade, FOAF.name, Literal("Cade Tracey", datatype=XSD.string)))
g.add((ex.Mary, FOAF.name, Literal("Mary", datatype=XSD.string)))
g.add((ex.Cade, RDF.type, FOAF.Person))
g.add((ex.Mary, RDF.type, FOAF.Person))
g.add((ex.Mary, RDF.type, ex.Student))
g.add((ex.Cade, ex.married, ex.Mary))
g.add((ex.Cade, FOAF.age, Literal('27', datatype=XSD.int)))
g.add((ex.Mary, FOAF.age, Literal('26', datatype=XSD.int)))
g.add((ex.Paris, RDF.type, ex.City))
g.add((ex.France, ex.Capital, ex.Paris))
g.add((ex.Mary, FOAF.interest, ex.hiking))
g.add((ex.Mary, FOAF.interest, ex.Chocolate))
g.add((ex.Mary, FOAF.interest, ex.biology))
g.add((ex.France, ex.City, ex.Paris))
g.add((ex.Mary, ex.characteristic, ex.kind))
g.add((ex.Cade, ex.characteristic, ex.kind))
g.add((ex.France, RDF.type, ex.Country))
g.add((ex.Cade, schema.address, address))
# BNode address
g.add((address, RDF.type, schema.PostalAddress))
g.add((address, schema.streetAddress, Literal('1516 Henry Street')))
g.add((address, schema.addressCity, dbp.Berkeley))
g.add((address, schema.addressRegion, dbp.California))
g.add((address, schema.postalCode, Literal('94709')))
g.add((address, schema.addressCountry, dbp.United_States))
# More info about Cade
g.add((ex.Cade, ex.Degree, degree))
g.add((degree, ex.Field, dbp.Biology))
g.add((degree, RDF.type, dbp.Bachelors_degree))
g.add((degree, ex.university, dbp.University_of_California))
g.add((degree, ex.year, Literal('2001', datatype=XSD.gYear)))
# Emma
emma_degree = BNode()
g.add((ex.Emma, FOAF.name, Literal("Emma Dominguez", datatype=XSD.string)))
g.add((ex.Emma, RDF.type, FOAF.Person))
g.add((ex.Emma, ex.Degree, emma_degree))
g.add((emma_degree, ex.Field, dbp.Chemistry))
g.add((emma_degree, RDF.type, dbp.Masters_degree))
g.add((emma_degree, ex.university, dbp.University_of_Valencia))
g.add((emma_degree, ex.year, Literal('2015', datatype=XSD.gYear)))
# Address
emma_address = BNode()
g.add((ex.Emma, schema.address, emma_address))
g.add((emma_address, RDF.type, schema.PostalAddress))
g.add((emma_address, schema.streetAddress,
      Literal('Carrer de la Guardia Civil 20')))
g.add((emma_address, schema.addressRegion, dbp.Valencia))
g.add((emma_address, schema.postalCode, Literal('46020')))
g.add((emma_address, schema.addressCountry, dbp.Spain))
b = BNode()
g.add((ex.Emma, ex.visit, b))
Collection(g, b,
          [dbp.Portugal, dbp.Italy, dbp.France, dbp.Germany, dbp.Denmark, dbp.Sweden])
</syntaxhighlight>
==SPARQL - Blazegraph==
<syntaxhighlight>
PREFIX ex: <http://example.org/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX xml: <http://www.w3.org/XML/1998/namespace>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
#select all triples in the graph
SELECT ?s ?p ?o
WHERE {
    ?s ?p ?o .
}
#select the interests of Cade
SELECT ?cadeInterest
WHERE {
    ex:Cade ex:interest ?cadeInterest .
}
#select the country and city where Emma lives
SELECT ?emmaCity ?emmaCountry
WHERE {
    ex:Emma ex:address ?address .
  ?address ex:city ?emmaCity .
  ?address ex:country ?emmaCountry .
}
#select the people who are over 26 years old
SELECT ?person ?age
WHERE {
    ?person ex:age ?age .
  FILTER(?age > 26) .   
}
#select people who graduated with Bachelor
SELECT ?person ?degree
WHERE {
    ?person ex:degree ?degree .
  ?degree ex:degreeLevel "Bachelor" .
         
}
# delete cades photography interest
DELETE DATA
{
    ex:Cade ex:interest ex:Photography .
}
# delete and insert university of valencia
DELETE { ?s ?p ex:University_of_Valencia }
INSERT { ?s ?p ex:Universidad_de_Valencia }
WHERE  { ?s ?p ex:University_of_Valencia }
#check if the deletion worked
SELECT ?s ?o2
WHERE  {
  ?s ex:degree ?o .
  ?o ex:degreeSource ?o2 .
      }
#describe sergio
DESCRIBE ex:Sergio ?o
WHERE {
  ex:Sergio ?p ?o .
  ?o ?p2 ?o2 .
  }
</syntaxhighlight>
==SPARQL - RDFlib==
<syntaxhighlight>
from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE
namespace = "lab4"
sparql = SPARQLWrapper("http://10.111.21.183:9999/blazegraph/namespace/"+ namespace + "/sparql")
# Print out Cade's interests
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    SELECT * WHERE {
    ex:Cade ex:interest ?interest.
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print(result["interest"]["value"])
# Print Emma's city and country
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    SELECT ?emmaCity ?emmaCountry
    WHERE {
        ex:Emma ex:address ?address .
        ?address ex:city ?emmaCity .
        ?address ex:country ?emmaCountry .
        }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print("Emma's city is "+result["emmaCity"]["value"]+" and Emma's country is " + result["emmaCountry"]["value"])
#Select the people who are over 26 years old
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    SELECT ?person ?age
    WHERE {
        ?person ex:age ?age .
        FILTER(?age > 26) . 
        }
        """)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print("All people who are over 26 years old: "+result["person"]["value"])
#Select people who graduated with Bachelor
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    SELECT ?person ?degree
    WHERE {
        ?person ex:degree ?degree .
        ?degree ex:degreeLevel "Bachelor" .
        }
        """)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print("People who graduated with Bachelor: "+result["person"]["value"])
#Delete Cade's photography interest
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    DELETE DATA {
        ex:Cade ex:interest ex:Photography .
        }
        """)
sparql.setMethod(POST)
results = sparql.query()
print(results.response.read())
# Print out Cade's interests again
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    SELECT * WHERE {
    ex:Cade ex:interest ?interest.
    }
""")
sparql.setReturnFormat(JSON)
sparql.setMethod(GET)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print(result["interest"]["value"])
# Check university names
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    SELECT ?s ?o2
    WHERE  {
        ?s ex:degree ?o .
        ?o ex:degreeSource ?o2 .
      }
    """)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print(result["o2"]["value"])
#Delete and insert university of valencia
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    DELETE { ?s ?p ex:University_of_Valencia }
    INSERT { ?s ?p ex:Universidad_de_Valencia }
    WHERE  { ?s ?p ex:University_of_Valencia }
        """)
sparql.setMethod(POST)
results = sparql.query()
print(results.response.read())
# Check university names again
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    SELECT ?s ?o2
    WHERE  {
        ?s ex:degree ?o .
        ?o ex:degreeSource ?o2 .
      }
    """)
sparql.setReturnFormat(JSON)
sparql.setMethod(GET)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
    print(result["o2"]["value"])
#Insert Sergio
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    PREFIX foaf: <http://xmlns.com/foaf/0.1/>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    INSERT DATA {
        ex:Sergio a foaf:Person ;
        ex:address [ a ex:Address ;
                ex:city ex:Valencia ;
                ex:country ex:Spain ;
                ex:postalCode "46021"^^xsd:string ;
                ex:state ex:California ;
                ex:street "4_Carrer_del_Serpis"^^xsd:string ] ;
        ex:degree [ ex:degreeField ex:Computer_science ;
                ex:degreeLevel "Master"^^xsd:string ;
                ex:degreeSource ex:University_of_Valencia ;
                ex:year "2008"^^xsd:gYear ] ;
        ex:expertise ex:Big_data,
            ex:Semantic_technologies,
            ex:Machine_learning;
        foaf:name "Sergio_Pastor"^^xsd:string .
        }
    """)
sparql.setMethod(POST)
results = sparql.query()
print(results.response.read())
sparql.setMethod(GET)
# Describe Sergio
sparql.setReturnFormat(TURTLE)
sparql.setQuery("""
    PREFIX ex: <http://example.org/>
    DESCRIBE ex:Sergio ?o
    WHERE {
        ex:Sergio ?p ?o .
        ?o ?p2 ?o2 .
    }
    """)
results = sparql.query().convert()
print(results.serialize(format='turtle'))
# Construct that any city is in the country in an address
sparql.setQuery("""
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ex: <http://example.org/>
    CONSTRUCT {?city ex:locatedIn ?country}
    Where {
        ?s rdf:type ex:Address .
        ?s ex:city ?city .
        ?s ex:country ?country.
        }
    """)
sparql.setReturnFormat(TURTLE)
results = sparql.query().convert()
print(results.serialize(format='turtle'))
</syntaxhighlight>
==Web APIs and JSON-LD==
<syntaxhighlight>
import requests
from rdflib import Graph, Namespace, Literal
from rdflib.namespace import RDF, FOAF
r = requests.get('http://api.open-notify.org/astros.json').json()
g = Graph()
EX = Namespace('http://EXample.org/')
g.bind("ex", EX)
for item in r['people']:
    craft = item['craft'].replace(" ","_")
    person = item['name'].replace(" ","_")
    g.add((EX[person], EX.onCraft, EX[craft]))
    g.add((EX[person], RDF.type, FOAF.Person))
    g.add((EX[person], FOAF.name, Literal(item['name'])))
    g.add((EX[craft], FOAF.name, Literal(item['craft'])))
res = g.query("""
    CONSTRUCT {?person1 foaf:knows ?person2}
    WHERE {
        ?person1 ex:onCraft ?craft .
        ?person2 ex:onCraft ?craft .
        }
""")
for triplet in res:
    # (we don't need to add that they know themselves)
    if (triplet[0] != triplet[2]):
        g.add((triplet))
       
print(g.serialize(format="turtle"))
</syntaxhighlight>
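Since this lab is also about JSON-LD, the same graph can be serialized as JSON-LD instead of Turtle. A minimal sketch (assuming a recent rdflib where the JSON-LD serializer is built in; older versions need the separate rdflib-jsonld plugin):
<syntaxhighlight>
# Serialize the astronaut graph as JSON-LD instead of Turtle.
print(g.serialize(format="json-ld", indent=4))
</syntaxhighlight>
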
==Semantic lifting - CSV==
<syntaxhighlight>
import pandas as pd
from rdflib import Graph, Namespace, URIRef, Literal
from rdflib.namespace import RDF, XSD
import spotlight
from spotlight import SpotlightException

# Parameter given to Spotlight to filter out results with confidence lower than this value
CONFIDENCE = 0.5
SERVER = "https://api.dbpedia-spotlight.org/en/annotate"


def annotate_entity(entity):
    annotations = []
    try:
        annotations = spotlight.annotate(address=SERVER, text=entity, confidence=CONFIDENCE)
    # This catches errors thrown from Spotlight, including when no resource is found in DBpedia
    except SpotlightException as e:
        print(e)
    return annotations


ex = Namespace("http://example.org/")
dbr = Namespace("http://dbpedia.org/resource/")
dbp = Namespace("https://dbpedia.org/property/")
dbpage = Namespace("https://dbpedia.org/page/")
sem = Namespace("http://semanticweb.cs.vu.nl/2009/11/sem/")
tl = Namespace("http://purl.org/NET/c4dm/timeline.owl#")

g = Graph()
g.bind("ex", ex)
g.bind("dbr", dbr)
g.bind("dbp", dbp)
g.bind("dbpage", dbpage)
g.bind("sem", sem)
g.bind("tl", tl)

df = pd.read_csv("russia-investigations.csv")
# We need to correct the type of the columns in the DataFrame, as Pandas assigns an incorrect type
# when it reads the file (for me at least). We use .astype("str") to convert the content of the columns to strings.
df["name"] = df["name"].astype("str")
df["type"] = df["type"].astype("str")

# iterrows creates an iterable object (list of rows)
for index, row in df.iterrows():
    investigation = URIRef(ex + row['investigation'])
    investigation_spotlight = annotate_entity(row['investigation'])
    investigation_start = Literal(row['investigation-start'], datatype=XSD.date)
    investigation_end = Literal(row['investigation-end'], datatype=XSD.date)
    investigation_days = Literal(row['investigation-days'], datatype=XSD.integer)
    name = Literal(row['name'], datatype=XSD.string)
    name_underscore = URIRef(dbpage + row['name'].replace(" ", "_"))
    investigation_result = URIRef(
        ex + row['investigation'] + "_investigation_" + row['name'].replace(" ", "_"))
    indictment_days = Literal(row['indictment-days'], datatype=XSD.integer)
    type = URIRef(dbr + row['type'].replace(" ", "_"))
    cp_date = Literal(row['cp-date'], datatype=XSD.date)
    cp_days = Literal(row['cp-days'], datatype=XSD.duration)
    overturned = Literal(row['overturned'], datatype=XSD.boolean)
    pardoned = Literal(row['pardoned'], datatype=XSD.boolean)
    american = Literal(row['american'], datatype=XSD.boolean)
    president = Literal(row['president'], datatype=XSD.string)
    president_underscore = URIRef(dbr + row['president'].replace(" ", "_"))
    president_spotlight = annotate_entity(row['president'])

    # Use the Spotlight URI if an annotation was found, otherwise fall back to our own URI.
    try:
        g.add((URIRef(investigation_spotlight[0]["URI"]), RDF.type, sem.Event))
    except:
        g.add((investigation, RDF.type, sem.Event))
    try:
        g.add((URIRef(investigation_spotlight[0]["URI"]), sem.hasBeginTimeStamp, investigation_start))
    except:
        g.add((investigation, sem.hasBeginTimeStamp, investigation_start))
    try:
        g.add((URIRef(investigation_spotlight[0]["URI"]), sem.hasEndTimeStamp, investigation_end))
    except:
        g.add((investigation, sem.hasEndTimeStamp, investigation_end))
    try:
        g.add((URIRef(investigation_spotlight[0]["URI"]), tl.duration, investigation_days))
    except:
        g.add((investigation, tl.duration, investigation_days))
    try:
        g.add((URIRef(investigation_spotlight[0]["URI"]), dbp.president, URIRef(president_spotlight[0]["URI"])))
    except:
        g.add((investigation, dbp.president, president_underscore))
    try:
        g.add((URIRef(investigation_spotlight[0]["URI"]), sem.hasSubEvent, investigation_result))
    except:
        g.add((investigation, sem.hasSubEvent, investigation_result))

    g.add((investigation_result, ex.resultType, type))
    g.add((investigation_result, ex.objectOfInvestigation, name_underscore))
    g.add((investigation_result, ex.isAmerican, american))
    g.add((investigation_result, ex.indictmentDuration, indictment_days))
    g.add((investigation_result, ex.caseSolved, cp_date))
    g.add((investigation_result, ex.daysBeforeCaseSolved, cp_days))
    g.add((investigation_result, ex.overturned, overturned))
    g.add((investigation_result, ex.pardoned, pardoned))

g.serialize("output.ttl", format="ttl")
</syntaxhighlight>
==RDFS==
<syntaxhighlight>
from rdflib import Graph, Namespace, URIRef, Literal, BNode
from rdflib.namespace import RDF, RDFS, FOAF, XSD, OWL
import owlrl

ex = Namespace("http://example.org/")
dbr = Namespace("http://dbpedia.org/resource/")
dbp = Namespace("https://dbpedia.org/property/")
dbpage = Namespace("https://dbpedia.org/page/")
sem = Namespace("http://semanticweb.cs.vu.nl/2009/11/sem/")
tl = Namespace("http://purl.org/NET/c4dm/timeline.owl#")

g = Graph()
g.bind("ex", ex)
g.bind("dbr", dbr)
g.bind("dbp", dbp)
g.bind("dbpage", dbpage)
g.bind("sem", sem)
g.bind("tl", tl)

g.parse(location="exampleTTL.ttl", format="turtle")

# University of California and University of Valencia are both Universities.
g.add((ex.University_of_California, RDF.type, ex.University))
g.add((ex.University_of_Valencia, RDF.type, ex.University))

# All universities are higher education institutions (HEIs).
g.add((ex.University, RDFS.subClassOf, ex.Higher_education))

# Only persons can have an expertise, and what they have expertise in is always a subject.
g.add((ex.expertise, RDFS.domain, FOAF.Person))
g.add((ex.expertise, RDFS.range, ex.subject))

# Only persons can graduate from a HEI.
g.add((ex.graduatedFromHEI, RDFS.domain, FOAF.Person))
g.add((ex.graduatedFromHEI, RDFS.range, ex.Higher_education))

# If you are a student, you are in fact a person as well.
g.add((ex.Student, RDFS.subClassOf, FOAF.Person))

# That a person is married to someone, means that they know them.
g.add((ex.married, RDFS.subPropertyOf, FOAF.knows))

# Finally, if a person has a name, that name is also the label of that entity.
g.add((FOAF.name, RDFS.subPropertyOf, RDFS.label))

# Having a degree from a HEI means that you have also graduated from that HEI.
g.add((ex.degree, RDFS.subPropertyOf, ex.graduatedFromHEI))

# That a city is a capital of a country means that this city is located in that country.
g.add((ex.capital, RDFS.domain, ex.Country))
g.add((ex.capital, RDFS.range, ex.City))
g.add((ex.capital, RDFS.subPropertyOf, ex.hasLocation))

# That someone was involved in a meeting, means that they have met the other participants.
    # This cannot be expressed in RDFS alone; it needs more expressive OWL or a simple SPARQL CONSTRUCT like below.
res = g.query("""
    CONSTRUCT {?person1 ex:haveMet ?person2}
    WHERE {
        ?person1 ex:meeting ?Meeting .
        ?Meeting ex:involved ?person2 .
        }
""")
for triplet in res:
    # we don't need to add that people have met themselves
    if (triplet[0] != triplet[2]):
        g.add((triplet))

# If someone partook in a meeting somewhere, means that they have visited that place.
    # This also needs OWL or SPARQL, for the same reason.
res = g.query("""
    CONSTRUCT {?person1 ex:hasVisited ?place}
    WHERE {
        ?person1 ex:meeting ?Meeting .
        ?Meeting ex:location ?place .
        }
""")
for triplet in res:
        g.add((triplet))

# These lines add the inferred triples to the graph.
rdfs = owlrl.OWLRL.OWLRL_Semantics(g, False, False, False)
rdfs.closure()
rdfs.flush_stored_triples()

g.serialize("output.ttl", format="ttl")
</syntaxhighlight>
 
==OWL 1==
<syntaxhighlight>
import owlrl
from rdflib import Graph, Namespace, Literal, URIRef
from rdflib.namespace import RDF, RDFS, XSD, FOAF, OWL
from rdflib.collection import Collection

g = Graph()

# Namespaces
ex = Namespace("http://example.org/")
dbp = Namespace("http://dbpedia.org/resource/")
geo = Namespace("http://sws.geonames.org/")
schema = Namespace("https://schema.org/")
akt = Namespace("http://www.aktors.org/ontology/portal#")
vcard = Namespace("http://www.w3.org/2006/vcard/ns#")

g.bind("ex", ex)
g.bind("owl", OWL)

g.parse(location="lab8turtle.txt", format="turtle")

# Cade and Emma are two different persons.
g.add((ex.Cade, OWL.differentFrom, ex.Emma))

# The country USA above is the same as the DBpedia resource http://dbpedia.org/resource/United_States (dbr:United_States)
# and the GeoNames resource http://sws.geonames.org/6252001/ (gn:6252001).
g.add((ex.USA, OWL.sameAs, dbp.United_States))
g.add((ex.USA, OWL.sameAs, geo["6252001"]))

# The person class (the RDF type of the Cade and Emma resources) in your graph is the same as FOAF's, schema.org's and AKT's person classes
    # (they are http://xmlns.com/foaf/0.1/Person, http://schema.org/Person, and http://www.aktors.org/ontology/portal#Person, respectively).
g.add((FOAF.Person, OWL.sameAs, schema.Person))
g.add((FOAF.Person, OWL.sameAs, akt.Person))

# Nothing can be any two of a person, a university, or a city at the same time.
Collection(g, ex.DisjointClasses, [FOAF.Person, ex.University, ex.City])
g.add((OWL.AllDifferent, OWL.distinctMembers, ex.DisjointClasses))

# The property you have used in your RDF/RDFS graph to represent that 94709 is the US zip code of Berkeley, California in US
    # is a subproperty of VCard's postal code-property (http://www.w3.org/2006/vcard/ns#postal-code).
g.add((ex.postalCode, RDFS.subPropertyOf, vcard["postal-code"]))

# No two US cities can have the same postal code.
    # We have to add a relation from city to postal code first
res = g.query("""
    PREFIX RDF: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ex: <http://example.org/>
    CONSTRUCT {?usa_city ex:us_city_postal_code ?postalcode}
    WHERE {
        ?address RDF:type ex:Address .
        ?address ex:country ex:USA .
        ?address ex:city ?usa_city .
        ?address ex:postalCode ?postalcode
        }
""")
for triplet in res:
        g.add((triplet))
    # Now we can make US cities have distinct postal codes
g.add((ex.us_city_postal_code, RDF.type, OWL.FunctionalProperty))
g.add((ex.us_city_postal_code, RDF.type, OWL.InverseFunctionalProperty))
g.add((ex.us_city_postal_code, RDFS.subPropertyOf, ex.postalCode))

# The property you have used for Emma living in Valencia is the same property as FOAF's based-near property
    # (http://xmlns.com/foaf/0.1/based_near), and it is the inverse of DBpedia's hometown property (http://dbpedia.org/ontology/hometown, dbo:hometown).
g.add((ex.city, OWL.sameAs, FOAF.based_near))
g.add((ex.city, OWL.inverseOf, dbp.hometown))

g.add((ex.Cade, ex.married, ex.Mary))
g.add((ex.Cade, ex.livesWith, ex.Mary))
g.add((ex.Cade, ex.sibling, ex.Andrew))
g.add((ex.Cade, ex.hasFather, ex.Bob))
g.add((ex.Bob, ex.fatherOf, ex.Cade))

# Look through the predicates (properties) above and add new triples for each one that describes them as any of the following:
    # a reflexive, irreflexive, symmetric, asymmetric, transitive, functional, or an inverse functional property.
g.add((ex.married, RDF.type, OWL.SymmetricProperty))
g.add((ex.married, RDF.type, OWL.FunctionalProperty))
g.add((ex.married, RDF.type, OWL.InverseFunctionalProperty))
g.add((ex.married, RDF.type, OWL.IrreflexiveProperty))

g.add((ex.livesWith, RDF.type, OWL.SymmetricProperty))
g.add((ex.livesWith, RDF.type, OWL.ReflexiveProperty))
g.add((ex.livesWith, RDF.type, OWL.TransitiveProperty))

g.add((ex.sibling, RDF.type, OWL.SymmetricProperty))

g.add((ex.hasFather, RDF.type, OWL.AsymmetricProperty))
g.add((ex.hasFather, RDF.type, OWL.FunctionalProperty))
g.add((ex.hasFather, RDF.type, OWL.IrreflexiveProperty))

g.add((ex.fatherOf, RDF.type, OWL.AsymmetricProperty))
g.add((ex.fatherOf, RDF.type, OWL.IrreflexiveProperty))
g.add((ex.fatherOf, RDF.type, OWL.InverseFunctionalProperty))

# These three lines add inferred triples to the graph.
owl = owlrl.CombinedClosure.RDFS_OWLRL_Semantics(g, False, False, False)
owl.closure()
owl.flush_stored_triples()

g.serialize("lab8output.xml", format="xml")
</syntaxhighlight>
 
==Semantic lifting - XML==
<syntaxhighlight>
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF
import xml.etree.ElementTree as ET
import requests

g = Graph()
ex = Namespace("http://example.org/")
prov = Namespace("http://www.w3.org/ns/prov#")
g.bind("ex", ex)
g.bind("prov", prov)

# URL of the xml data
url = 'http://feeds.bbci.co.uk/news/rss.xml'

# Retrieve the xml data from the web-url.
resp = requests.get(url)

# Creating an ElementTree from the response content
tree = ET.ElementTree(ET.fromstring(resp.content))
root = tree.getroot()

# I just realized this is cheating, but whatever, you should do it with xmltree
writerDict = {
    "Mon": "Thomas_Smith",
    "Tue": "Thomas_Smith",
    "Wed": "Thomas_Smith",
    "Thu": "Joseph_Olson",
    "Fri": "Joseph_Olson",
    "Sat": "Sophia_Cruise",
    "Sun": "Sophia_Cruise"
}

copyright = Literal(root.findall("./channel")[0].find("copyright").text)

for item in root.findall("./channel/item"):
    News_article_id = URIRef(item.find("guid").text)
    title = Literal(item.find("title").text)
    description = Literal(item.find("description").text)
    link = URIRef(item.find("link").text)
    pubDate = Literal(item.find("pubDate").text)
    writerName = ex[writerDict[pubDate[:3]]]

    g.add((News_article_id, ex.title, title))
    g.add((News_article_id, ex.description, description))
    g.add((News_article_id, ex.source_link, link))
    g.add((News_article_id, ex.pubDate, pubDate))
    g.add((News_article_id, ex.copyright, copyright))
    g.add((News_article_id, RDF.type, ex.News_article))
    g.add((News_article_id, RDF.type, prov.Entity))

    g.add((News_article_id, ex.authoredBy, writerName))
    g.add((writerName, RDF.type, prov.Person))
    g.add((writerName, RDF.type, prov.Agent))
    g.add((ex.authoredBy, RDF.type, prov.Generation))

print(g.serialize(format="turtle"))
</syntaxhighlight>


==OWL 2==
<syntaxhighlight>
from rdflib import Graph, Literal, Namespace, BNode
from rdflib.namespace import RDF, OWL, RDFS
from rdflib.collection import Collection

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)
g.bind("owl", OWL)

# anyone who is a graduate has at least one degree
br = BNode()
g.add((br, RDF.type, OWL.Restriction))
g.add((br, OWL.onProperty, ex.degree))
g.add((br, OWL.minCardinality, Literal(1)))
bi = BNode()
Collection(g, bi, [ex.Person, br])
g.add((ex.Graduate, OWL.intersectionOf, bi))

# anyone who is a university graduate has at least one degree from a university
br = BNode()
g.add((br, RDF.type, OWL.Restriction))
g.add((br, OWL.onProperty, ex.degree))
g.add((br, OWL.someValuesFrom, ex.University))
bi = BNode()
Collection(g, bi, [ex.Graduate, br])
                # [ex.Person, br] would also work: someValuesFrom implies a cardinality of at least one, so they would be equivalent.
                # [ex.Person, ex.Graduate, br] would be redundant since intersection is associative.
g.add((ex.University_graduate, OWL.intersectionOf, bi))

# a grade is either an A, B, C, D, E or F
bi = BNode()
Collection(g, bi, [Literal("A"), Literal("B"), Literal("C"), Literal("D"), Literal("E"), Literal("F")])
b1 = BNode()
g.add((b1, RDF.type, RDFS.Datatype))
g.add((b1, OWL.oneOf, bi))

g.add((ex.grade, RDFS.range, b1))

# a straight A student is a student that has only A grades
b1 = BNode()
g.add((b1, RDF.type, OWL.Restriction))
g.add((b1, OWL.onProperty, ex.grade))
g.add((b1, OWL.allValuesFrom, Literal("A")))

b2 = BNode()
g.add((b2, RDF.type, OWL.Restriction))
g.add((b2, OWL.onProperty, ex.grade))
g.add((b2, OWL.someValuesFrom, Literal("A")))

bi = BNode()
Collection(g, bi, [ex.Student, b1, b2])
g.add((ex.Straight_A_student, OWL.intersectionOf, bi))

# a graduate has no F grades
b3 = BNode()
Collection(g, b3, [Literal("A"), Literal("B"), Literal("C"), Literal("D"), Literal("E")])
b4 = BNode()
g.add((b4, RDF.type, RDFS.Datatype))
g.add((b4, OWL.oneOf, b3))
b5 = BNode()
g.add((b5, RDF.type, OWL.Restriction))
g.add((b5, OWL.onProperty, ex.grade))
g.add((b5, OWL.allValuesFrom, b4))

b6 = BNode()
Collection(g, b6, [ex.Person, b1, b5])
g.add((ex.Graduate, OWL.intersectionOf, b6))

# a student has a unique student number
g.add((ex.student_number, RDF.type, OWL.FunctionalProperty))
g.add((ex.student_number, RDF.type, OWL.InverseFunctionalProperty))

# each student has exactly one average grade
b1 = BNode()
g.add((b1, RDF.type, OWL.Restriction))
g.add((b1, OWL.onProperty, ex.average_grade))
g.add((b1, OWL.cardinality, Literal(1)))

b2 = BNode()
g.add((b2, RDF.type, OWL.Restriction))
g.add((b2, OWL.onProperty, ex.student_number))
g.add((b2, OWL.cardinality, Literal(1)))

b3 = BNode()
Collection(g, b3, [ex.Person, b1, b2])
g.add((ex.Student, OWL.intersectionOf, b3))

# a course is either a bachelor, a master or a Ph.D course
bi = BNode()
Collection(g, bi, [ex.Bachelor_course, ex.Master_course, ex["Ph.D_course"]])
b1 = BNode()
#g.add((b1, RDF.type, OWL.Class))
g.add((b1, OWL.oneOf, bi))

g.add((ex.Course, RDF.type, b1))

# a bachelor student takes only bachelor courses
g.add((ex.Bachelor_student, RDFS.subClassOf, ex.Student))
b1 = BNode()
g.add((b1, RDF.type, OWL.Restriction))
g.add((b1, OWL.onProperty, ex.hasCourse))
g.add((b1, OWL.allValuesFrom, ex.Bachelor_course))

b2 = BNode()
Collection(g, b2, [ex.Student, b1])
g.add((ex.Bachelor_student, OWL.intersectionOf, b2))

# a masters student takes only master courses and at most one bachelor course
b1 = BNode()
g.add((b1, RDF.type, OWL.Restriction))
g.add((b1, OWL.onProperty, ex.hasCourse))
g.add((b1, OWL.maxQualifiedCardinality, Literal(1)))
g.add((b1, OWL.onClass, ex.Bachelor_course))

b2 = BNode()
g.add((b2, RDF.type, OWL.Restriction))
g.add((b2, OWL.onProperty, ex.hasCourse))
g.add((b2, OWL.someValuesFrom, ex.Master_course))

b3 = BNode()
Collection(g, b3, [ex.Master_course, ex.Bachelor_course])
b5 = BNode()
g.add((b5, RDF.type, OWL.Restriction))
g.add((b5, OWL.onProperty, ex.hasCourse))
g.add((b5, OWL.allValuesFrom, b3))
b6 = BNode()
Collection(g, b6, [ex.Student, b1, b2, b5])
g.add((ex.Master_student, OWL.intersectionOf, b6))

# a Ph.D student takes only Ph.D and at most two masters courses
b1 = BNode()
g.add((b1, RDF.type, OWL.Restriction))
g.add((b1, OWL.onProperty, ex.hasCourse))
g.add((b1, OWL.maxQualifiedCardinality, Literal(2)))
g.add((b1, OWL.onClass, ex.Master_course))
b2 = BNode()
g.add((b2, RDF.type, OWL.Restriction))
g.add((b2, OWL.onProperty, ex.hasCourse))
g.add((b2, OWL.someValuesFrom, ex["Ph.D_course"]))
b3 = BNode()
Collection(g, b3, [ex.Master_course, ex["Ph.D_course"]])
b5 = BNode()
g.add((b5, RDF.type, OWL.Restriction))
g.add((b5, OWL.onProperty, ex.hasCourse))
g.add((b5, OWL.allValuesFrom, b3))
b6 = BNode()
Collection(g, b6, [ex.Student, b1, b2, b5])
g.add((ex["Ph.D_student"], OWL.intersectionOf, b6))

# a Ph.D. student cannot take a bachelor course
    # NA, it's already true
</syntaxhighlight>

=2 [[/info216.wiki.uib.no/Lab: SPARQL|Lab: SPARQL queries]] =

List all triples in your graph.
<syntaxhighlight>
select * where {
    ?s ?p ?o .
}
</syntaxhighlight>

List the first 100 triples in your graph.
<syntaxhighlight>
select * where {
    ?s ?p ?o .
} limit 100
</syntaxhighlight>

Count the number of triples in your graph.
<syntaxhighlight>
select (count(?s) as ?tripleCount) where {
    ?s ?p ?o .
}
</syntaxhighlight>

Count the number of indictments in your graph.
<syntaxhighlight>
PREFIX muellerkg: <http://example.org#>

select (count(?s) as ?numIndictment) where {
    ?s ?p muellerkg:Indictment .
}
</syntaxhighlight>

List everyone who pleaded guilty, along with the name of the investigation.
<syntaxhighlight>
PREFIX m: <http://example.org#>

select ?name ?s where {
    ?s ?p m:guilty-plea;
       m:name ?name.
}
</syntaxhighlight>

List everyone who were convicted, but who had their conviction overturned by which president.
<syntaxhighlight>
PREFIX muellerkg: <http://example.org#>

select ?name ?president where {
    ?s ?p muellerkg:conviction;
       muellerkg:name ?name;
       muellerkg:overturned true;
       muellerkg:president ?president.
} limit 100
</syntaxhighlight>

For each investigation, list the number of indictments made.
<syntaxhighlight>
PREFIX muellerkg: <http://example.org#>

select ?investigation (count(?investigation) as ?numIndictments) where {
    ?s muellerkg:investigation ?investigation .
} group by (?investigation)
</syntaxhighlight>

For each investigation with multiple indictments, list the number of indictments made.
<syntaxhighlight>
PREFIX muellerkg: <http://example.org#>

select ?investigation (count(?investigation) as ?numIndictments) where {
    ?s muellerkg:investigation ?investigation.
} group by (?investigation)
having (?numIndictments > 1)
</syntaxhighlight>

For each investigation with multiple indictments, list the number of indictments made, sorted with the most indictments first.
<syntaxhighlight>
PREFIX muellerkg: <http://example.org#>

select ?investigation (count(?investigation) as ?numIndictments) where {
    ?s muellerkg:investigation ?investigation.
} group by (?investigation)
having (?numIndictments > 1)
order by desc(?numIndictments)
</syntaxhighlight>

For each president, list the numbers of convictions and of pardons made after conviction.
<syntaxhighlight>
PREFIX muellerkg: <http://example.org#>

SELECT ?president (COUNT(?conviction) AS ?numConvictions) (COUNT(?pardon) AS ?numPardoned)
WHERE {
    ?indictment muellerkg:president ?president ;
                muellerkg:outcome muellerkg:conviction .
    BIND(?indictment AS ?conviction)
    OPTIONAL {
        ?indictment muellerkg:pardoned true .
        BIND(?indictment AS ?pardon)
    }
}
GROUP BY ?president
</syntaxhighlight>


==Lab 11: Semantic Lifting - HTML==
<syntaxhighlight>
from bs4 import BeautifulSoup as bs
from rdflib import Graph, Literal, URIRef, Namespace
from rdflib.namespace import RDF, SKOS, XSD
import requests

g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)

# Download html from URL and parse it with BeautifulSoup.
url = "https://www.semanticscholar.org/topic/Knowledge-Graph/159858"
page = requests.get(url)
html = bs(page.content, features="html.parser")
# print(html.prettify())

# Find the html that surrounds all the papers
papers = html.find_all('div', attrs={'class': 'flex-container'})
# Find the html that surrounds the info box
topic = html.find_all(
    'div', attrs={'class': 'flex-item__left-column entity-header'})

# Iterate through each paper to make triples:
for paper in papers:
    # e.g. selecting the title.
    title = paper.find('div', attrs={'class': 'timeline-paper-title'}).text
    author = paper.find('span', attrs={'class': 'author-list'}).text
    paper_year = paper.find(
        'li', attrs={'data-selenium-selector': "paper-year"}).text
    corpus_ID = paper.find(
        'li', attrs={'data-selenium-selector': "corpus-id"}).text
    corpus_ID = corpus_ID.replace(" ", "_")
    c_id = corpus_ID.replace("Corpus_ID:_", "")

    article = URIRef(ex + c_id)

    # Adding triples
    g.add((article, RDF.type, ex.paper))
    g.add((article, ex.HasID, Literal(c_id, datatype=XSD.int)))
    g.add((article, ex.HasTitle, Literal(title, datatype=XSD.string)))
    g.add((article, ex.Publisher_year, Literal(paper_year, datatype=XSD.gYear)))

    author = author.split(", ")
    for x in author:
        name = x.replace(" ", "_")
        name = URIRef(ex + name)

        g.add((article, ex.hasAuthor, name))

    # Iterate through the info box to make triples:
    for items in topic:
        main_topic = items.find('h1', attrs={'class': 'entity-name'}).text
        related_topic = items.find(
            'div', attrs={'class': 'entity-aliases'}).text
        related_topic = related_topic.replace("Known as: ", "")
        related_topic = related_topic.replace(f'\xa0Expand', "")
        related_topic = related_topic.replace(" ", "")
        main_topic = main_topic.replace(" ", "_")

        main_topic = URIRef(ex + main_topic)

        g.add((article, RDF.type, SKOS.Concept))
        g.add((article, SKOS.hasTopConcept, main_topic))

    related_topic = related_topic.split(',')

    for related_labels in related_topic:
        related_topic = URIRef(ex + related_labels)
        g.add((article, SKOS.broader, related_topic))


print(g.serialize(format='turtle'))
</syntaxhighlight>

== 3 [[/info216.wiki.uib.no/Lab: SPARQL Programming|Lab: SPARQL programming]] ==

<syntaxhighlight>
from rdflib import Graph, Namespace, RDF, FOAF
from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE

g = Graph()
g.parse("Russia_investigation_kg.ttl")

# ----- RDFLIB -----

ex = Namespace('http://example.org#')

NS = {
    '': ex,
    'rdf': RDF,
    'foaf': FOAF,
}

# Print out a list of all the predicates used in your graph.
task1 = g.query("""
SELECT DISTINCT ?p WHERE{
    ?s ?p ?o .
}
""", initNs=NS)

print(list(task1))

# Print out a sorted list of all the presidents represented in your graph.
task2 = g.query("""
SELECT DISTINCT ?president WHERE{
    ?s :president ?president .
}
ORDER BY ?president
""", initNs=NS)

print(list(task2))

# Create a dictionary (Python dict) with all the represented presidents as keys.
# For each key, the value is a list of names of people indicted under that president.
task3_dic = {}

task3 = g.query("""
SELECT ?president ?person WHERE{
    ?s :president ?president;
       :name ?person;
       :outcome :indictment.
}
""", initNs=NS)

for president, person in task3:
    if president not in task3_dic:
        task3_dic[president] = [person]
    else:
        task3_dic[president].append(person)

print(task3_dic)

# Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people.
task4 = g.query("""
    ASK{
        SELECT ?count WHERE{{
          SELECT (COUNT(?s) as ?count) WHERE{
            ?s :pardoned :true;
               :president :Bill_Clinton .
                }}
        FILTER (?count > 5)
        }
    }
""", initNs=NS)

print(task4.askAnswer)

# task5 = g.query("""
# DESCRIBE :Donald_Trump
# """, initNs=NS)

# print(task5.serialize())

# ----- SPARQLWrapper -----

SERVER = 'http://localhost:7200' #Might need to replace this
REPOSITORY = 'Labs' #Replace with your repository name

# Query Endpoint
sparql = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}')
# Update Endpoint
sparqlUpdate = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}/statements')

# Ask whether there was an ongoing indictment on the date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    ASK {
        SELECT ?end ?start
        WHERE{
            ?s ns1:investigation_end ?end;
               ns1:investigation_start ?start;
               ns1:outcome ns1:indictment.
            FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
        }
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(f"Are there any investigations on 1990-01-01: {results['boolean']}")

# List ongoing indictments on that date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    SELECT ?s
    WHERE{
        ?s ns1:investigation_end ?end;
           ns1:investigation_start ?start;
           ns1:outcome ns1:indictment.
        FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()

print("The ongoing investigations on the 1990-01-01 are:")
for result in results["results"]["bindings"]:
    print(result["s"]["value"])

# Describe investigation number 100 (muellerkg:investigation_100).
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    DESCRIBE ns1:investigation_100
""")
sparql.setReturnFormat(TURTLE)
results = sparql.query().convert()

print(results)

# Print out a list of all the types used in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    SELECT DISTINCT ?types
    WHERE{
        ?s rdf:type ?types .
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()

rdf_Types = []
for result in results["results"]["bindings"]:
    rdf_Types.append(result["types"]["value"])

print(rdf_Types)

# Update the graph so that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?invest rdf:type ns1:Investigation .
    }
    WHERE{
        ?s ns1:investigation ?invest .
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#To Test
sparql.setQuery("""
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>

    ASK{
        ns1:watergate rdf:type ns1:Investigation.
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])

# Update the graph so that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?person rdf:type ns1:IndictedPerson .
    }
    WHERE{
        ?s ns1:name ?person .
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson

# Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>

    INSERT{
        ?invest dc:title ?investString.
    }
    WHERE{
        ?s ns1:investigation ?invest .
        BIND (replace(str(?invest), str(ns1:), "")  AS ?investString)
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate"
</syntaxhighlight>
 
=More miscellaneous examples=
 
 
===Printing the triples of the Graph in a readable way===
<syntaxhighlight>
# The Turtle format is designed to be more readable for humans.
print(g.serialize(format="turtle"))
</syntaxhighlight>
 
===Coding Tasks Lab 1===
<syntaxhighlight>
from rdflib import Graph, Namespace, URIRef, BNode, Literal
from rdflib.namespace import RDF, FOAF, XSD
 
g = Graph()
ex = Namespace("http://example.org/")
 
g.add((ex.Cade, ex.married, ex.Mary))
g.add((ex.France, ex.capital, ex.Paris))
g.add((ex.Cade, ex.age, Literal("27", datatype=XSD.integer)))
g.add((ex.Mary, ex.age, Literal("26", datatype=XSD.integer)))
g.add((ex.Mary, ex.interest, ex.Hiking))
g.add((ex.Mary, ex.interest, ex.Chocolate))
g.add((ex.Mary, ex.interest, ex.Biology))
g.add((ex.Mary, RDF.type, ex.Student))
g.add((ex.Paris, RDF.type, ex.City))
g.add((ex.Paris, ex.locatedIn, ex.France))
g.add((ex.Cade, ex.characteristic, ex.Kind))
g.add((ex.Mary, ex.characteristic, ex.Kind))
g.add((ex.Mary, RDF.type, FOAF.Person))
g.add((ex.Cade, RDF.type, FOAF.Person))
 
# OR
 
g = Graph()
 
ex = Namespace('http://example.org/')
 
g.add((ex.Cade, FOAF.name, Literal("Cade", datatype=XSD.string)))
g.add((ex.Mary, FOAF.name, Literal("Mary", datatype=XSD.string)))
g.add((ex.Cade, RDF.type, FOAF.Person))
g.add((ex.Mary, RDF.type, FOAF.Person))
g.add((ex.Mary, RDF.type, ex.Student))
g.add((ex.Cade, ex.Married, ex.Mary))
g.add((ex.Cade, FOAF.age, Literal('27', datatype=XSD.int)))
g.add((ex.Mary, FOAF.age, Literal('26', datatype=XSD.int)))
g.add((ex.Paris, RDF.type, ex.City))
g.add((ex.France, ex.Capital, ex.Paris))
g.add((ex.Mary, FOAF.interest, ex.hiking))
g.add((ex.Mary, FOAF.interest, ex.Chocolate))
g.add((ex.Mary, FOAF.interest, ex.biology))
g.add((ex.France, ex.City, ex.Paris))
g.add((ex.Mary, ex.characteristic, ex.kind))
g.add((ex.Cade, ex.characteristic, ex.kind))
g.add((ex.France, RDF.type, ex.Country))
 
 
print(g.serialize(format="turtle"))
 
</syntaxhighlight>
 
==Basic RDF programming==
 
===Different ways to create an address===
 
<syntaxhighlight>
 
from rdflib import Graph, Namespace, URIRef, BNode, Literal
from rdflib.namespace import RDF, FOAF, XSD
 
g = Graph()
ex = Namespace("http://example.org/")
 
 
# How to represent the address of Cade Tracey. From probably the worst solution to the best.
 
# Solution 1 -
# Make the entire address into one Literal. However, generally we want to separate each part of an address into its own triple. This is useful for instance if we want to find only the streets where people live.
 
g.add((ex.Cade_Tracey, ex.livesIn, Literal("1516_Henry_Street, Berkeley, California 94709, USA")))
 
 
# Solution 2 -
# Separate the different pieces of information into their own triples
 
g.add((ex.Cade_tracey, ex.street, Literal("1516_Henry_Street")))
g.add((ex.Cade_tracey, ex.city, Literal("Berkeley")))
g.add((ex.Cade_tracey, ex.state, Literal("California")))
g.add((ex.Cade_tracey, ex.zipcode, Literal("94709")))
g.add((ex.Cade_tracey, ex.country, Literal("USA")))
 
 
# Solution 3 - Some parts of the addresses can make more sense to be resources than Literals.
# Larger concepts like a city or state are typically represented as resources rather than Literals, but this is not necessarily a requirement in the case that you don't intend to say more about them.
 
g.add((ex.Cade_tracey, ex.street, Literal("1516_Henry_Street")))
g.add((ex.Cade_tracey, ex.city, ex.Berkeley))
g.add((ex.Cade_tracey, ex.state, ex.California))
g.add((ex.Cade_tracey, ex.zipcode, Literal("94709")))
g.add((ex.Cade_tracey, ex.country, ex.USA))
 
 
# Solution 4
# Grouping of the information into an Address. We can represent the address concept with its own URI OR with a Blank Node.
# One advantage of this is that we can easily remove the entire address, instead of removing each individual part of the address.
# Solution 4 or 5 is how I would recommend representing addresses. Here, ex.CadeAddress could also be called something like ex.address1 and so on, if you want to give each address a unique ID.
 
# Address URI - CadeAdress
 
g.add((ex.Cade_Tracey, ex.address, ex.CadeAddress))
g.add((ex.CadeAddress, RDF.type, ex.Address))
g.add((ex.CadeAddress, ex.street, Literal("1516 Henry Street")))
g.add((ex.CadeAddress, ex.city, ex.Berkeley))
g.add((ex.CadeAddress, ex.state, ex.California))
g.add((ex.CadeAddress, ex.postalCode, Literal("94709")))
g.add((ex.CadeAddress, ex.country, ex.USA))
 
# OR
 
# Blank node for Address. 
address = BNode()
g.add((ex.Cade_Tracey, ex.address, address))
g.add((address, RDF.type, ex.Address))
g.add((address, ex.street, Literal("1516 Henry Street", datatype=XSD.string)))
g.add((address, ex.city, ex.Berkeley))
g.add((address, ex.state, ex.California))
g.add((address, ex.postalCode, Literal("94709", datatype=XSD.string)))
g.add((address, ex.country, ex.USA))
 
 
# Solution 5 using existing vocabularies for address
 
# (in this case https://schema.org/PostalAddress from schema.org).
# Also using existing ontology for places like California. (like http://dbpedia.org/resource/California from dbpedia.org)
 
schema = Namespace("https://schema.org/")
dbp = Namespace("https://dpbedia.org/resource/")
 
g.add((ex.Cade_Tracey, schema.address, ex.CadeAddress))
g.add((ex.CadeAddress, RDF.type, schema.PostalAddress))
g.add((ex.CadeAddress, schema.streetAddress, Literal("1516 Henry Street")))
g.add((ex.CadeAddress, schema.addressCity, dbp.Berkeley))
g.add((ex.CadeAddress, schema.addressRegion, dbp.California))
g.add((ex.CadeAddress, schema.postalCode, Literal("94709")))
g.add((ex.CadeAddress, schema.addressCountry, dbp.United_States))
 
</syntaxhighlight>
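As noted under solution 4, grouping the address under its own node makes it easy to delete the whole address again. A small sketch of that, using rdflib's wildcard (None) matching and the ex.CadeAddress resource from solution 4:
<syntaxhighlight>
# Remove every triple about the address node (street, city, state, postal code, country) ...
g.remove((ex.CadeAddress, None, None))
# ... and the link from Cade to the address itself.
g.remove((ex.Cade_Tracey, ex.address, ex.CadeAddress))
</syntaxhighlight>
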
 
===Typed Literals===
<syntaxhighlight>
from rdflib import Graph, Literal, Namespace
from rdflib.namespace import XSD, FOAF
g = Graph()
ex = Namespace("http://example.org/")
 
g.add((ex.Cade, ex.age, Literal(27, datatype=XSD.integer)))
g.add((ex.Cade, ex.gpa, Literal(3.3, datatype=XSD.float)))
g.add((ex.Cade, FOAF.name, Literal("Cade Tracey", datatype=XSD.string)))
g.add((ex.Cade, ex.birthday, Literal("2006-01-01", datatype=XSD.date)))
</syntaxhighlight>
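Typed literals can also be read back as ordinary Python values. A small sketch, using the triples added above (toPython() converts a literal to the corresponding Python type):
<syntaxhighlight>
for s, p, o in g:
    if isinstance(o, Literal):
        # e.g. xsd:integer becomes int, xsd:float becomes float, xsd:date becomes datetime.date
        print(p, o.toPython(), type(o.toPython()))
</syntaxhighlight>
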
 
 
===Writing and reading graphs/files===
 
<syntaxhighlight>
  # Writing the graph to a file on your system. Possible formats = turtle, n3, xml, nt.
g.serialize(destination="triples.txt", format="turtle")
 
  # Parsing a local file
parsed_graph = g.parse(location="triples.txt", format="turtle")
 
  # Parsing a remote endpoint like Dbpedia
dbpedia_graph = g.parse("http://dbpedia.org/resource/Pluto")
</syntaxhighlight>
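A quick way to check that writing and reading worked is to parse the file back into a fresh graph and compare sizes. A minimal sketch, assuming the graph g from the examples above:
<syntaxhighlight>
from rdflib import Graph

g.serialize(destination="triples.ttl", format="turtle")

g2 = Graph()
g2.parse("triples.ttl", format="turtle")
print(len(g), len(g2))  # the two graphs should contain the same number of triples
</syntaxhighlight>
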
 
===Graph Binding===
<syntaxhighlight>
#Graph Binding is useful for at least two reasons:
#(1) We no longer need to specify prefixes with SPARQL queries if they are already bound to the graph.
#(2) When serializing the graph, the serialization will show the correct expected prefix
# instead of default namespace names ns1, ns2 etc.
 
g = Graph()
 
ex = Namespace("http://example.org/")
dbp = Namespace("http://dbpedia.org/resource/")
schema = Namespace("https://schema.org/")
 
g.bind("ex", ex)
g.bind("dbp", dbp)
g.bind("schema", schema)
</syntaxhighlight>
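For example, after the bindings above, a query on this graph can use the ex: and schema: prefixes directly, and the Turtle serialization will use them too (instead of ns1, ns2, ...). A small sketch with an illustrative ex.CadeAddress triple:
<syntaxhighlight>
g.add((ex.Cade_Tracey, schema.address, ex.CadeAddress))

# No PREFIX declarations needed here, because ex and schema are bound to the graph.
for row in g.query("SELECT ?address WHERE { ex:Cade_Tracey schema:address ?address }"):
    print(row.address)

print(g.serialize(format="turtle"))  # serialization uses the ex:, dbp: and schema: prefixes
</syntaxhighlight>
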
 
===Collection Example===
 
<syntaxhighlight>
from rdflib import Graph, Namespace, BNode
from rdflib.collection import Collection

g = Graph()
ex = Namespace("http://example.org/")

# Sometimes we want to add many objects or subjects for the same predicate at once.
# In these cases we can use Collection() to save some time.
# In this case I want to add all countries that Emma has visited at once.

b = BNode()
g.add((ex.Emma, ex.visit, b))
Collection(g, b,
    [ex.Portugal, ex.Italy, ex.France, ex.Germany, ex.Denmark, ex.Sweden])
 
# OR
 
g.add((ex.Emma, ex.visit, ex.EmmaVisits))
Collection(g, ex.EmmaVisits,
    [ex.Portugal, ex.Italy, ex.France, ex.Germany, ex.Denmark, ex.Sweden])
 
</syntaxhighlight>
 
==SPARQL==
 
Also see the [[SPARQL Examples]] page!
 
===Querying a local ("in memory") graph===
 
Example contents of the file family.ttl:
<syntaxhighlight>
@prefix rex: <http://example.org/royal#> .
@prefix fam: <http://example.org/family#> .

rex:IngridAlexandra fam:hasParent rex:HaakonMagnus .
rex:SverreMagnus fam:hasParent rex:HaakonMagnus .
rex:HaakonMagnus fam:hasParent rex:Harald .
rex:MarthaLouise fam:hasParent rex:Harald .
rex:HaakonMagnus fam:hasSister rex:MarthaLouise .
</syntaxhighlight>

<syntaxhighlight>
import rdflib

g = rdflib.Graph()
g.parse("family.ttl", format='ttl')
qres = g.query("""
PREFIX fam: <http://example.org/family#>
    SELECT ?child ?sister WHERE {
        ?child fam:hasParent ?parent .
        ?parent fam:hasSister ?sister .
    }""")
for row in qres:
    print("%s has aunt %s" % row)
</syntaxhighlight>

With a prepared query, you can write the query once, and then bind some of the variables each time you use it:
<syntaxhighlight>
import rdflib
from rdflib.plugins.sparql import prepareQuery

g = rdflib.Graph()
g.parse("family.ttl", format='ttl')
q = prepareQuery(
        """SELECT ?child ?sister WHERE {
                  ?child fam:hasParent ?parent .
                  ?parent fam:hasSister ?sister .
        }""",
        initNs = { "fam": "http://example.org/family#"})
sm = rdflib.URIRef("http://example.org/royal#SverreMagnus")
for row in g.query(q, initBindings={'child': sm}):
        print(row)
</syntaxhighlight>
 
===Select all contents of lists (rdfllib.Collection)===
<syntaxhighlight>
 
# rdflib.Collection has a different internal structure, so it requires a slightly more advanced query. Here I am selecting all places that Emma has visited.
 
PREFIX ex:  <http://example.org/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
 
SELECT ?visit
WHERE {
  ex:Emma ex:visit/rdf:rest*/rdf:first ?visit
}
</syntaxhighlight>
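To run this query from Python, it can be passed to g.query() on a graph where Emma's visits were added with Collection(), as in the Collection example above. A minimal sketch:
<syntaxhighlight>
results = g.query("""
    PREFIX ex:  <http://example.org/>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    SELECT ?visit
    WHERE {
      ex:Emma ex:visit/rdf:rest*/rdf:first ?visit
    }
""")
for row in results:
    print(row.visit)
</syntaxhighlight>
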
""", initNs=NS)


for president, person in task3:
    if president not in task3_dic:
        task3_dic[president] = [person]
    else:
        task3_dic[president].append(person)


===Using parameters/variables in rdflib queries===
print(task3_dic)


<syntaxhighlight>
# Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people.
from rdflib import Graph, Namespace, URIRef
from rdflib.plugins.sparql import prepareQuery


g = Graph()
task4 = g.query("""
ex = Namespace("http://example.org/")
    ASK{
g.bind("ex", ex)
        SELECT ?count WHERE{{
 
          SELECT (COUNT(?s) as ?count) WHERE{
g.add((ex.Cade, ex.livesIn, ex.France))
            ?s :pardoned :true;
g.add((ex.Anne, ex.livesIn, ex.Norway))
                  :president :Bill_Clinton  .
g.add((ex.Sofie, ex.livesIn, ex.Sweden))
                }}
g.add((ex.Per, ex.livesIn, ex.Norway))
        FILTER (?count > 5)  
g.add((ex.John, ex.livesIn, ex.USA))
        }
    }
""", initNs=NS)


print(task4.askAnswer)


def find_people_from_country(country):
        country = URIRef(ex + country)
        q = prepareQuery(
        """
        PREFIX ex: <http://example.org/>
        SELECT ?person WHERE {
        ?person ex:livesIn ?country.
        }
        """)


        capital_result = g.query(q, initBindings={'country': country})
# task5 = g.query("""
# DESCRIBE :Donald_Trump
# """, initNs=NS)


        for row in capital_result:
# print(task5.serialize())
            print(row)


find_people_from_country("Norway")
# ----- SPARQLWrapper -----
</syntaxhighlight>


===SELECTING data from Blazegraph via Python===
SERVER = 'http://localhost:7200' #Might need to replace this
<syntaxhighlight>
REPOSITORY = 'Labs' #Replace with your repository name


from SPARQLWrapper import SPARQLWrapper, JSON
# Query Endpoint
 
sparql = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}')
# This creates a server connection to the same URL that contains the graphic interface for Blazegraph.
# Update Endpoint
# You also need to add "sparql" to end of the URL like below.
sparqlUpdate = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}/statements')
 
sparql = SPARQLWrapper("http://localhost:9999/blazegraph/sparql")
 
# SELECT all triples in the database.


# Ask whether there was an ongoing indictment on the date 1990-01-01.
sparql.setQuery("""
sparql.setQuery("""
     SELECT DISTINCT ?p WHERE {
     PREFIX ns1: <http://example.org#>
    ?s ?p ?o.
    ASK {
        SELECT ?end ?start
        WHERE{
            ?s ns1:investigation_end ?end;
              ns1:investigation_start ?start;
              ns1:outcome ns1:indictment.
            FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
    }
     }
     }
""")
""")
sparql.setReturnFormat(JSON)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
results = sparql.query().convert()
print(f"Are there any investigation on the 1990-01-01: {results['boolean']}")


for result in results["results"]["bindings"]:
# List ongoing indictments on that date 1990-01-01.
    print(result["p"]["value"])
 
# SELECT all interests of Cade
 
sparql.setQuery("""
sparql.setQuery("""
     PREFIX ex: <http://example.org/>
     PREFIX ns1: <http://example.org#>
     SELECT DISTINCT ?interest WHERE {
     SELECT ?s
    ex:Cade ex:interest ?interest.
    WHERE{
        ?s ns1:investigation_end ?end;
          ns1:investigation_start ?start;
          ns1:outcome ns1:indictment.
        FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date)
     }
     }
""")
""")
sparql.setReturnFormat(JSON)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
results = sparql.query().convert()


print("The ongoing investigations on the 1990-01-01 are:")
for result in results["results"]["bindings"]:
for result in results["results"]["bindings"]:
     print(result["interest"]["value"])
     print(result["s"]["value"])
</syntaxhighlight>


===Updating data from Blazegraph via Python===
# Describe investigation number 100 (muellerkg:investigation_100).
<syntaxhighlight>
from SPARQLWrapper import SPARQLWrapper, POST, DIGEST
 
namespace = "kb"
sparql = SPARQLWrapper("http://localhost:9999/blazegraph/namespace/"+ namespace + "/sparql")
 
sparql.setMethod(POST)
sparql.setQuery("""
sparql.setQuery("""
     PREFIX ex: <http://example.org/>
     PREFIX ns1: <http://example.org#>
     INSERT DATA{
     DESCRIBE ns1:investigation_100
    ex:Cade ex:interest ex:Mathematics.
    }
""")
""")


results = sparql.query()
sparql.setReturnFormat(TURTLE)
print(results.response.read())
results = sparql.query().convert()


print(results)


</syntaxhighlight>
# Print out a list of all the types used in your graph.
===Retrieving data from Wikidata with SparqlWrapper===
sparql.setQuery("""
<syntaxhighlight>
    PREFIX ns1: <http://example.org#>
from SPARQLWrapper import SPARQLWrapper, JSON
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>


sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
     SELECT DISTINCT ?types
# In the query I want to select all the Vitamins in wikidata.
    WHERE{
 
        ?s rdf:type ?types .  
sparql.setQuery("""
    }
     SELECT ?nutrient ?nutrientLabel WHERE
{
  ?nutrient wdt:P279 wd:Q34956.
  SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
""")
""")


sparql.setReturnFormat(JSON)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
results = sparql.query().convert()
rdf_Types = []


for result in results["results"]["bindings"]:
for result in results["results"]["bindings"]:
     print(result["nutrient"]["value"], "  ", result["nutrientLabel"]["value"])
     rdf_Types.append(result["types"]["value"])
</syntaxhighlight>


print(rdf_Types)


More examples can be found in the example section on the official query service here: https://query.wikidata.org/.
# Update the graph to that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>


===Download from BlazeGraph===
    INSERT{
        ?invest rdf:type ns1:Investigation .
    }
    WHERE{
        ?s ns1:investigation ?invest .
}"""


<syntaxhighlight>
sparqlUpdate.setQuery(update_str)
"""
sparqlUpdate.setMethod(POST)
Dumps a database to a local RDF file.
sparqlUpdate.query()
You need to install the SPARQLWrapper package first...
"""


import datetime
#To Test
from SPARQLWrapper import SPARQLWrapper, RDFXML
sparql.setQuery("""
 
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
# your namespace, the default is 'kb'
    PREFIX ns1: <http://example.org#>
ns = 'kb'
 
# the SPARQL endpoint
endpoint = 'http://info216.i2s.uib.no/bigdata/namespace/' + ns + '/sparql'
 
# - the endpoint just moved, the old one was:
# endpoint = 'http://i2s.uib.no:8888/bigdata/namespace/' + ns + '/sparql'


# create wrapper
    ASK{
wrapper = SPARQLWrapper(endpoint)
        ns1:watergate rdf:type ns1:Investigation.
 
# prepare the SPARQL update
wrapper.setQuery('CONSTRUCT { ?s ?p ?o } WHERE { ?s ?p ?o }')
wrapper.setReturnFormat(RDFXML)
 
# execute the SPARQL update and convert the result to an rdflib.Graph
graph = wrapper.query().convert()
 
# the destination file, with code to make it timestamped
destfile = 'rdf_dumps/slr-kg4news-' + datetime.datetime.now().strftime('%Y%m%d-%H%M') + '.rdf'
 
# serialize the result to file
graph.serialize(destination=destfile, format='ttl')
 
# report and quit
print('Wrote %u triples to file %s .' %
      (len(res), destfile))
</syntaxhighlight>
 
===Query Dbpedia with SparqlWrapper===
 
<syntaxhighlight>
from SPARQLWrapper import SPARQLWrapper, JSON
 
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
 
sparql.setQuery("""
    PREFIX dbr: <http://dbpedia.org/resource/>
    PREFIX dbo: <http://dbpedia.org/ontology/>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?comment
    WHERE {
    dbr:Barack_Obama rdfs:comment ?comment.
    FILTER (langMatches(lang(?comment),"en"))
     }
     }
""")
""")
Line 1,444: Line 358:
sparql.setReturnFormat(JSON)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
results = sparql.query().convert()
print(results['boolean'])


for result in results["results"]["bindings"]:
# Update the graph to that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson.
     print(result["comment"]["value"])
update_str = """
</syntaxhighlight>
    PREFIX ns1: <http://example.org#>
     PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>


==Lifting CSV to RDF==
    INSERT{
        ?person rdf:type ns1:IndictedPerson .
    }
    WHERE{
        ?s ns1:name ?person .
}"""


<syntaxhighlight>
sparqlUpdate.setQuery(update_str)
from rdflib import Graph, Literal, Namespace, URIRef
sparqlUpdate.setMethod(POST)
from rdflib.namespace import RDF, FOAF, RDFS, OWL
sparqlUpdate.query()
import pandas as pd


g = Graph()
#To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson
ex = Namespace("http://example.org/")
g.bind("ex", ex)


# Load the CSV data as a pandas Dataframe.
# Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal.
csv_data = pd.read_csv("task1.csv")
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>


# Here I deal with spaces (" ") in the data. I replace them with "_" so that URI's become valid.
    INSERT{
csv_data = csv_data.replace(to_replace=" ", value="_", regex=True)
        ?invest dc:title ?investString.
    }
    WHERE{
        ?s ns1:investigation ?invest .
        BIND (replace(str(?invest), str(ns1:), "")  AS ?investString)
}"""


# Here I mark all missing/empty data as "unknown". This makes it easy to delete triples containing this later.
sparqlUpdate.setQuery(update_str)
csv_data = csv_data.fillna("unknown")
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()


# Loop through the CSV data, and then make RDF triples.
#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate"
for index, row in csv_data.iterrows():
    # The names of the people act as subjects.
    subject = row['Name']
    # Create triples: e.g. "Cade_Tracey - age - 27"
    g.add((URIRef(ex + subject), URIRef(ex + "age"), Literal(row["Age"])))
    g.add((URIRef(ex + subject), URIRef(ex + "married"), URIRef(ex + row["Spouse"])))
    g.add((URIRef(ex + subject), URIRef(ex + "country"), URIRef(ex + row["Country"])))


     # If We want can add additional RDF/RDFS/OWL information e.g
# Print out a sorted list of all the indicted persons represented in your graph.
     g.add((URIRef(ex + subject), RDF.type, FOAF.Person))
sparql.setQuery("""
     PREFIX ns1: <http://example.org#>
     PREFIX foaf: <http://xmlns.com/foaf/0.1/>


# I remove triples that I marked as unknown earlier.
     SELECT ?name
g.remove((None, None, URIRef("http://example.org/unknown")))
     WHERE{
 
     ?s  ns1:name ?name;
# Clean printing of the graph.
            ns1:outcome ns1:indictment.
print(g.serialize(format="turtle").decode())
     }
</syntaxhighlight>
     ORDER BY ?name
 
===CSV file for above example===
 
<syntaxhighlight>
"Name","Age","Spouse","Country"
"Cade Tracey","26","Mary Jackson","US"
"Bob Johnson","21","","Canada"
"Mary Jackson","25","","France"
"Phil Philips","32","Catherine Smith","Japan"
</syntaxhighlight>
 
 
=Coding Tasks Lab 6=
<syntaxhighlight>
import pandas as pd
 
 
from rdflib import Graph, Namespace, URIRef, Literal, BNode
from rdflib.namespace import RDF, XSD
 
 
ex = Namespace("http://example.org/")
sem = Namespace("http://semanticweb.cs.vu.nl/2009/11/sem/")
 
g = Graph()
g.bind("ex", ex)
g.bind("sem", sem)
 
 
# Removing unwanted characters
df = pd.read_csv('russia-investigation.csv')
# Here I deal with spaces (" ") in the data. I replace them with "_" so that URI's become valid.
df = df.replace(to_replace=" ", value="_", regex=True)
# This may seem odd, but in the data set we have a name like this:("Scooter"). So we have to remove quotation marks
df = df.replace(to_replace=f'"', value="", regex=True)
# # Here I mark all missing/empty data as "unknown". This makes it easy to delete triples containing this later.
df = df.fillna("unknown")
 
# Loop through the CSV data, and then make RDF triples.
for index, row in df.iterrows():
     name = row['investigation']
     investigation = URIRef(ex + name)
     g.add((investigation, RDF.type, sem.Event))
    investigation_start = row["investigation-start"]
    g.add((investigation, sem.hasBeginTimeStamp, Literal(
        investigation_start, datatype=XSD.datetime)))
    investigation_end = row["investigation-end"]
    g.add((investigation, sem.hasEndTimeStamp, Literal(
        investigation_end, datatype=XSD.datetime)))
    investigation_end = row["investigation-days"]
    g.add((investigation, sem.hasXSDDuration, Literal(
        investigation_end, datatype=XSD.Days)))
    person = row["name"]
    person = URIRef(ex + person)
    g.add((investigation, sem.Actor, person))
    result = row['type']
    g.add((investigation, sem.hasSubEvent, Literal(result, datatype=XSD.string)))
    overturned = row["overturned"]
    g.add((investigation, ex.overtuned, Literal(overturned, datatype=XSD.boolean)))
    pardoned = row["pardoned"]
    g.add((investigation, ex.pardon, Literal(pardoned, datatype=XSD.boolean)))
 
g.serialize("output.ttl", format="ttl")
print(g.serialize(format="turtle"))
 
 
</syntaxhighlight>
 
==RDFS==
 
===RDFS-plus (OWL) Properties===
<syntaxhighlight>
g.add((ex.married, RDF.type, OWL.SymmetricProperty))
g.add((ex.married, RDF.type, OWL.IrreflexiveProperty))
g.add((ex.livesWith, RDF.type, OWL.ReflexiveProperty))
g.add((ex.livesWith, RDF.type, OWL.SymmetricProperty))
g.add((ex.sibling, RDF.type, OWL.TransitiveProperty))
g.add((ex.sibling, RDF.type, OWL.SymmetricProperty))
g.add((ex.sibling, RDF.type, OWL.IrreflexiveProperty))
g.add((ex.hasFather, RDF.type, OWL.FunctionalProperty))
g.add((ex.hasFather, RDF.type, OWL.AsymmetricProperty))
g.add((ex.hasFather, RDF.type, OWL.IrreflexiveProperty))
g.add((ex.fatherOf, RDF.type, OWL.AsymmetricProperty))
g.add((ex.fatherOf, RDF.type, OWL.IrreflexiveProperty))
 
# Sometimes there is no definite answer, and it comes down to how we want to model our properties
# e.g is livesWith a transitive property? Usually yes, but we can also want to specify that a child lives with both of her divorced parents.
# which means that: (mother livesWith child % child livesWith father) != mother livesWith father. Which makes it non-transitive.
</syntaxhighlight>
 
<!--
==Lifting XML to RDF==
<syntaxhighlight>
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, XSD, RDFS
import xml.etree.ElementTree as ET
 
g = Graph()
ex = Namespace("http://example.org/TV/")
prov = Namespace("http://www.w3.org/ns/prov#")
g.bind("ex", ex)
g.bind("prov", prov)
 
tree = ET.parse("tv_shows.xml")
root = tree.getroot()
 
for tv_show in root.findall('tv_show'):
    show_id = tv_show.attrib["id"]
    title = tv_show.find("title").text
 
    g.add((URIRef(ex + show_id), ex.title, Literal(title, datatype=XSD.string)))
    g.add((URIRef(ex + show_id), RDF.type, ex.TV_Show))
 
     for actor in tv_show.findall("actor"):
        first_name = actor.find("firstname").text
        last_name = actor.find("lastname").text
        full_name = first_name + "_" + last_name
       
        g.add((URIRef(ex + show_id), ex.stars, URIRef(ex + full_name)))
        g.add((URIRef(ex + full_name), ex.starsIn, URIRef(title)))
        g.add((URIRef(ex + full_name), RDF.type, ex.Actor))
 
print(g.serialize(format="turtle").decode())
</syntaxhighlight>
 
 
 
 
===RDFS inference with RDFLib===
You can use the OWL-RL package to add inference capabilities to RDFLib. It can be installed using the pip install command:
<syntaxhighlight>
pip install owlrl
</syntaxhighlight>
Or download it from [https://github.com/RDFLib/OWL-RL GitHub] and copy the ''owlrl'' subfolder into your project folder next to your Python files.
 
[https://owl-rl.readthedocs.io/en/latest/owlrl.html OWL-RL documentation.]
 
Example program to get you started. In this example we are creating the graph using sparql.update, but it is also possible to parse the data from a file.
<syntaxhighlight>
import rdflib.plugins.sparql.update
import owlrl.RDFSClosure
 
g = rdflib.Graph()
 
ex = rdflib.Namespace('http://example.org#')
g.bind('', ex)
 
g.update("""
PREFIX ex: <http://example.org#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
INSERT DATA {
    ex:Socrates rdf:type ex:Man .
    ex:Man rdfs:subClassOf ex:Mortal .
}""")
 
rdfs = owlrl.RDFSClosure.RDFS_Semantics(g, False, False, False)
# RDF_Semantics parameters:
# - graph (rdflib.Graph) – The RDF graph to be extended.
# - axioms (bool) – Whether (non-datatype) axiomatic triples should be added or not.
# - daxioms (bool) – Whether datatype axiomatic triples should be added or not.
# - rdfs (bool) – Whether RDFS inference is also done (used in subclassed only).
# For now, you will in most cases use all False in RDFS_Semtantics.
 
# Generates the closure of the graph - generates the new entailed triples, but does not add them to the graph.
rdfs.closure()
# Adds the new triples to the graph and empties the RDFS triple-container.
rdfs.flush_stored_triples()
 
# Ask-query to check whether a new triple has been generated from the entailment.
b = g.query("""
PREFIX ex: <http://example.org#>
ASK {
     ex:Socrates rdf:type ex:Mortal .
}
""")
""")
print('Result: ' + bool(b))
</syntaxhighlight>


===Language tagged RDFS labels===
sparql.setReturnFormat(JSON)
<syntaxhighlight>
results = sparql.query().convert()
from rdflib import Graph, Namespace, Literal
from rdflib.namespace import RDFS


g = Graph()
names = []
ex = Namespace("http://example.org/")


g.add((ex.France, RDFS.label, Literal("Frankrike", lang="no")))
for result in results["results"]["bindings"]:
g.add((ex.France, RDFS.label, Literal("France", lang="en")))
    names.append(result["name"]["value"])
g.add((ex.France, RDFS.label, Literal("Francia", lang="es")))


print(names)


</syntaxhighlight>
# Print out the minimum, average and maximum indictment days for all the indictments in the graph.


==OWL==
sparql.setQuery("""
===Basic inference with RDFLib===
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
 
    PREFIX ns1: <http://example.org#>
You can use the OWL-RL package again as for Lecture 5.
 
Instead of:
<syntaxhighlight>
# The next three lines add inferred triples to g.
rdfs = owlrl.RDFSClosure.RDFS_Semantics(g, False, False, False)
rdfs.closure()
rdfs.flush_stored_triples()
</syntaxhighlight>
you can write this to get both RDFS and basic RDFS Plus / OWL inference:
<syntaxhighlight>
# The next three lines add inferred triples to g.
owl = owlrl.CombinedClosure.RDFS_OWLRL_Semantics(g, False, False, False)
owl.closure()
owl.flush_stored_triples()
</syntaxhighlight>
 
Example updates and queries:
<syntaxhighlight>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX ex: <http://example.org#>


INSERT DATA {
    SELECT (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
    ex:Socrates ex:hasWife ex:Xanthippe .
        ?s  ns1:indictment_days ?days;
     ex:hasHusband owl:inverseOf ex:hasWife .
            ns1:outcome ns1:indictment.
      
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
}
}
</syntaxhighlight>
""")


<syntaxhighlight>
sparql.setReturnFormat(JSON)
ASK {
results = sparql.query().convert()
  ex:Xanthippe ex:hasHusband ex:Socrates .
}
</syntaxhighlight>


<syntaxhighlight>
for result in results["results"]["bindings"]:
ASK {
     print(f'The longest an investigation lasted was: {result["max"]["value"]}')
  ex:Socrates ^ex:hasHusband ex:Xanthippe .
     print(f'The shortest an investigation lasted was: {result["min"]["value"]}')
}
    print(f'The average investigation lasted: {result["avg"]["value"]}')
</syntaxhighlight>
 
<syntaxhighlight>
INSERT DATA {
     ex:hasWife rdfs:subPropertyOf ex:hasSpouse .
     ex:hasSpouse rdf:type owl:SymmetricProperty .
}
</syntaxhighlight>
 
<syntaxhighlight>
ASK {
  ex:Socrates ex:hasSpouse ex:Xanthippe .
}
</syntaxhighlight>
 
<syntaxhighlight>
ASK {
  ex:Socrates ^ex:hasSpouse ex:Xanthippe .
}
</syntaxhighlight>


# Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation.


sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>


    SELECT ?investigation (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
    ?s  ns1:indictment_days ?days;
        ns1:outcome ns1:indictment;
        ns1:investigation ?investigation.
   
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
    }
    GROUP BY ?investigation
""")


sparql.setReturnFormat(JSON)
results = sparql.query().convert()


for result in results["results"]["bindings"]:
    print(f'{result["investigation"]["value"]} - min: {result["min"]["value"]}, max: {result["max"]["value"]}, avg: {result["avg"]["value"]}')


===XML Data for above example===
<syntaxhighlight>
<data>
    <tv_show id="1050">
        <title>The_Sopranos</title>
        <actor>
            <firstname>James</firstname>
            <lastname>Gandolfini</lastname>
        </actor>
    </tv_show>
    <tv_show id="1066">
        <title>Seinfeld</title>
        <actor>
            <firstname>Jerry</firstname>
            <lastname>Seinfeld</lastname>
        </actor>
        <actor>
            <firstname>Julia</firstname>
            <lastname>Louis-dreyfus</lastname>
        </actor>
        <actor>
            <firstname>Jason</firstname>
            <lastname>Alexander</lastname>
        </actor>
    </tv_show>
</data>
</syntaxhighlight>
</syntaxhighlight>


==Lifting HTML to RDF==
== Lab 4 JSON-LD==
<syntaxhighlight>
Part 1<syntaxhighlight lang="json-ld">
from bs4 import BeautifulSoup as bs, NavigableString
from rdflib import Graph, URIRef, Namespace
from rdflib.namespace import RDF
 
g = Graph()
ex = Namespace("http://example.org/")
g.bind("ex", ex)
 
html = open("tv_shows.html").read()
html = bs(html, features="html.parser")
 
shows = html.find_all('li', attrs={'class': 'show'})
for show in shows:
    title = show.find("h3").text
    actors = show.find('ul', attrs={'class': 'actor_list'})
    for actor in actors:
        if isinstance(actor, NavigableString):
            continue
        else:
            actor = actor.text.replace(" ", "_")
            g.add((URIRef(ex + title), ex.stars, URIRef(ex + actor)))
            g.add((URIRef(ex + actor), RDF.type, ex.Actor))
 
    g.add((URIRef(ex + title), RDF.type, ex.TV_Show))
 
 
print(g.serialize(format="turtle").decode())
</syntaxhighlight>
 
===HTML code for the example above===
<syntaxhighlight>
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title></title>
</head>
<body>
    <div class="tv_shows">
        <ul>
            <li class="show">
                <h3>The_Sopranos</h3>
                <div class="irrelevant_data"></div>
                <ul class="actor_list">
                    <li>James Gandolfini</li>
                </ul>
            </li>
            <li class="show">
                <h3>Seinfeld</h3>
                <div class="irrelevant_data"></div>
                <ul class="actor_list">
                    <li >Jerry Seinfeld</li>
                    <li>Jason Alexander</li>
                    <li>Julia Louis-Dreyfus</li>
                </ul>
            </li>
        </ul>
    </div>
</body>
</html>
</syntaxhighlight>
 
==Web APIs with JSON==
<syntaxhighlight>
import requests
import json
import pprint
 
# Retrieve JSON data from API service URL. Then load it with the json library as a json object.
url = "http://api.geonames.org/postalCodeLookupJSON?postalcode=46020&#country=ES&username=demo"
data = requests.get(url).content.decode("utf-8")
data = json.loads(data)
pprint.pprint(data)
</syntaxhighlight>
 
 
==JSON-LD==
 
<syntaxhighlight>
import rdflib
 
g = rdflib.Graph()
 
example = """
{
{
   "@context": {
   "@context": {
    "name": "http://xmlns.com/foaf/0.1/name",
      "@base": "http://example.org/",
    "homepage": {
      "edges": "http://example.org/triple",
       "@id": "http://xmlns.com/foaf/0.1/homepage",
      "start": "http://example.org/source",
       "@type": "@id"
      "rel": "http://exaxmple.org/predicate",
    }
      "end": "http://example.org/object",
      "Person" : "http://example.org/Person",
      "birthday" : {
          "@id" : "http://example.org/birthday",
          "@type" : "xsd:date"
      },
       "nameEng" : {
          "@id" : "http://example.org/en/name",
          "@language" : "en"
      },
      "nameFr" : {
          "@id" : "http://example.org/fr/name",
          "@language" : "fr"
       },
      "nameCh" : {
          "@id" : "http://example.org/ch/name",
          "@language" : "ch"
      },
      "age" : {
          "@id" : "http://example.org/age",
          "@type" : "xsd:int"
      },
      "likes" : "http://example.org/games/likes",
      "haircolor" : "http://example.org/games/haircolor"
   },
   },
   "@id": "http://me.markus-lanthaler.com/",
   "@graph": [
  "name": "Markus Lanthaler",
      {
  "homepage": "http://www.markus-lanthaler.com/"
          "@id": "people/Jeremy",
          "@type": "Person",
          "birthday" : "1987.1.1",
          "nameEng" : "Jeremy",
          "age" : 26
      },
      {
          "@id": "people/Tom",
          "@type": "Person"
      },
      {
          "@id": "people/Ju",
          "@type": "Person",
          "birthday" : "2001.1.1",
          "nameCh" : "Ju",
          "age" : 22,
          "likes" : "bastketball"
      },
      {
          "@id": "people/Louis",
          "@type": "Person",
          "birthday" : "1978.1.1",
          "haircolor" : "Black",
          "nameFr" : "Louis",
          "age" : 45
      },
      {"edges" : [
      {
          "start" : "people/Jeremy",
          "rel" : "knows",
          "end" : "people/Tom"
      },
      {
          "start" : "people/Tom",
          "rel" : "knows",
          "end" : "people/Louis"
      },
      {
          "start" : "people/Louis",
          "rel" : "teaches",
          "end" : "people/Ju"
      },
      {
          "start" : "people/Ju",
          "rel" : "plays",
          "end" : "people/Jeremy"
      },
      {
          "start" : "people/Ju",
          "rel" : "plays",
          "end" : "people/Tom"
      }
      ]}
  ]
}
}
"""
</syntaxhighlight>Part 2-3<syntaxhighlight lang="python">
import rdflib


# json-ld parsing automatically deals with @contexts
CN_BASE = 'http://api.conceptnet.io/c/en/'
g.parse(data=example, format='json-ld')


# serialisation does expansion by default
g = rdflib.Graph()
for line in g.serialize(format='json-ld').decode().splitlines():
g.parse(CN_BASE+'indictment', format='json-ld')
    print(line)
 
# by supplying a context object, serialisation can do compaction
context = {
    "foaf": "http://xmlns.com/foaf/0.1/"
}
for line in g.serialize(format='json-ld', context=context).decode().splitlines():
    print(line)
</syntaxhighlight>


# To download JSON object:


<div class="credits" style="text-align: right; direction: ltr; margin-left: 1em;">''INFO216, UiB, 2017-2020. All code examples are [https://creativecommons.org/choose/zero/ CC0].'' </div>
import json
import requests


==OWL - Complex Classes and Restrictions==
json_obj = requests.get(CN_BASE+'indictment').json()
<syntaxhighlight>
import owlrl
from rdflib import Graph, Literal, Namespace, BNode
from rdflib.namespace import RDF, OWL, RDFS
from rdflib.collection import Collection


g = Graph()
# To change the @context:
ex = Namespace("http://example.org/")
g.bind("ex", ex)
g.bind("owl", OWL)


# a Season is either Autumn, Winter, Spring, Summer
context = {
seasons = BNode()
    "@base": "http://ex.org/",
Collection(g, seasons, [ex.Winter, ex.Autumn, ex.Spring, ex.Summer])
    "edges": "http://ex.org/triple/",
g.add((ex.Season, OWL.oneOf, seasons))
    "start": "http://ex.org/s/",
    "rel": "http://ex.org/p/",
    "end": "http://ex.org/o/",
    "label": "http://ex.org/label"
}
json_obj['@context'] = context
json_str = json.dumps(json_obj)


# A Parent is a Father or Mother
g = rdflib.Graph()
b = BNode()
g.parse(data=json_str, format='json-ld')
Collection(g, b, [ex.Father, ex.Mother])
g.add((ex.Parent, OWL.unionOf, b))


# A Woman is a person who has the "female" gender
# To extract triples (here with labels):
br = BNode()
g.add((br, RDF.type, OWL.Restriction))
g.add((br, OWL.onProperty, ex.gender))
g.add((br, OWL.hasValue, ex.Female))
bi = BNode()
Collection(g, bi, [ex.Person, br])
g.add((ex.Woman, OWL.intersectionOf, bi))


# A vegetarian is a Person who only eats vegetarian food
r = g.query("""
br = BNode()
        SELECT ?s ?sLabel ?p ?o ?oLabel WHERE {
g.add((br, RDF.type, OWL.Restriction))
            ?edge
g.add((br, OWL.onProperty, ex.eats))
                <http://ex.org/s/> ?s ;
g.add((br, OWL.allValuesFrom, ex.VeganFood))
                <http://ex.org/p/> ?p ;
bi = BNode()
                <http://ex.org/o/> ?o .
Collection(g, bi, [ex.Person, br])
            ?s <http://ex.org/label> ?sLabel .
g.add((ex.Vegetarian, OWL.intersectionOf, bi))
            ?o <http://ex.org/label> ?oLabel .
}
        """, initNs={'cn': CN_BASE})
print(r.serialize(format='txt').decode())


# A vegetarian is a Person who can not eat meat.
# Construct a new graph:
br = BNode()
g.add((br, RDF.type, OWL.Restriction))
g.add((br, OWL.onProperty, ex.eats))
g.add((br, OWL.QualifiedCardinality, Literal(0)))
g.add((br, OWL.onClass, ex.Meat))
bi = BNode()
Collection(g, bi, [ex.Person, br])
g.add((ex.Vegetarian, OWL.intersectionOf, bi))


# A Worried Parent is a parent who has at least one sick child
r = g.query("""
br = BNode()
        CONSTRUCT {
g.add((br, RDF.type, OWL.Restriction))
            ?s ?p ?o .
g.add((br, OWL.onProperty, ex.hasChild))
            ?s <http://ex.org/label> ?sLabel .
g.add((br, OWL.QualifiedMinCardinality, Literal(1)))
            ?o <http://ex.org/label> ?oLabel .
g.add((br, OWL.onClass, ex.Sick))
        } WHERE {
bi = BNode()
            ?edge <http://ex.org/s/> ?s ;
Collection(g, bi, [ex.Parent, br])
                  <http://ex.org/p/> ?p ;
g.add((ex.WorriedParent, OWL.intersectionOf, bi))
                  <http://ex.org/o/> ?o .
 
            ?s <http://ex.org/label> ?sLabel .
# using the restriction above, If we now write...:  
            ?o <http://ex.org/label> ?oLabel .
g.add((ex.Bob, RDF.type, ex.Parent))
}
g.add((ex.Bob, ex.hasChild, ex.John))
        """, initNs={'cn': CN_BASE})
g.add((ex.John, RDF.type, ex.Sick))
# ...we can infer with owl reasoning that Bob is a worried Parent even though we didn't specify it ourselves because Bob fullfills the restriction and Parent requirements.


print(r.graph.serialize(format='ttl'))
</syntaxhighlight>
</syntaxhighlight>
==Protege-OWL reasoning with HermiT==
[[:File:DL-reasoning-RoyalFamily-final.owl.txt | Example file]] from Lecture 13 about OWL-DL, rules and reasoning.
-->

Latest revision as of 09:55, 10 March 2025

Here we will present suggested solutions after each lab. The page will be updated as the course progresses

1 Lab: Getting started with VSCode, Python and RDFlib

from rdflib import Graph, Namespace

ex = Namespace('http://example.org/')

g = Graph()

g.bind("ex", ex)

# The Mueller Investigation was led by Robert Mueller
g.add((ex.MuellerInvestigation, ex.leadBy, ex.RobertMueller))

# It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, Michael Cohen, and Roger Stone.
g.add((ex.MuellerInvestigation, ex.involved, ex.PaulManafort))
g.add((ex.MuellerInvestigation, ex.involved, ex.RickGates))
g.add((ex.MuellerInvestigation, ex.involved, ex.GeorgePapadopoulos))
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelFlynn))
g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelCohen))
g.add((ex.MuellerInvestigation, ex.involved, ex.RogerStone))

# Paul Manafort was business partner of Rick Gates
g.add((ex.PaulManafort, ex.businessPartner, ex.RickGates))

# He was campaign chairman for Donald Trump
g.add((ex.PaulManafort, ex.campaignChairman, ex.DonaldTrump))

# He was charged with money laundering, tax evasion, and foreign lobbying.
g.add((ex.PaulManafort, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.PaulManafort, ex.chargedWith, ex.TaxEvasion))
g.add((ex.PaulManafort, ex.chargedWith, ex.ForeignLobbying))

# He was convicted for bank and tax fraud.
g.add((ex.PaulManafort, ex.convictedOf, ex.BankFraud))
g.add((ex.PaulManafort, ex.convictedOf, ex.TaxFraud))

# He pleaded guilty to conspiracy.
g.add((ex.PaulManafort, ex.pleadGuiltyTo, ex.Conspiracy))

# He was sentenced to prison.
g.add((ex.PaulManafort, ex.sentencedTo, ex.Prison))

# He negotiated a plea agreement.
g.add((ex.PaulManafort, ex.negotiated, ex.PleaAgreement))

# Rick Gates was charged with money laundering, tax evasion and foreign lobbying.
g.add((ex.RickGates, ex.chargedWith, ex.MoneyLaundering))
g.add((ex.RickGates, ex.chargedWith, ex.TaxEvasion))
g.add((ex.RickGates, ex.chargedWith, ex.ForeignLobbying))

# He pleaded guilty to conspiracy and lying to FBI.
g.add((ex.RickGates, ex.pleadGuiltyTo, ex.Conspiracy))
g.add((ex.RickGates, ex.pleadGuiltyTo, ex.LyingToFBI))

# Use the serialize method of rdflib.Graph to write out the model in different formats (on screen or to file)
print(g.serialize(format="ttl")) # To screen
#g.serialize("lab1.ttl", format="ttl") # To file

# Loop through the triples in the model to print out all triples that have pleading guilty as predicate
for subject, obj in g[:ex.pleadGuiltyTo:]:
    print(subject, ex.pleadGuiltyTo, obj)

# --- IF you have more time tasks ---

# Michael Cohen, Michael Flynn and the statements about lying are part of lab 2, so that answer is not provided this week

#Write a method (function) that submits your model for rendering and saves the returned image to file.
import requests
import shutil

def graphToImage(graphInput):
    data = {"rdf":graphInput, "from":"ttl", "to":"png"}
    link = "http://www.ldf.fi/service/rdf-grapher"
    response = requests.get(link, params = data, stream=True)
    # print(response.content)
    print(response.raw)
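    # Optional check (not part of the original solution): only save the image
    # if the rendering service responded successfully.
    if response.status_code != 200:
        print("Rendering failed with HTTP status", response.status_code)
        return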
    with open("lab1.png", "wb") as file:
        shutil.copyfileobj(response.raw, file)

graph = g.serialize(format="ttl")
graphToImage(graph)

2 Lab: SPARQL queries

List all triples in your graph.

select * where { 
	?s ?p ?o .
} 

List the first 100 triples in your graph.

select * where { 
	?s ?p ?o .
} limit 100 

Count the number of triples in your graph.

select (count(?s)as ?tripleCount) where { 
	?s ?p ?o .
} 

Count the number of indictments in your graph.

PREFIX muellerkg: <http://example.org#>
select (Count(?s)as ?numIndictment) where { 
	?s ?p muellerkg:indictment .
} 

List everyone who pleaded guilty, along with the name of the investigation.

PREFIX m: <http://example.org#>
select ?name ?s where { 
	?s ?p m:guilty-plea;
    	m:name ?name.
}  

List everyone who was convicted, but who had their conviction overturned, and by which president.

PREFIX muellerkg: <http://example.org#>

select ?name ?president   where { 
	?s ?p muellerkg:conviction;
		muellerkg:name ?name;
    	muellerkg:overturned true;
     	muellerkg:president ?president.  	
} limit 100 

For each investigation, list the number of indictments made.

PREFIX muellerkg: <http://example.org#>
select ?investigation (count(?investigation) as ?numIndictments) where { 
	?s muellerkg:investigation ?investigation .
} group by (?investigation)

For each investigation with multiple indictments, list the number of indictments made.

PREFIX muellerkg: <http://example.org#>
select ?investigation (count(?investigation) as ?numIndictments) where { 
	?s muellerkg:investigation ?investigation.
} group by (?investigation)
having (?numIndictments > 1)

For each investigation with multiple indictments, list the number of indictments made, sorted with the most indictments first.

PREFIX muellerkg: <http://example.org#>
select ?investigation (count(?investigation) as ?numIndictments) where { 
	?s muellerkg:investigation ?investigation.
} group by (?investigation)
having (?numIndictments > 1)
order by desc(?numIndictments)

For each president, list the numbers of convictions and of pardons made after conviction.

PREFIX muellerkg: <http://example.org#>
SELECT ?president (COUNT(?conviction) AS ?numConvictions) (COUNT(?pardon) AS ?numPardoned) 
WHERE { 
    ?indictment muellerkg:president ?president ;
                muellerkg:outcome muellerkg:conviction .
    BIND(?indictment AS ?conviction)
    OPTIONAL {
        ?indictment muellerkg:pardoned true .
        BIND(?indictment AS ?pardon)
    }
} 
GROUP BY ?president

3 Lab: SPARQL programming

from rdflib import Graph, Namespace, RDF, FOAF
from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE

g = Graph()
g.parse("Russia_investigation_kg.ttl")

# ----- RDFLIB -----
ex = Namespace('http://example.org#')

NS = {
    '': ex,
    'rdf': RDF,
    'foaf': FOAF,
}

# Print out a list of all the predicates used in your graph.
task1 = g.query("""
SELECT DISTINCT ?p WHERE{
    ?s ?p ?o .
}
""", initNs=NS)

print(list(task1))

# Print out a sorted list of all the presidents represented in your graph.
task2 = g.query("""
SELECT DISTINCT ?president WHERE{
    ?s :president ?president .
}
ORDER BY ?president
""", initNs=NS)

print(list(task2))

# Create a dictionary (Python dict) with all the represented presidents as keys. For each key, the value is a list of names of people indicted under that president.
task3_dic = {}

task3 = g.query("""
SELECT ?president ?person WHERE{
    ?s :president ?president;
       :name ?person;
       :outcome :indictment.
}
""", initNs=NS)

for president, person in task3:
    if president not in task3_dic:
        task3_dic[president] = [person]
    else:
        task3_dic[president].append(person)

print(task3_dic)
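
# An equivalent, slightly shorter variant using collections.defaultdict
# (optional illustration, not part of the original solution):
from collections import defaultdict

task3_alt = defaultdict(list)
for president, person in task3:
    task3_alt[president].append(person)

print(dict(task3_alt))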

# Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people.

task4 = g.query("""
    ASK{
        SELECT ?count WHERE{{
  	        SELECT (COUNT(?s) as ?count) WHERE{
    	        ?s :pardoned :true;
                   :president :Donald_Trump .
                }}
        FILTER (?count > 5) 
        }
    }
""", initNs=NS)

print(task4.askAnswer)


# task5 = g.query(""" 
# DESCRIBE :Donald_Trump
# """, initNs=NS)

# print(task5.serialize())
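
# The DESCRIBE query above is left commented out in the original solution. If
# DESCRIBE is not supported by your rdflib version, a rough substitute (an
# optional sketch, not part of the original solution) is to collect every
# triple that mentions Donald Trump directly:
described = Graph()
for triple in g.triples((ex.Donald_Trump, None, None)):
    described.add(triple)
for triple in g.triples((None, None, ex.Donald_Trump)):
    described.add(triple)
print(described.serialize(format="turtle"))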

# ----- SPARQLWrapper -----

SERVER = 'http://localhost:7200' #Might need to replace this
REPOSITORY = 'Labs' #Replace with your repository name

# Query Endpoint
sparql = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}') 
# Update Endpoint
sparqlUpdate = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}/statements')
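
# If your repository is secured, SPARQLWrapper can also send credentials
# (optional; the user name and password below are placeholders):
#sparql.setCredentials("user", "password")
#sparqlUpdate.setCredentials("user", "password")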

# Ask whether there was an ongoing indictment on the date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    ASK {
        SELECT ?end ?start
        WHERE{
            ?s ns1:investigation_end ?end;
               ns1:investigation_start ?start;
               ns1:outcome ns1:indictment.
            FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date) 
	    }
    }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(f"Are there any investigation on the 1990-01-01: {results['boolean']}")

# List ongoing indictments on that date 1990-01-01.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    SELECT ?s
    WHERE{
        ?s ns1:investigation_end ?end;
           ns1:investigation_start ?start;
           ns1:outcome ns1:indictment.
        FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date) 
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

print("The ongoing investigations on the 1990-01-01 are:")
for result in results["results"]["bindings"]:
    print(result["s"]["value"])

# Describe investigation number 100 (muellerkg:investigation_100).
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    DESCRIBE ns1:investigation_100
""")

sparql.setReturnFormat(TURTLE)
results = sparql.query().convert()

print(results)
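
# The DESCRIBE result comes back as Turtle, so it can also be loaded into a new
# rdflib Graph for further processing (optional, not part of the original solution):
described_investigation = Graph()
described_investigation.parse(data=results, format="turtle")
print(f"DESCRIBE returned {len(described_investigation)} triples")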

# Print out a list of all the types used in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    SELECT DISTINCT ?types
    WHERE{
        ?s rdf:type ?types . 
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

rdf_Types = []

for result in results["results"]["bindings"]:
    rdf_Types.append(result["types"]["value"])

print(rdf_Types)

# Update the graph so that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?invest rdf:type ns1:Investigation .
    }
    WHERE{
        ?s ns1:investigation ?invest .
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#To Test
sparql.setQuery("""
    prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>

    ASK{
        ns1:watergate rdf:type ns1:Investigation.
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])

# Update the graph so that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>

    INSERT{
        ?person rdf:type ns1:IndictedPerson .
    }
    WHERE{
        ?s ns1:name ?person .
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson
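
# For example (optional, not part of the original solution; the person below is
# the one suggested in the comment above):
sparql.setQuery("""
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX ns1: <http://example.org#>

    ASK{
        ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson.
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])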

# Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal.
update_str = """
    PREFIX ns1: <http://example.org#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>

    INSERT{
        ?invest dc:title ?investString.
    }
    WHERE{
        ?s ns1:investigation ?invest .
        BIND (replace(str(?invest), str(ns1:), "")  AS ?investString)
}"""

sparqlUpdate.setQuery(update_str)
sparqlUpdate.setMethod(POST)
sparqlUpdate.query()

#Same test as above, replace it with e.g. ns1:watergate dc:title "watergate"
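
# For example (optional, not part of the original solution):
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX dc: <http://purl.org/dc/elements/1.1/>

    ASK{
        ns1:watergate dc:title "watergate".
    }
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()
print(results['boolean'])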

# Print out a sorted list of all the indicted persons represented in your graph.
sparql.setQuery("""
    PREFIX ns1: <http://example.org#>
    PREFIX foaf: <http://xmlns.com/foaf/0.1/>

    SELECT ?name
    WHERE{
    ?s  ns1:name ?name;
            ns1:outcome ns1:indictment.
    }
    ORDER BY ?name
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

names = []

for result in results["results"]["bindings"]:
    names.append(result["name"]["value"])

print(names)

# Print out the minimum, average and maximum indictment days for all the indictments in the graph.

sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>

    SELECT (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
        ?s  ns1:indictment_days ?days;
            ns1:outcome ns1:indictment.
    
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
}
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(f'The maximum number of indictment days was: {result["max"]["value"]}')
    print(f'The minimum number of indictment days was: {result["min"]["value"]}')
    print(f'The average number of indictment days was: {result["avg"]["value"]}')

# Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation.

sparql.setQuery("""
    prefix xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX ns1: <http://example.org#>

    SELECT ?investigation (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min)  WHERE{
    ?s  ns1:indictment_days ?days;
        ns1:outcome ns1:indictment;
        ns1:investigation ?investigation.
    
    BIND (replace(str(?days), str(ns1:), "")  AS ?daysR)
    BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved)
    }
    GROUP BY ?investigation
""")

sparql.setReturnFormat(JSON)
results = sparql.query().convert()

for result in results["results"]["bindings"]:
    print(f'{result["investigation"]["value"]} - min: {result["min"]["value"]}, max: {result["max"]["value"]}, avg: {result["avg"]["value"]}')

4 Lab: JSON-LD

Part 1

{
  "@context": {
      "@base": "http://example.org/",
      "edges": "http://example.org/triple",
      "start": "http://example.org/source",
      "rel": "http://exaxmple.org/predicate",
      "end": "http://example.org/object",
      "Person" : "http://example.org/Person",
      "birthday" : {
          "@id" : "http://example.org/birthday",
          "@type" : "xsd:date"
      },
      "nameEng" : {
          "@id" : "http://example.org/en/name",
          "@language" : "en"
      },
      "nameFr" : {
          "@id" : "http://example.org/fr/name",
          "@language" : "fr"
      },
      "nameCh" : {
          "@id" : "http://example.org/ch/name",
          "@language" : "ch"
      },
      "age" : {
          "@id" : "http://example.org/age",
          "@type" : "xsd:int"
      },
      "likes" : "http://example.org/games/likes",
      "haircolor" : "http://example.org/games/haircolor"
  },
  "@graph": [
      {
          "@id": "people/Jeremy",
          "@type": "Person",
          "birthday" : "1987.1.1",
          "nameEng" : "Jeremy",
          "age" : 26
      },
      {
          "@id": "people/Tom",
          "@type": "Person"
      },
      {
          "@id": "people/Ju",
          "@type": "Person",
          "birthday" : "2001.1.1",
          "nameCh" : "Ju",
          "age" : 22,
          "likes" : "bastketball"
      },
      {
          "@id": "people/Louis",
          "@type": "Person",
          "birthday" : "1978.1.1",
          "haircolor" : "Black",
          "nameFr" : "Louis",
          "age" : 45
      },
      {"edges" : [
      {
          "start" : "people/Jeremy",
          "rel" : "knows",
          "end" : "people/Tom"
      },
      {
          "start" : "people/Tom",
          "rel" : "knows",
          "end" : "people/Louis"
      },
      {
          "start" : "people/Louis",
          "rel" : "teaches",
          "end" : "people/Ju"
      },
      {
          "start" : "people/Ju",
          "rel" : "plays",
          "end" : "people/Jeremy"
      },
      {
          "start" : "people/Ju",
          "rel" : "plays",
          "end" : "people/Tom"
      }
      ]}
  ]
}
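
To check that the context above behaves as intended, the JSON-LD document can be loaded with rdflib and printed as Turtle. This is an optional sanity check, not part of the original solution, and it assumes the JSON above has been saved to a file named people.jsonld:

import rdflib

g = rdflib.Graph()
g.parse("people.jsonld", format="json-ld")  # the file name is just an assumption
print(g.serialize(format="turtle"))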

Part 2-3

import rdflib

CN_BASE = 'http://api.conceptnet.io/c/en/'

g = rdflib.Graph()
g.parse(CN_BASE+'indictment', format='json-ld')

# To download JSON object:

import json
import requests

json_obj = requests.get(CN_BASE+'indictment').json()

# To change the @context:

context = {
     "@base": "http://ex.org/",
     "edges": "http://ex.org/triple/",
     "start": "http://ex.org/s/",
     "rel": "http://ex.org/p/",
     "end": "http://ex.org/o/",
     "label": "http://ex.org/label"
}
json_obj['@context'] = context
json_str = json.dumps(json_obj)

g = rdflib.Graph()
g.parse(data=json_str, format='json-ld')

# To extract triples (here with labels):

r = g.query("""
         SELECT ?s ?sLabel ?p ?o ?oLabel WHERE {
             ?edge
                 <http://ex.org/s/> ?s ;
                 <http://ex.org/p/> ?p ;
                 <http://ex.org/o/> ?o .
             ?s <http://ex.org/label> ?sLabel .
             ?o <http://ex.org/label> ?oLabel .
}
         """, initNs={'cn': CN_BASE})
print(r.serialize(format='txt').decode())

# Construct a new graph:

r = g.query("""
         CONSTRUCT {
             ?s ?p ?o .
             ?s <http://ex.org/label> ?sLabel .
             ?o <http://ex.org/label> ?oLabel .
         } WHERE {
             ?edge <http://ex.org/s/> ?s ;
                   <http://ex.org/p/> ?p ;
                   <http://ex.org/o/> ?o .
             ?s <http://ex.org/label> ?sLabel .
             ?o <http://ex.org/label> ?oLabel .
}
         """, initNs={'cn': CN_BASE})

print(r.graph.serialize(format='ttl'))
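
# Optionally, the constructed graph can also be written to a Turtle file
# (the file name below is just an example, not from the original solution):
r.graph.serialize(destination="conceptnet_indictment.ttl", format="ttl")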