|
|
(52 intermediate revisions by 3 users not shown) |
Line 1: |
Line 1: |
| This page will be updated with Python examples related to the lectures and labs. We will add more examples after each lab has ended. The first examples will use Python's RDFlib. We will introduce other relevant libraries later.
| | Here we will present suggested solutions after each lab. ''The page will be updated as the course progresses'' |
|
| |
|
| =Example lab solutions= | | <!-- |
| | =Getting started (Lab 1)= |
|
| |
|
| ==Getting started==
| | <syntaxhighlight> |
|
| |
|
| <syntaxhighlight>
| | from rdflib import Graph, Namespace |
|
| |
|
| from rdflib.collection import Collection
| | ex = Namespace('http://example.org/') |
| from rdflib import Graph, Namespace, Literal, URIRef
| |
| from rdflib.namespace import RDF, FOAF, XSD
| |
|
| |
|
| g = Graph() | | g = Graph() |
| EX = Namespace('http://EXample.org/')
| |
| RL = Namespace('http://purl.org/vocab/relationship/')
| |
| DBO = Namespace('https://dbpedia.org/ontology/')
| |
| DBR = Namespace('https://dbpedia.org/page/')
| |
|
| |
|
| g.namespace_manager.bind('exampleURI', EX) | | g.bind("ex", ex) |
| g.namespace_manager.bind('relationship', RL)
| |
| g.namespace_manager.bind('dbpediaOntology', DBO)
| |
| g.namespace_manager.bind('dbpediaPage', DBR)
| |
|
| |
|
| g.add((EX.Cade, RDF.type, FOAF.Person))
| | # The Mueller Investigation was led by Robert Mueller |
| g.add((EX.Mary, RDF.type, FOAF.Person))
| | g.add((ex.MuellerInvestigation, ex.leadBy, ex.RobertMueller)) |
| g.add((EX.Cade, RL.spouseOf, EX.Mary)) # a symmetrical relation from an established namespace
| |
| g.add((DBR.France, DBO.capital, DBR.Paris))
| |
| g.add((EX.Cade, FOAF.age, Literal(27)))
| |
| g.add((EX.Mary, FOAF.age, Literal('26', datatype=XSD.int)))
| |
| Collection (g, EX.MaryInterests, [EX.hiking, EX.choclate, EX.biology])
| |
| g.add((EX.Mary, EX.hasIntrest, EX.MaryInterests))
| |
| g.add((EX.Mary, RDF.type, EX.student))
| |
| g.add((DBO.capital, EX.range, EX.city))
| |
| g.add((EX.Mary, RDF.type, EX.kind))
| |
| g.add((EX.Cade, RDF.type, EX.kindPerson)) | |
|
| |
|
| #hobbies = ['hiking', 'choclate', 'biology'] | | # It involved Paul Manafort, Rick Gates, George Papadopoulos, Michael Flynn, Michael Cohen, and Roger Stone. |
| #for i in hobbies:
| | g.add((ex.MuellerInvestigation, ex.involved, ex.PaulManafort)) |
| # g.add((EX.Mary, FOAF.interest, EX[i]))
| | g.add((ex.MuellerInvestigation, ex.involved, ex.RickGates)) |
| | g.add((ex.MuellerInvestigation, ex.involved, ex.GeorgePapadopoulos)) |
| | g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelFlynn)) |
| | g.add((ex.MuellerInvestigation, ex.involved, ex.MichaelCohen)) |
| | g.add((ex.MuellerInvestigation, ex.involved, ex.RogerStone)) |
|
| |
|
| print(g.serialize(format="turtle"))
| | # Paul Manafort was business partner of Rick Gates |
| </syntaxhighlight>
| | g.add((ex.PaulManafort, ex.businessPartner, ex.RickGates)) |
|
| |
|
| ==RDFlib==
| | # He was campaign chairman for Donald Trump |
| <syntaxhighlight>
| | g.add((ex.PaulManafort, ex.campaignChairman, ex.DonaldTrump)) |
|
| |
|
| from rdflib.namespace import RDF, XSD, FOAF
| | # He was charged with money laundering, tax evasion, and foreign lobbying. |
| from rdflib import Graph, Namespace, Literal, BNode
| | g.add((ex.PaulManafort, ex.chargedWith, ex.MoneyLaundering)) |
| from rdflib.collection import Collection
| | g.add((ex.PaulManafort, ex.chargedWith, ex.TaxEvasion)) |
| | g.add((ex.PaulManafort, ex.chargedWith, ex.ForeignLobbying)) |
|
| |
|
| | # He was convicted for bank and tax fraud. |
| | g.add((ex.PaulManafort, ex.convictedOf, ex.BankFraud)) |
| | g.add((ex.PaulManafort, ex.convictedOf, ex.TaxFraud)) |
|
| |
|
| g = Graph() | | # He pleaded guilty to conspiracy. |
| ex = Namespace('http://example.org/') | | g.add((ex.PaulManafort, ex.pleadGuiltyTo, ex.Conspiracy)) |
| schema = Namespace("https://schema.org/")
| |
| dbp = Namespace("https://dbpedia.org/resource/")
| |
|
| |
|
| g.bind("ex", ex) | | # He was sentenced to prison. |
| g.bind("dbp", dbp)
| | g.add((ex.PaulManafort, ex.sentencedTo, ex.Prison)) |
| g.bind("schema", schema)
| |
|
| |
|
| address = BNode()
| | # He negotiated a plea agreement. |
| degree = BNode()
| | g.add((ex.PaulManafort, ex.negotiated, ex.PleaAgreement)) |
|
| |
|
| # from lab 1 | | # Rick Gates was charged with money laundering, tax evasion and foreign lobbying. |
| g.add((ex.Cade, FOAF.name, Literal("Cade Tracey", datatype=XSD.string)))
| | g.add((ex.RickGates, ex.chargedWith, ex.MoneyLaundering)) |
| g.add((ex.Mary, FOAF.name, Literal("Mary", datatype=XSD.string))) | | g.add((ex.RickGates, ex.chargedWith, ex.TaxEvasion)) |
| g.add((ex.Cade, RDF.type, FOAF.Person))
| | g.add((ex.RickGates, ex.chargedWith, ex.ForeignLobbying)) |
| g.add((ex.Mary, RDF.type, FOAF.Person))
| |
| g.add((ex.Mary, RDF.type, ex.Student)) | |
| g.add((ex.Cade, ex.married, ex.Mary))
| |
| g.add((ex.Cade, FOAF.age, Literal('27', datatype=XSD.int))) | |
| g.add((ex.Mary, FOAF.age, Literal('26', datatype=XSD.int)))
| |
| g.add((ex.Paris, RDF.type, ex.City))
| |
| g.add((ex.France, ex.Capital, ex.Paris))
| |
| g.add((ex.Mary, FOAF.interest, ex.hiking))
| |
| g.add((ex.Mary, FOAF.interest, ex.Chocolate))
| |
| g.add((ex.Mary, FOAF.interest, ex.biology))
| |
| g.add((ex.France, ex.City, ex.Paris))
| |
| g.add((ex.Mary, ex.Characterostic, ex.kind))
| |
| g.add((ex.Cade, ex.Characterostic, ex.kind))
| |
| g.add((ex.France, RDF.type, ex.Country))
| |
| g.add((ex.Cade, schema.address, address))
| |
|
| |
|
| # BNode address | | # He pleaded guilty to conspiracy and lying to FBI. |
| g.add((address, RDF.type, schema.PostalAdress))
| | g.add((ex.RickGates, ex.pleadGuiltyTo, ex.Conspiracy)) |
| g.add((address, schema.streetAddress, Literal('1516 Henry Street'))) | | g.add((ex.RickGates, ex.pleadGuiltyTo, ex.LyingToFBI)) |
| g.add((address, schema.addresCity, dbp.Berkeley))
| |
| g.add((address, schema.addressRegion, dbp.California)) | |
| g.add((address, schema.postalCode, Literal('94709')))
| |
| g.add((address, schema.addressCountry, dbp.United_States))
| |
|
| |
|
| # More info about Cade | | # Use the serialize method of rdflib.Graph to write out the model in different formats (on screen or to file) |
| g.add((ex.Cade, ex.Degree, degree))
| | print(g.serialize(format="ttl")) # To screen |
| g.add((degree, ex.Field, dbp.Biology))
| | #g.serialize("lab1.ttl", format="ttl") # To file |
| g.add((degree, RDF.type, dbp.Bachelors_degree)) | |
| g.add((degree, ex.Universety, dbp.University_of_California)) | |
| g.add((degree, ex.year, Literal('2001', datatype=XSD.gYear)))
| |
|
| |
|
| # Emma | | # Loop through the triples in the model to print out all triples that have pleading guilty as predicate |
| emma_degree = BNode()
| | for subject, object in g[ : ex.pleadGuiltyTo :]: |
| g.add((ex.Emma, FOAF.name, Literal("Emma Dominguez", datatype=XSD.string)))
| | print(subject, ex.pleadGuiltyTo, object) |
| g.add((ex.Emma, RDF.type, FOAF.Person)) | |
| g.add((ex.Emma, ex.Degree, emma_degree))
| |
| g.add((degree, ex.Field, dbp.Chemistry))
| |
| g.add((degree, RDF.type, dbp.Masters_degree))
| |
| g.add((degree, ex.Universety, dbp.University_of_Valencia))
| |
| g.add((degree, ex.year, Literal('2015', datatype=XSD.gYear)))
| |
|
| |
|
| # Address | | # --- IF you have more time tasks --- |
| emma_address = BNode()
| |
| g.add((ex.Emma, schema.address, emma_address))
| |
| g.add((emma_address, RDF.type, schema.PostalAdress))
| |
| g.add((emma_address, schema.streetAddress,
| |
| Literal('Carrer de la Guardia Civil 20')))
| |
| g.add((emma_address, schema.addressRegion, dbp.Valencia))
| |
| g.add((emma_address, schema.postalCode, Literal('46020')))
| |
| g.add((emma_address, schema.addressCountry, dbp.Spain))
| |
|
| |
|
| b = BNode()
| | # Michael Cohen, Michael Flynn and the lying is part of lab 2 and therefore the answer is not provided this week |
| g.add((ex.Emma, ex.visit, b))
| |
| Collection(g, b,
| |
| [dbp.Portugal, dbp.Italy, dbp.France, dbp.Germany, dbp.Denmark, dbp.Sweden])
| |
|
| |
|
| </syntaxhighlight>
| | #Write a method (function) that submits your model for rendering and saves the returned image to file. |
| | import requests |
| | import shutil |
|
| |
|
| ==SPARQL - Blazegraph== | | def graphToImage(graphInput): |
| <syntaxhighlight>
| | data = {"rdf":graphInput, "from":"ttl", "to":"png"} |
| PREFIX ex: <http://example.org/>
| | link = "http://www.ldf.fi/service/rdf-grapher" |
| PREFIX foaf: <http://xmlns.com/foaf/0.1/>
| | response = requests.get(link, params = data, stream=True) |
| PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
| | # print(response.content) |
| PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
| | print(response.raw) |
| PREFIX xml: <http://www.w3.org/XML/1998/namespace>
| | with open("lab1.png", "wb") as file: |
| PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
| | shutil.copyfileobj(response.raw, file) |
|
| |
|
| | graph = g.serialize(format="ttl") |
| | graphToImage(graph) |
|
| |
|
| #select all triplets in graph
| | </syntaxhighlight> |
| SELECT ?s ?p ?o
| |
| WHERE {
| |
| ?s ?p ?o .
| |
| }
| |
| #select the interests of Cade
| |
| SELECT ?cadeInterest
| |
| WHERE {
| |
| ex:Cade ex:interest ?cadeInterest .
| |
| }
| |
| #select the country and city where Emma lives
| |
| SELECT ?emmaCity ?emmaCountry
| |
| WHERE {
| |
| ex:Emma ex:address ?address .
| |
| ?address ex:city ?emmaCity .
| |
| ?address ex:country ?emmaCountry .
| |
| }
| |
| #select the people who are over 26 years old
| |
| SELECT ?person ?age
| |
| WHERE {
| |
| ?person ex:age ?age .
| |
| FILTER(?age > 26) .
| |
| }
| |
| #select people who graduated with Bachelor
| |
| SELECT ?person ?degree
| |
| WHERE {
| |
| ?person ex:degree ?degree .
| |
| ?degree ex:degreeLevel "Bachelor" .
| |
|
| |
| }
| |
| # delete Cade's photography interest
| |
| DELETE DATA
| |
| {
| |
| ex:Cade ex:interest ex:Photography .
| |
| }
| |
|
| |
|
| # delete and insert university of valencia
| | =RDF programming with RDFlib (Lab 2)= |
| DELETE { ?s ?p ex:University_of_Valencia }
| |
| INSERT { ?s ?p ex:Universidad_de_Valencia }
| |
| WHERE { ?s ?p ex:University_of_Valencia }
| |
|
| |
|
| #check if the deletion worked
| |
| SELECT ?s ?o2
| |
| WHERE {
| |
| ?s ex:degree ?o .
| |
| ?o ex:degreeSource ?o2 .
| |
| }
| |
| #describe sergio
| |
| DESCRIBE ex:Sergio ?o
| |
| WHERE {
| |
| ex:Sergio ?p ?o .
| |
| ?o ?p2 ?o2 .
| |
| }
| |
| </syntaxhighlight>
| |
|
| |
| ==SPARQL - RDFlib==
| |
| <syntaxhighlight> | | <syntaxhighlight> |
| from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE | | from rdflib import Graph, Namespace, Literal, BNode, XSD, FOAF, RDF, URIRef |
| | from rdflib.collection import Collection |
|
| |
|
| namespace = "lab4"
| | g = Graph() |
| sparql = SPARQLWrapper("http://10.111.21.183:9999/blazegraph/namespace/"+ namespace + "/sparql")
| |
|
| |
|
| # Print out Cade's interests | | # Getting the graph created in the first lab |
| sparql.setQuery("""
| | g.parse("lab1.ttl", format="ttl") |
| PREFIX ex: <http://example.org/>
| |
| SELECT * WHERE {
| |
| ex:Cade ex:interest ?interest.
| |
| }
| |
| """)
| |
| sparql.setReturnFormat(JSON)
| |
| results = sparql.query().convert()
| |
| for result in results["results"]["bindings"]:
| |
| print(result["interest"]["value"])
| |
|
| |
|
| # Print Emma's city and country
| | ex = Namespace("http://example.org/") |
| sparql.setQuery("""
| |
| PREFIX ex: <http://example.org/>
| |
| SELECT ?emmaCity ?emmaCountry
| |
| WHERE {
| |
| ex:Emma ex:address ?address .
| |
| ?address ex:city ?emmaCity .
| |
| ?address ex:country ?emmaCountry .
| |
| }
| |
| """)
| |
| sparql.setReturnFormat(JSON)
| |
| results = sparql.query().convert()
| |
| for result in results["results"]["bindings"]:
| |
| print("Emma's city is "+result["emmaCity"]["value"]+" and Emma's country is " + result["emmaCountry"]["value"])
| |
|
| |
|
| #Select the people who are over 26 years old
| | g.bind("ex", ex) |
| sparql.setQuery("""
| | g.bind("foaf", FOAF) |
| PREFIX ex: <http://example.org/>
| |
| SELECT ?person ?age
| |
| WHERE {
| |
| ?person ex:age ?age .
| |
| FILTER(?age > 26) .
| |
| }
| |
| """)
| |
| sparql.setReturnFormat(JSON)
| |
| results = sparql.query().convert()
| |
| for result in results["results"]["bindings"]:
| |
| print("All people who are over 26 years old: "+result["person"]["value"])
| |
|
| |
|
| #Select people who graduated with Bachelor | | # --- Michael Cohen --- |
| sparql.setQuery("""
| | # Michael Cohen was Donald Trump's attorney. |
| PREFIX ex: <http://example.org/>
| | g.add((ex.MichaelCohen, ex.attorneyTo, ex.DonaldTrump)) |
| SELECT ?person ?degree
| | # He pleaded guilty for lying to Congress. |
| WHERE {
| | g.add((ex.MichaelCohen, ex.pleadGuiltyTo, ex.LyingToCongress)) |
| ?person ex:degree ?degree .
| |
| ?degree ex:degreeLevel "Bachelor" .
| |
| }
| |
| """)
| |
| sparql.setReturnFormat(JSON)
| |
| results = sparql.query().convert()
| |
| for result in results["results"]["bindings"]:
| |
| print("People who graduated with Bachelor: "+result["person"]["value"])
| |
|
| |
|
| #Delete Cade's photography interest | | # --- Michael Flynn --- |
| sparql.setQuery("""
| | # Michael Flynn was adviser to Donald Trump. |
| PREFIX ex: <http://example.org/>
| | g.add((ex.MichaelFlynn, ex.adviserTo, ex.DonaldTrump)) |
| DELETE DATA {
| | # He pleaded guilty for lying to the FBI. |
| ex:Cade ex:interest ex:Photography .
| | g.add((ex.MichaelFlynn, ex.pleadGuiltyTo, ex.LyingToFBI)) |
| }
| | # He negotiated a plea agreement. |
| """)
| | g.add((ex.MichaelFlynn, ex.negotiated, ex.PleaAgreement)) |
| sparql.setMethod(POST)
| |
| results = sparql.query()
| |
| print(results.response.read())
| |
|
| |
|
| # Print out Cade's interests again | | # Change your graph so it represents instances of lying as blank nodes. |
| sparql.setQuery("""
| | # Remove the triples that will be duplicated |
| PREFIX ex: <http://example.org/>
| | g.remove((ex.Michael_Flynn, ex.pleadGuiltyTo, ex.LyingToFBI)) |
| SELECT * WHERE {
| | g.remove((ex.Michael_Flynn, ex.negoiated, ex.PleaBargain)) |
| ex:Cade ex:interest ?interest.
| | g.remove((ex.Rick_Gates, ex.pleadGuiltyTo, ex.LyingToFBI)) |
| }
| | g.remove((ex.Rick_Gates, ex.pleadGuiltyTo, ex.Conspiracy)) |
| """)
| | g.remove((ex.Rick_Gates, ex.chargedWith, ex.ForeignLobbying)) |
| sparql.setReturnFormat(JSON)
| | g.remove((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering)) |
| sparql.setMethod(GET)
| | g.remove((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion)) |
| results = sparql.query().convert()
| | g.remove((ex.Michael_Cohen, ex.pleadGuiltyTo, ex.LyingToCongress)) |
| for result in results["results"]["bindings"]:
| |
| print(result["interest"]["value"])
| |
|
| |
|
| # Check university names | | # --- Michael Flynn --- |
| sparql.setQuery("""
| | FlynnLying = BNode() |
| PREFIX ex: <http://example.org/>
| | g.add((FlynnLying, ex.crime, ex.LyingToFBI)) |
| SELECT ?s ?o2
| | g.add((FlynnLying, ex.pleadGulityOn, Literal("2017-12-1", datatype=XSD.date))) |
| WHERE {
| | g.add((FlynnLying, ex.liedAbout, Literal("His communications with a former Russian ambassador during the presidential transition", datatype=XSD.string))) |
| ?s ex:degree ?o .
| | g.add((FlynnLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean))) |
| ?o ex:degreeSource ?o2 .
| | g.add((ex.Michael_Flynn, ex.pleadGuiltyTo, FlynnLying)) |
| }
| |
| """)
| |
| sparql.setReturnFormat(JSON)
| |
| results = sparql.query().convert()
| |
| for result in results["results"]["bindings"]:
| |
| print(result["o2"]["value"])
| |
|
| |
|
| | # --- Rick Gates --- |
| | GatesLying = BNode() |
| | Crimes = BNode() |
| | Charged = BNode() |
| | Collection(g, Crimes, [ex.LyingToFBI, ex.Conspiracy]) |
| | Collection(g, Charged, [ex.ForeignLobbying, ex.MoneyLaundering, ex.TaxEvasion]) |
| | g.add((GatesLying, ex.crime, Crimes)) |
| | g.add((GatesLying, ex.chargedWith, Charged)) |
| | g.add((GatesLying, ex.pleadGulityOn, Literal("2018-02-23", datatype=XSD.date))) |
| | g.add((GatesLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean))) |
| | g.add((ex.Rick_Gates, ex.pleadGuiltyTo, GatesLying)) |
|
| |
|
| #Delete and insert university of valencia | | # --- Michael Cohen --- |
| sparql.setQuery("""
| | CohenLying = BNode() |
| PREFIX ex: <http://example.org/>
| | g.add((CohenLying, ex.crime, ex.LyingToCongress)) |
| DELETE { ?s ?p ex:University_of_Valencia }
| | g.add((CohenLying, ex.liedAbout, ex.TrumpRealEstateDeal)) |
| INSERT { ?s ?p ex:Universidad_de_Valencia }
| | g.add((CohenLying, ex.prosecutorsAlleged, Literal("In an August 2017 letter Cohen sent to congressional committees investigating Russian election interference, he falsely stated that the project ended in January 2016", datatype=XSD.string))) |
| WHERE { ?s ?p ex:University_of_Valencia }
| | g.add((CohenLying, ex.mullerInvestigationAlleged, Literal("Cohen falsely stated that he had never agreed to travel to Russia for the real estate deal and that he did not recall any contact with the Russian government about the project", datatype=XSD.string))) |
| """)
| | g.add((CohenLying, ex.pleadGulityOn, Literal("2018-11-29", datatype=XSD.date))) |
| sparql.setMethod(POST)
| | g.add((CohenLying, ex.pleaBargain, Literal("true", datatype=XSD.boolean))) |
| results = sparql.query()
| | g.add((ex.Michael_Cohen, ex.pleadGuiltyTo, CohenLying)) |
| print(results.response.read())
| |
|
| |
|
| # Check university names again
| | print(g.serialize(format="ttl")) |
| sparql.setQuery("""
| |
| PREFIX ex: <http://example.org/>
| |
| SELECT ?s ?o2
| |
| WHERE {
| |
| ?s ex:degree ?o .
| |
| ?o ex:degreeSource ?o2 .
| |
| }
| |
| """)
| |
| sparql.setReturnFormat(JSON)
| |
| sparql.setMethod(GET)
| |
| results = sparql.query().convert()
| |
| for result in results["results"]["bindings"]:
| |
| print(result["o2"]["value"])
| |
|
| |
|
| #Insert Sergio | | #Save (serialize) your graph to a Turtle file. |
| sparql.setQuery("""
| | # g.serialize("lab2.ttl", format="ttl") |
| PREFIX ex: <http://example.org/>
| |
| PREFIX foaf: <http://xmlns.com/foaf/0.1/>
| |
| INSERT DATA {
| |
| ex:Sergio a foaf:Person ;
| |
| ex:address [ a ex:Address ;
| |
| ex:city ex:Valenciay ;
| |
| ex:country ex:Spain ;
| |
| ex:postalCode "46021"^^xsd:string ;
| |
| ex:state ex:California ;
| |
| ex:street "4_Carrer_del_Serpis"^^xsd:string ] ;
| |
| ex:degree [ ex:degreeField ex:Computer_science ;
| |
| ex:degreeLevel "Master"^^xsd:string ;
| |
| ex:degreeSource ex:University_of_Valencia ;
| |
| ex:year "2008"^^xsd:gYear ] ;
| |
| ex:expertise ex:Big_data,
| |
| ex:Semantic_technologies,
| |
| ex:Machine_learning;
| |
| foaf:name "Sergio_Pastor"^^xsd:string .
| |
| }
| |
| """)
| |
| sparql.setMethod(POST)
| |
| results = sparql.query()
| |
| print(results.response.read())
| |
| sparql.setMethod(GET)
| |
|
| |
|
| # Describe Sergio | | #Add a few triples to the Turtle file with more information about Donald Trump. |
| sparql.setReturnFormat(TURTLE)
| | ''' |
| sparql.setQuery("""
| | ex:Donald_Trump ex:address [ ex:city ex:Palm_Beach ; |
| PREFIX ex: <http://example.org/> | | ex:country ex:United_States ; |
| DESCRIBE ex:Sergio ?o
| | ex:postalCode 33480 ; |
| WHERE {
| | ex:residence ex:Mar_a_Lago ; |
| ex:Sergio ?p ?o .
| | ex:state ex:Florida ; |
| ?o ?p2 ?o2 .
| | ex:streetName "1100 S Ocean Blvd"^^xsd:string ] ; |
| } | | ex:previousAddress [ ex:city ex:Washington_DC ; |
| """) | | ex:country ex:United_States ; |
| results = sparql.query().convert()
| | ex:phoneNumber "1 202 456 1414"^^xsd:integer ; |
| print(results.serialize(format='turtle'))
| | ex:postalCode "20500"^^xsd:integer ; |
| | ex:residence ex:The_White_House ; |
| | ex:streetName "1600 Pennsylvania Ave."^^xsd:string ]; |
| | ex:marriedTo ex:Melania_Trump; |
| | ex:fatherTo (ex:Ivanka_Trump ex:Donald_Trump_Jr ex: ex:Tiffany_Trump ex:Eric_Trump ex:Barron_Trump). |
| | ''' |
|
| |
|
| # Construct that any city is in the country in an address | | #Read (parse) the Turtle file back into a Python program, and check that the new triples are there |
| sparql.setQuery("""
| | def serialize_Graph(): |
| PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
| | newGraph = Graph() |
| PREFIX ex: <http://example.org/> | | newGraph.parse("lab2.ttl") |
| CONSTRUCT {?city ex:locatedIn ?country}
| | print(newGraph.serialize()) |
| Where {
| |
| ?s rdf:type ex:Address .
| |
| ?s ex:city ?city .
| |
| ?s ex:country ?country.
| |
| }
| |
| """)
| |
| sparql.setReturnFormat(TURTLE)
| |
| results = sparql.query().convert()
| |
| print(results.serialize(format='turtle')) | |
|
| |
|
| </syntaxhighlight>
| | #Don't need this to run until after adding the triples above to the ttl file |
| ==Web APIs and JSON-LD==
| | # serialize_Graph() |
|
| |
|
| <syntaxhighlight>
| | #Write a method (function) that starts with Donald Trump prints out a graph depth-first to show how the other graph nodes are connected to him |
| import requests
| | visited_nodes = set() |
| from rdflib import FOAF, Namespace, Literal, RDF, Graph, TURTLE
| |
|
| |
|
| r = requests.get('http://api.open-notify.org/astros.json').json()
| | def create_Tree(model, nodes): |
| g = Graph()
| | #Traverse the model breadth-first to create the tree. |
| EX = Namespace('http://EXample.org/')
| | global visited_nodes |
| g.bind("ex", EX)
| | tree = Graph() |
| | children = set() |
| | visited_nodes |= set(nodes) |
| | for s, p, o in model: |
| | if s in nodes and o not in visited_nodes: |
| | tree.add((s, p, o)) |
| | visited_nodes.add(o) |
| | children.add(o) |
| | if o in nodes and s not in visited_nodes: |
| | invp = URIRef(f'{p}_inv') #_inv represents inverse of |
| | tree.add((o, invp, s)) |
| | visited_nodes.add(s) |
| | children.add(s) |
| | if len(children) > 0: |
| | children_tree = create_Tree(model, children) |
| | for triple in children_tree: |
| | tree.add(triple) |
| | return tree |
|
| |
|
| for item in r['people']:
| | def print_Tree(tree, root, indent=0): |
| craft = item['craft'].replace(" ","_")
| | #Print the tree depth-first. |
| person = item['name'].replace(" ","_") | | print(str(root)) |
| g.add((EX[person], EX.onCraft, EX[craft])) | | for s, p, o in tree: |
| g.add((EX[person], RDF.type, FOAF.Person)) | | if s==root: |
| g.add((EX[person], FOAF.name, Literal(item['name'])))
| | print(' '*indent + ' ' + str(p), end=' ') |
| g.add((EX[craft], FOAF.name, Literal(item['craft'])))
| | print_Tree(tree, o, indent+1) |
| res = g.query("""
| | |
| CONSTRUCT {?person1 foaf:knows ?person2}
| | tree = create_Tree(g, [ex.Donald_Trump]) |
| WHERE {
| | print_Tree(tree, ex.Donald_Trump) |
| ?person1 ex:onCraft ?craft .
| | </syntaxhighlight> |
| ?person2 ex:onCraft ?craft .
| |
| }
| |
| """)
| |
|
| |
|
| for triplet in res:
| | =SPARQL (Lab 3-4)= |
| # (we don't need to add that they know themselves)
| | ===List all triples=== |
| if (triplet[0] != triplet[2]):
| | <syntaxhighlight lang="SPARQL"> |
| g.add((triplet))
| | SELECT ?s ?p ?o |
|
| | WHERE {?s ?p ?o .} |
| print(g.serialize(format="turtle"))
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==Semantic lifting - CSV== | | ===List the first 100 triples=== |
| <syntaxhighlight> | | <syntaxhighlight lang="SPARQL"> |
| import pandas as pd
| | SELECT ?s ?p ?o |
| from rdflib import Graph, Namespace, URIRef, Literal
| | WHERE {?s ?p ?o .} |
| from rdflib.namespace import RDF, XSD
| | LIMIT 100 |
| import spotlight
| | </syntaxhighlight> |
| from spotlight import SpotlightException
| |
|
| |
|
| | ===Count the number of triples=== |
| | <syntaxhighlight lang="SPARQL"> |
| | SELECT (COUNT(*) as ?count) |
| | WHERE {?s ?p ?o .} |
| | </syntaxhighlight> |
|
| |
|
| # Parameter given to spotlight to filter out results with confidence lower than this value
| | ===Count the number of indictments=== |
| CONFIDENCE = 0.5
| | <syntaxhighlight lang="SPARQL"> |
| SERVER = "https://api.dbpedia-spotlight.org/en/annotate"
| | PREFIX ns1: <http://example.org#> |
|
| |
|
| def annotate_entity(entity):
| | SELECT (COUNT(?ind) as ?amount) |
| annotations = []
| | WHERE { |
| try:
| | ?s ns1:outcome ?ind; |
| annotations = spotlight.annotate(address=SERVER,text=entity, confidence=CONFIDENCE)
| | ns1:outcome ns1:indictment. |
| # This catches errors thrown from Spotlight, including when no resource is found in DBpedia
| | } |
| except SpotlightException as e:
| | </syntaxhighlight> |
| print(e)
| |
| return annotations
| |
|
| |
|
| | ===List the names of everyone who pleaded guilty, along with the name of the investigation=== |
| | <syntaxhighlight lang="SPARQL"> |
| | PREFIX ns1: <http://example.org#> |
|
| |
|
| ex = Namespace("http://example.org/")
| | SELECT ?name ?invname |
| dbr = Namespace("http://dbpedia.org/resource/")
| | WHERE { |
| dbp = Namespace("https://dbpedia.org/property/")
| | ?s ns1:name ?name; |
| dbpage = Namespace("https://dbpedia.org/page/")
| | ns1:investigation ?invname; |
| sem = Namespace("http://semanticweb.cs.vu.nl/2009/11/sem/")
| | ns1:outcome ns1:guilty-plea . |
| tl = Namespace("http://purl.org/NET/c4dm/timeline.owl#")
| | } |
| | </syntaxhighlight> |
|
| |
|
| g = Graph()
| | ===List the names of everyone who were convicted, but who had their conviction overturned by which president=== |
| g.bind("ex", ex)
| | <syntaxhighlight lang="SPARQL"> |
| g.bind("dbr", dbr)
| | PREFIX ns1: <http://example.org#> |
| g.bind("dbp", dbp)
| |
| g.bind("dbpage", dbpage)
| |
| g.bind("sem", sem)
| |
| g.bind("tl", tl)
| |
|
| |
|
| df = pd.read_csv("russia-investigations.csv")
| | SELECT ?name ?president |
| # We need to correct the type of the columns in the DataFrame, as Pandas assigns an incorrect type when it reads the file (for me at least). We use .astype("str") to convert the content of the columns to a string.
| | WHERE { |
| df["name"] = df["name"].astype("str")
| | ?s ns1:name ?name; |
| df["type"] = df["type"].astype("str")
| | ns1:president ?president; |
| | ns1:outcome ns1:conviction; |
| | ns1:overturned ns1:true. |
| | } |
| | </syntaxhighlight> |
|
| |
|
| # iterrows creates an iterable object (list of rows)
| | ===For each investigation, list the number of indictments made=== |
| for index, row in df.iterrows():
| | <syntaxhighlight lang="SPARQL"> |
| investigation = URIRef(ex + row['investigation'])
| | PREFIX ns1: <http://example.org#> |
| investigation_spotlight = annotate_entity(row['investigation'])
| |
| investigation_start = Literal(row['investigation-start'], datatype=XSD.date)
| |
| investigation_end = Literal(row['investigation-end'], datatype=XSD.date)
| |
| investigation_days = Literal(row['investigation-days'], datatype=XSD.integer)
| |
| name = Literal(row['name'], datatype=XSD.string)
| |
| name_underscore = URIRef(dbpage + row['name'].replace(" ", "_"))
| |
| investigation_result = URIRef(
| |
| ex + row['investigation'] + "_investigation_" + row['name'].replace(" ", "_"))
| |
| indictment_days = Literal(row['indictment-days'], datatype=XSD.integer)
| |
| type = URIRef(dbr + row['type'].replace(" ", "_"))
| |
| cp_date = Literal(row['cp-date'], datatype=XSD.date)
| |
| cp_days = Literal(row['cp-days'], datatype=XSD.duration)
| |
| overturned = Literal(row['overturned'], datatype=XSD.boolean)
| |
| pardoned = Literal(row['pardoned'], datatype=XSD.boolean)
| |
| american = Literal(row['american'], datatype=XSD.boolean)
| |
| president = Literal(row['president'], datatype=XSD.string)
| |
| president_underscore = URIRef(dbr + row['president'].replace(" ", "_"))
| |
| president_spotlight = annotate_entity(row['president'])
| |
|
| |
|
| try:
| | SELECT ?invs (COUNT(?invs) as ?count) |
| g.add((( URIRef(investigation_spotlight[0]["URI"]), RDF.type, sem.Event)))
| | WHERE { |
| except:
| | ?s ns1:investigation ?invs; |
| g.add((investigation, RDF.type, sem.Event))
| | ns1:outcome ns1:indictment . |
| try:
| | } |
| g.add((( URIRef(investigation_spotlight[0]["URI"]), sem.hasBeginTimeStamp, investigation_start)))
| | GROUP BY ?invs |
| except:
| |
| g.add((investigation, sem.hasBeginTimeStamp, investigation_start))
| |
| try:
| |
| g.add((( URIRef(investigation_spotlight[0]["URI"]), sem.hasEndTimeStamp, investigation_end)))
| |
| except:
| |
| g.add((investigation, sem.hasEndTimeStamp, investigation_end))
| |
| try:
| |
| g.add((URIRef(investigation_spotlight[0]["URI"]), tl.duration, investigation_days))
| |
| except:
| |
| g.add((investigation, tl.duration, investigation_days))
| |
| try:
| |
| g.add((URIRef(investigation_spotlight[0]["URI"]), dbp.president, URIRef(president_spotlight[0]["URI"])))
| |
| except:
| |
| g.add((investigation, dbp.president, dbr.president_underscore))
| |
| try:
| |
| g.add((URIRef(investigation_spotlight[0]["URI"]), sem.hasSubEvent, investigation_result))
| |
| except:
| |
| g.add((investigation, sem.hasSubEvent, investigation_result))
| |
|
| |
| g.add((investigation_result, ex.resultType, type))
| |
| g.add((investigation_result, ex.objectOfInvestigation, name_underscore))
| |
| g.add((investigation_result, ex.isAmerican, american))
| |
| g.add((investigation_result, ex.indictmentDuration, indictment_days))
| |
| g.add((investigation_result, ex.caseSolved, cp_date))
| |
| g.add((investigation_result, ex.daysBeforeCaseSolved, cp_days))
| |
| g.add((investigation_result, ex.overturned, overturned))
| |
| g.add((investigation_result, ex.pardoned, pardoned))
| |
| | |
| g.serialize("output.ttl", format="ttl")
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==RDFS== | | ===For each investigation with multiple indictments, list the number of indictments made=== |
| <syntaxhighlight>
| | <syntaxhighlight lang="SPARQL"> |
| from rdflib.namespace import RDF, FOAF, XSD, RDFS
| | PREFIX ns1: <http://example.org#> |
| from rdflib import OWL, Graph, Namespace, URIRef, Literal, BNode
| |
| from rdflib.namespace import RDF, RDFS, XSD, OWL
| |
| import owlrl
| |
| | |
| ex = Namespace("http://example.org/")
| |
| dbr = Namespace("http://dbpedia.org/resource/")
| |
| dbp = Namespace("https://dbpedia.org/property/")
| |
| dbpage = Namespace("https://dbpedia.org/page/")
| |
| sem = Namespace("http://semanticweb.cs.vu.nl/2009/11/sem/")
| |
| tl = Namespace("http://purl.org/NET/c4dm/timeline.owl#")
| |
|
| |
|
| g = Graph()
| | SELECT ?invs (COUNT(?invs) as ?count) |
| g.bind("ex", ex)
| | WHERE { |
| g.bind("dbr", dbr)
| | ?s ns1:investigation ?invs; |
| g.bind("dbp", dbp)
| | ns1:outcome ns1:indictment . |
| g.bind("dbpage", dbpage)
| | } |
| g.bind("sem", sem)
| | GROUP BY ?invs |
| g.bind("tl", tl)
| | HAVING(?count > 1) |
| | </syntaxhighlight> |
|
| |
|
| g.parse(location="exampleTTL.ttl", format="turtle")
| | ===For each investigation with multiple indictments, list the number of indictments made, sorted with the most indictments first=== |
| | <syntaxhighlight lang="SPARQL"> |
| | PREFIX ns1: <http://example.org#> |
|
| |
|
| # University of California and University of Valencia are both Universities.
| | SELECT ?invs (COUNT(?invs) as ?count) |
| g.add((ex.University_of_California, RDF.type, ex.University))
| | WHERE { |
| g.add((ex.University_of_Valencia, RDF.type, ex.University))
| | ?s ns1:investigation ?invs; |
| # All universities are higher education institutions (HEIs).
| | ns1:outcome ns1:indictment . |
| g.add((ex.University, RDFS.subClassOf, ex.Higher_education))
| | } |
| # Only persons can have an expertise, and what they have expertise in is always a subject.
| | GROUP BY ?invs |
| g.add((ex.expertise, RDFS.domain, FOAF.Person))
| | HAVING(?count > 1) |
| g.add((ex.expertise, RDFS.range, ex.subject))
| | ORDER BY DESC(?count) |
| # Only persons can graduate from a HEI.
| | </syntaxhighlight> |
| g.add((ex.graduatedFromHEI, RDFS.domain, FOAF.Person))
| |
| g.add((ex.graduatedFromHEI, RDFS.range, ex.Higher_education))
| |
| # If you are a student, you are in fact a person as well.
| |
| g.add((ex.Student, RDFS.subClassOf, FOAF.Person))
| |
| # That a person is married to someone, means that they know them.
| |
| g.add((ex.married, RDFS.subPropertyOf, FOAF.knows))
| |
| # Finally, if a person has a name, that name is also the label of that entity."
| |
| g.add((FOAF.name, RDFS.subPropertyOf, RDFS.label))
| |
|
| |
|
| # Having a degree from a HEI means that you have also graduated from that HEI.
| | ===For each president, list the numbers of convictions and of pardons made=== |
| g.add((ex.graduatedFromHEI, RDFS.subPropertyOf, ex.degree))
| | <syntaxhighlight lang="SPARQL"> |
| # That a city is a capital of a country means that this city is located in that country.
| | PREFIX ns1: <http://example.org#> |
| g.add((ex.capital, RDFS.domain, ex.Country))
| |
| g.add((ex.capital, RDFS.range, ex.City))
| |
| g.add((ex.capital, RDFS.subPropertyOf, ex.hasLocation))
| |
| # That someone was involved in a meeting, means that they have met the other participants.
| |
| # This question was bad for the RDFS lab because we need complex OWL or easy sparql.
| |
| res = g.query("""
| |
| CONSTRUCT {?person1 ex:haveMet ?person2}
| |
| WHERE {
| |
| ?person1 ex:meeting ?Meeting .
| |
| ?Meeting ex:involved ?person2 .
| |
| }
| |
| """)
| |
| for triplet in res:
| |
| #we don't need to add that people have met themselves
| |
| if (triplet[0] != triplet[2]):
| |
| g.add((triplet))
| |
| # If someone partook in a meeting somewhere, means that they have visited that place"
| |
| # This question was bad for the RDFS lab for the same reason.
| |
| res = g.query("""
| |
| CONSTRUCT {?person ex:hasVisited ?place}
| |
| WHERE {
| |
| ?person1 ex:meeting ?Meeting .
| |
| ?Meeting ex:location ?place .
| |
| }
| |
| """)
| |
| for triplet in res:
| |
| g.add((triplet))
| |
|
| |
|
| rdfs = owlrl.OWLRL.OWLRL_Semantics(g, False, False, False)
| | SELECT ?president (COUNT(?outcome) as ?conviction) (COUNT(?pardon) as |
| rdfs.closure()
| | ?pardons) |
| rdfs.flush_stored_triples()
| | WHERE { |
| g.serialize("output.ttl",format="ttl")
| | ?s ns1:president ?president; |
| | ns1:outcome ?outcome ; |
| | ns1:outcome ns1:conviction. |
| | OPTIONAL{ |
| | ?s ns1:pardoned ?pardon . |
| | FILTER (?pardon = true) |
| | } |
| | } |
| | GROUP BY ?president |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==OWL 1== | | ===Rename mullerkg:name to something like muellerkg:person=== |
| <syntaxhighlight>
| |
| import owlrl
| |
| from rdflib import Graph, Namespace, Literal, URIRef
| |
| from rdflib.namespace import RDF, RDFS, XSD, FOAF, OWL
| |
| from rdflib.collection import Collection
| |
|
| |
|
| g = Graph()
| | <syntaxhighlight lang="SPARQL"> |
| print()
| | PREFIX ns1: <http://example.org#> |
| # Namespaces
| |
| ex = Namespace("http://example.org/")
| |
| dbp = Namespace("http://dbpedia.org/resource/")
| |
| geo = Namespace("http://sws.geonames.org/")
| |
| schema = Namespace("https://schema.org/")
| |
| akt = Namespace("http://www.aktors.org/ontology/portal#")
| |
| vcard = Namespace("http://www.w3.org/2006/vcard/ns#")
| |
|
| |
|
| g.bind("ex", ex)
| | DELETE{?s ns1:name ?o} |
| g.bind("owl", OWL)
| | INSERT{?s ns1:person ?o} |
| | | WHERE {?s ns1:name ?o} |
| g.parse(location="lab8turtle.txt", format="turtle")
| | </syntaxhighlight> |
| | |
| # Cade and Emma are two different persons.
| |
| g.add((ex.Cade, OWL.differentFrom, ex.Emma))
| |
| # The country USA above is the same as the DBpedia resource http://dbpedia.org/resource/United_States (dbr:United_States) and the GeoNames resource http://sws.geonames.org/6252001/ (gn:6252001).
| |
| g.add((ex.USA, OWL.sameAs, dbp.United_States))
| |
| g.add((ex.USA, OWL.sameAs, geo["6252001"]))
| |
| # The person class (the RDF type the Cade and Emma resources) in your graph is the same as FOAF's, schema.org's and AKT's person classes
| |
| # (they are http://xmlns.com/foaf/0.1/Person, http://schema.org/Person, and http://www.aktors.org/ontology/portal#Person, respectively.
| |
| g.add((FOAF.Person, OWL.sameAs, schema.Person))
| |
| g.add((FOAF.Person, OWL.sameAs, akt.Person))
| |
| # Nothing can be any two of a person, a university, or a city at the same time.
| |
| Collection(g, ex.DisjointClasses, [FOAF.Person, ex.University, ex.City])
| |
| g.add((OWL.AllDifferent, OWL.distinctMembers, ex.DisjointClasses))
| |
| # The property you have used in your RDF/RDFS graph to represent that 94709 is the US zip code of Berkeley, California in US
| |
| # is a subproperty of VCard's postal code-property (http://www.w3.org/2006/vcard/ns#postal-code).
| |
| g.add((ex.postalCode, RDFS.subPropertyOf, vcard["postal-code"]))
| |
| # No two US cities can have the same postal code.
| |
| # We have to add a relation from city to postal code first
| |
| res = g.query("""
| |
| PREFIX RDF: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
| |
| PREFIX ex: <http://example.org/>
| |
| CONSTRUCT {?usa_city ex:us_city_postal_code ?postalcode}
| |
| WHERE {
| |
| ?address RDF:type ex:Address .
| |
| ?address ex:country ex:USA .
| |
| ?address ex:city ?usa_city .
| |
| ?address ex:postalCode ?postalcode
| |
| }
| |
| """)
| |
| for triplet in res:
| |
| g.add((triplet))
| |
| # Now we can make us cities have distinct postal codes
| |
| g.add((ex.us_city_postal_code, RDF.type, OWL.FunctionalProperty))
| |
| g.add((ex.us_city_postal_code, RDF.type, OWL.InverseFunctionalProperty))
| |
| g.add((ex.us_city_postal_code, RDFS.subPropertyOf, ex.postalcode))
| |
|
| |
|
| # The property you have used for Emma living in Valencia is the same property as FOAF's based-near property
| | ===Update the graph so all the investigated person and president nodes become the subjects in foaf:name triples with the corresponding strings=== |
| # (http://xmlns.com/foaf/0.1/based_near), and it is the inverse of DBpedia's hometown property (http://dbpedia.org/ontology/hometown, dbo:hometown).
| |
| g.add((ex.city, OWL.sameAs, FOAF.based_near))
| |
| g.add((ex.city, OWL.inverseOf, dbp.hometown))
| |
|
| |
|
| g.add((ex.Cade, ex.married, ex.Mary))
| | <syntaxhighlight lang="SPARQL"> |
| g.add((ex.Cade, ex.livesWith, ex.Mary))
| | PREFIX ns1: <http://example.org#> |
| g.add((ex.Cade, ex.sibling, ex.Andrew))
| | PREFIX foaf: <http://xmlns.com/foaf/0.1/> |
| g.add((ex.Cade, ex.hasFather, ex.Bob))
| |
| g.add((ex.Bob, ex.fatherOf, ex.Cade))
| |
|
| |
|
| | #Persons |
| | INSERT {?person foaf:name ?name} |
| | WHERE { |
| | ?investigation ns1:person ?person . |
| | BIND(REPLACE(STR(?person), STR(ns1:), "") AS ?name) |
| | } |
|
| |
|
| #Look through the predicates(properties) above and add new triples for each one that describes them as any of the following: | | #Presidents |
| # a reflexive , irreflexive, symmetric, asymmetric, transitive, functional, or an Inverse Functional Property.
| | INSERT {?president foaf:name ?name} |
| g.add((ex.married, RDF.type, OWL.SymmetricProperty))
| | WHERE { |
| g.add((ex.married, RDF.type, OWL.FunctionalProperty))
| | ?investigation ns1:president ?president . |
| g.add((ex.married, RDF.type, OWL.InverseFunctionalProperty))
| | BIND(REPLACE(STR(?president), STR(ns1:), "") AS ?name) |
| g.add((ex.married, RDF.type, OWL.IrreflexiveProperty))
| | } |
| | </syntaxhighlight> |
|
| |
|
| g.add((ex.livesWith, RDF.type, OWL.SymmetricProperty))
| | ===Use INSERT DATA updates to add these triples=== |
| g.add((ex.livesWith, RDF.type, OWL.ReflexiveProperty))
| |
| g.add((ex.livesWith, RDF.type, OWL.TransitiveProperty))
| |
|
| |
|
| g.add((ex.sibling, RDF.type, OWL.SymmetricProperty))
| | <syntaxhighlight lang="SPARQL"> |
| | PREFIX ns1: <http://example.org#> |
|
| |
|
| g.add((ex.hasFather, RDF.type, OWL.AsymmetricProperty))
| | INSERT DATA { |
| g.add((ex.hasFather, RDF.type, OWL.FunctionalProperty))
| | ns1:George_Papadopoulos ns1:adviserTo ns1:Donald_Trump; |
| g.add((ex.hasFather, RDF.type, OWL.IrreflexiveProperty))
| | ns1:pleadGuiltyTo ns1:LyingToFBI; |
| | ns1:sentencedTo ns1:Prison. |
|
| |
|
| g.add((ex.fatherOf, RDF.type, OWL.AsymmetricProperty))
| | ns1:Roger_Stone a ns1:Republican; |
| g.add((ex.fatherOf, RDF.type, OWL.IrreflexiveProperty))
| | ns1:adviserTo ns1:Donald_Trump; |
| g.add((ex.fatherOf, RDF.type, OWL.InverseFunctionalProperty))
| | ns1:officialTo ns1:Trump_Campaign; |
| g.add((ex.fatherOf, RDF.type, OWL.IrreflexiveProperty))
| | ns1:interactedWith ns1:Wikileaks; |
| | ns1:providedTestimony ns1:House_Intelligence_Committee; |
| | ns1:clearedOf ns1:AllCharges. |
| | } |
|
| |
|
| # These three lines add inferred triples to the graph. | | #To test if added |
| owl = owlrl.CombinedClosure.RDFS_OWLRL_Semantics(g, False, False, False)
| | SELECT ?p ?o |
| owl.closure()
| | WHERE {ns1:Roger_Stone ?p ?o .} |
| owl.flush_stored_triples()
| |
| | |
| g.serialize("lab8output.xml",format="xml")
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==Semantic lifting - XML== | | ===Use DELETE DATA and then INSERT DATA updates to correct that Roger Stone was cleared of all charges=== |
| <syntaxhighlight>
| |
| from rdflib import Graph, Literal, Namespace, URIRef
| |
| from rdflib.namespace import RDF
| |
| import xml.etree.ElementTree as ET
| |
| import requests
| |
|
| |
|
| g = Graph()
| | <syntaxhighlight lang="SPARQL"> |
| ex = Namespace("http://example.org/")
| | PREFIX ns1: <http://example.org#> |
| prov = Namespace("http://www.w3.org/ns/prov#")
| |
| g.bind("ex", ex)
| |
| g.bind("prov", prov)
| |
|
| |
|
| | DELETE DATA { |
| | ns1:Roger_Stone ns1:clearedOf ns1:AllCharges . |
| | } |
|
| |
|
| # URL of xml data
| | INSERT DATA { |
| url = 'http://feeds.bbci.co.uk/news/rss.xml'
| | ns1:Roger_Stone ns1:indictedFor ns1:ObstructionOfJustice, |
| # Retrieve the xml data from the web-url.
| | ns1:WitnessTampering, |
| resp = requests.get(url)
| | ns1:FalseStatements. |
| # Creating an ElementTree from the response content
| |
| tree = ET.ElementTree(ET.fromstring(resp.content))
| |
| root = tree.getroot()
| |
| | |
| # I just realized this is cheating, but whatever, you should do it with xmltree
| |
| writerDict = {
| |
| "Mon":"Thomas_Smith",
| |
| "Tue":"Thomas_Smith",
| |
| "Wed":"Thomas_Smith",
| |
| "Thu":"Joseph_Olson",
| |
| "Fri":"Joseph_Olson",
| |
| "Sat":"Sophia_Cruise",
| |
| "Sun":"Sophia_Cruise"
| |
| } | | } |
| copyright = Literal(root.findall("./channel")[0].find("copyright").text)
| |
|
| |
|
| for item in root.findall("./channel/item"):
| | #The task specifically requested DELETE DATA & INSERT DATA, but below is |
| copyright = Literal(root.findall("./channel")[0].find("copyright").text)
| | a more efficient solution |
|
| |
|
| News_article_id = URIRef(item.find("guid").text)
| | DELETE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.} |
| title = Literal(item.find("title").text)
| | INSERT{ |
| description = Literal(item.find("description").text)
| | ns1:Roger_Stone ns1:indictedFor ns1:ObstructionOfJustice, |
| link = URIRef(item.find("link").text)
| | ns1:WitnessTampering, |
| pubDate = Literal(item.find("pubDate").text)
| | ns1:FalseStatements. |
| writerName = ex[writerDict[pubDate[:3]]]
| | } |
| | WHERE{ns1:Roger_Stone ns1:clearedOf ns1:AllCharges.} |
| | </syntaxhighlight> |
|
| |
|
| g.add((News_article_id, ex.title, title))
| | ===Use a DESCRIBE query to show the updated information about Roger Stone=== |
| g.add((News_article_id, ex.description, description))
| |
| g.add((News_article_id, ex.source_link, link))
| |
| g.add((News_article_id, ex.pubDate, pubDate))
| |
| g.add((News_article_id, ex.copyright, copyright))
| |
| g.add((News_article_id, RDF.type, ex.News_article))
| |
| g.add((News_article_id, RDF.type, prov.Entity))
| |
|
| |
|
| g.add((News_article_id, ex.authoredBy, writerName))
| | <syntaxhighlight lang="SPARQL"> |
| g.add((writerName, RDF.type, prov.Person))
| | PREFIX ns1: <http://example.org#> |
| g.add((writerName, RDF.type, prov.Agent))
| |
| g.add((ex.authoredBy, RDF.type, prov.Generation))
| |
|
| |
|
| print(g.serialize(format="turtle"))
| | DESCRIBE ?o |
| | WHERE {ns1:Roger_Stone ns1:indictedFor ?o .} |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==OWL 2== | | ===Use a CONSTRUCT query to create a new RDF group with triples only about Roger Stone=== |
| <syntaxhighlight>
| |
| from rdflib import Graph, Literal, Namespace, BNode
| |
| from rdflib.namespace import RDF, OWL, RDFS
| |
| from rdflib.collection import Collection
| |
|
| |
|
| g = Graph()
| | <syntaxhighlight lang="SPARQL"> |
| ex = Namespace("http://example.org/")
| | PREFIX ns1: <http://example.org#> |
| g.bind("ex", ex)
| |
| g.bind("owl", OWL)
| |
|
| |
|
| # anyone who is a graduate has at least one degree
| | CONSTRUCT { |
| br = BNode()
| | ns1:Roger_Stone ?p ?o. |
| g.add((br, RDF.type, OWL.Restriction))
| | ?s ?p2 ns1:Roger_Stone. |
| g.add((br, OWL.onProperty, ex.degree))
| | } |
| g.add((br, OWL.minCardinality, Literal(1)))
| | WHERE { |
| bi = BNode()
| | ns1:Roger_Stone ?p ?o . |
| Collection(g, bi, [ex.Person, br])
| | ?s ?p2 ns1:Roger_Stone |
| g.add((ex.Graduate, OWL.intersectionOf, bi))
| | } |
| | </syntaxhighlight> |
|
| |
|
| #anyone who is a university graduate has at least one degree from a university
| | ===Write a DELETE/INSERT statement to change one of the prefixes in your graph=== |
| br = BNode()
| |
| g.add((br, RDF.type, OWL.Restriction))
| |
| g.add((br, OWL.onProperty, ex.degree))
| |
| g.add((br, OWL.someValuesFrom, ex.University))
| |
| bi = BNode()
| |
| Collection(g, bi, [ex.Graduate, br])
| |
| #[ex.Person, br] also someValueFrom implies a cardinality of at least one so they would be equivalent.
| |
| #[ex.Person, ex.Graduate, br] would be redundant since intersection is associative.
| |
| g.add((ex.University_graduate, OWL.intersectionOf, bi))
| |
|
| |
|
| #a grade is either an A, B, C, D, E or F | | <syntaxhighlight lang="SPARQL"> |
| | PREFIX ns1: <http://example.org#> |
| | PREFIX dbp: <https://dbpedia.org/page/> |
|
| |
|
| bi = BNode()
| | DELETE {?s ns1:person ?o1} |
| Collection(g, bi, [Literal("A"), Literal("B"), Literal("C"), Literal("D"), Literal("E"), Literal("F")])
| | INSERT {?s ns1:person ?o2} |
| b1 = BNode()
| | WHERE{ |
| g.add((b1, RDF.type, RDFS.Datatype))
| | ?s ns1:person ?o1 . |
| g.add((b1, OWL.oneOf, bi))
| | BIND (IRI(replace(str(?o1), str(ns1:), str(dbp:))) AS ?o2) |
| | } |
|
| |
|
| g.add((ex.grade, RDFS.range, b1))
| | #This update changes the object in triples with ns1:person as the |
| | predicate. It changes its prefix of ns1 (which is the |
| | "shortcut/shorthand" for example.org) to the prefix dbp (dbpedia.org) |
| | </syntaxhighlight> |
|
| |
|
| #a straight A student is a student that has only A grades
| | ===Write an INSERT statement to add at least one significant date to the Mueller investigation, with literal type xsd:date. Write a DELETE/INSERT statement to change the date to a string, and a new DELETE/INSERT statement to change it back to xsd:date. === |
| b1 = BNode()
| |
| g.add((b1, RDF.type, OWL.Restriction))
| |
| g.add((b1, OWL.onProperty, ex.grade))
| |
| g.add((b1, OWL.allValuesFrom, Literal("A")))
| |
|
| |
|
| b2 = BNode()
| | <syntaxhighlight lang="SPARQL"> |
| g.add((b2, RDF.type, OWL.Restriction))
| | #Whilst this solution is not exactly what the task asks for, I feel like |
| g.add((b2, OWL.onProperty, ex.grade))
| | this is more appropriate given the dataset. The following update |
| g.add((b2, OWL.someValuesFrom, Literal("A")))
| | changes the objects that use the cp_date as predicate from a URI, to a |
| | literal with date as its datatype |
|
| |
|
| bi = BNode()
| | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> |
| Collection(g, bi, [ex.Student, b1, b2])
| | PREFIX ns1: <http://example.org#> |
| g.add((ex.Straight_A_student, OWL.intersectionOf, bi))
| |
|
| |
|
| #a graduate has no F grades
| | DELETE {?s ns1:cp_date ?o} |
| b3 = BNode()
| | INSERT{?s ns1:cp_date ?o3} |
| Collection(g, b3, [Literal("A"), Literal("B"), Literal("C"), Literal("D"), Literal("E")])
| | WHERE{ |
| b4 = BNode()
| | ?s ns1:cp_date ?o . |
| g.add((b4, RDF.type, RDFS.Datatype))
| | BIND (replace(str(?o), str(ns1:), "") AS ?o2) |
| g.add((b4, OWL.oneOf, b3))
| | BIND (STRDT(STR(?o2), xsd:date) AS ?o3) |
| b5 = BNode()
| | } |
| g.add((b5, RDF.type, OWL.Restriction))
| |
| g.add((b5, OWL.onProperty, ex.grade))
| |
| g.add((b5, OWL.allValuesFrom, b4))
| |
|
| |
|
| b6 = BNode()
| | #To test: |
| Collection(g, b6, [ex.Person, b1, b5])
| |
| g.add((ex.Graduate, OWL.intersectionOf, b6))
| |
|
| |
|
| #a student has a unique student number
| | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> |
| g.add((ex.student_number, RDF.type, OWL.FunctionalProperty))
| | PREFIX ns1: <http://example.org#> |
| g.add((ex.student_number, RDF.type, OWL.InverseFunctionalProperty))
| |
|
| |
|
| #each student has exactly one average grade
| | SELECT ?s ?o |
| b1 = BNode()
| | WHERE{ |
| g.add((b1, RDF.type, OWL.Restriction))
| | ?s ns1:cp_date ?o. |
| g.add((b1, OWL.onProperty, ex.average_grade))
| | FILTER(datatype(?o) = xsd:date) |
| g.add((b1, OWL.cardinality, Literal(1)))
| | } |
|
| |
|
| b2 = BNode()
| | #To change it to an integer, use the following code, and to change it |
| g.add((b2, RDF.type, OWL.Restriction))
| | back to date, swap "xsd:integer" to "xsd:date" |
| g.add((b2, OWL.onProperty, ex.student_number))
| |
| g.add((b2, OWL.cardinality, Literal(1)))
| |
|
| |
|
| Collection(g, b3, [ex.Person, b1, b2])
| | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> |
| g.add((ex.Student, OWL.intersectionOf, b3))
| | PREFIX ns1: <http://example.org#> |
|
| |
|
| #a course is either a bachelor, a master or a Ph.D course
| | DELETE {?s ns1:cp_date ?o} |
| bi = BNode()
| | INSERT{?s ns1:cp_date ?o2} |
| Collection(g, bi, [ex.Bachelor_course, ex.Master_course, ex["Ph.D_course"]])
| | WHERE{ |
| b1 = BNode()
| | ?s ns1:cp_date ?o . |
| #g.add((b1, RDF.type, OWL.Class))
| | BIND (STRDT(STR(?o), xsd:integer) AS ?o2) |
| g.add((b1, OWL.oneOf, bi))
| | } |
| | |
| g.add((ex.Course, RDF.type, b1))
| |
| | |
| #a bachelor student takes only bachelor courses
| |
| g.add((ex.Bachelor_student, RDFS.subClassOf, ex.Student))
| |
| b1 = BNode()
| |
| g.add((b1, RDF.type, OWL.Restriction))
| |
| g.add((b1, OWL.onProperty, ex.hasCourse))
| |
| g.add((b1, OWL.allValuesFrom, ex.Bachelor_course))
| |
| | |
| b2 = BNode()
| |
| Collection(g, b2, [ex.Student, b1])
| |
| g.add((ex.Bachelor_student, OWL.intersectionOf, b2))
| |
|
| |
|
| #a masters student takes only master courses and at most one bachelor course
| |
|
| |
| b1 = BNode()
| |
| g.add((b1, RDF.type, OWL.Restriction))
| |
| g.add((b1, OWL.onProperty, ex.hasCourse))
| |
| g.add((b1, OWL.maxQualifiedCardinality, Literal(1)))
| |
| g.add((b1, OWL.onClass, ex.Bachelor_course))
| |
|
| |
| b2 = BNode()
| |
| g.add((b2, RDF.type, OWL.Restriction))
| |
| g.add((b2, OWL.onProperty, ex.hasCourse))
| |
| g.add((b2, OWL.someValuesFrom, ex.Master_course))
| |
|
| |
| b3 = BNode()
| |
| Collection(g, b3, [ex.Master_course, ex.Bachelor_course])
| |
|
| |
| b5 = BNode()
| |
| g.add((b5, RDF.type, OWL.Restriction))
| |
| g.add((b5, OWL.onProperty, ex.hasCourse))
| |
| g.add((b5, OWL.allValuesFrom, b3))
| |
|
| |
| b6 = BNode()
| |
| Collection(g, b6, [ex.Student, b1, b2, b5])
| |
| g.add((ex.Master_student, OWL.intersectionOf, b6))
| |
|
| |
| #a Ph.D student takes only Ph.D and at most two masters courses
| |
| b1 = BNode()
| |
| g.add((b1, RDF.type, OWL.Restriction))
| |
| g.add((b1, OWL.onProperty, ex.hasCourse))
| |
| g.add((b1, OWL.maxQualifiedCardinality, Literal(2)))
| |
| g.add((b1, OWL.onClass, ex.Master_course))
| |
|
| |
| b2 = BNode()
| |
| g.add((b2, RDF.type, OWL.Restriction))
| |
| g.add((b2, OWL.onProperty, ex.hasCourse))
| |
| g.add((b2, OWL.someValuesFrom, ex["Ph.D_course"]))
| |
|
| |
| b3 = BNode()
| |
| Collection(g, b3, [ex.Master_course, ex["Ph.D_course"]])
| |
|
| |
| b5 = BNode()
| |
| g.add((b5, RDF.type, OWL.Restriction))
| |
| g.add((b5, OWL.onProperty, ex.hasCourse))
| |
| g.add((b5, OWL.allValuesFrom, b3))
| |
|
| |
| b6 = BNode()
| |
| Collection(g, b6, [ex.Student, b1, b2, b5])
| |
| g.add((ex["Ph.D_student"], OWL.intersectionOf, b6))
| |
| #a Ph.D. student cannot take a bachelor course
| |
| #NA, it's already true
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==Lab 11: Semantic Lifting - HTML== | | =SPARQL Programming (Lab 5)= |
|
| |
|
| <syntaxhighlight> | | <syntaxhighlight> |
| from bs4 import BeautifulSoup as bs
| |
| from rdflib import Graph, Literal, URIRef, Namespace
| |
| from rdflib.namespace import RDF, SKOS, XSD
| |
| import requests
| |
|
| |
|
| | from rdflib import Graph, Namespace, RDF, FOAF |
| | from SPARQLWrapper import SPARQLWrapper, JSON, POST, GET, TURTLE |
|
| |
|
| g = Graph() | | g = Graph() |
| ex = Namespace("http://example.org/")
| | g.parse("Russia_investigation_kg.ttl") |
| g.bind("ex", ex) | |
|
| |
|
| # Download html from URL and parse it with BeautifulSoup. | | # ----- RDFLIB ----- |
| url = "https://www.semanticscholar.org/topic/Knowledge-Graph/159858"
| | ex = Namespace('http://example.org#') |
| page = requests.get(url)
| |
| html = bs(page.content, features="html.parser")
| |
| # print(html.prettify()) | |
|
| |
|
| # Find the html that surrounds all the papers
| | NS = { |
| papers = html.find_all('div', attrs={'class': 'flex-container'})
| | '': ex, |
| # Find the html that surrounds the info box
| | 'rdf': RDF, |
| topic = html.find_all(
| | 'foaf': FOAF, |
| 'div', attrs={'class': 'flex-item__left-column entity-header'}) | | } |
|
| |
|
| | # Print out a list of all the predicates used in your graph. |
| | task1 = g.query(""" |
| | SELECT DISTINCT ?p WHERE{ |
| | ?s ?p ?o . |
| | } |
| | """, initNs=NS) |
|
| |
|
| # Iterate through each paper to make triples:
| | print(list(task1)) |
| for paper in papers:
| |
| # e.g selecting title.
| |
| title = paper.find('div', attrs={'class': 'timeline-paper-title'}).text
| |
| author = paper.find('span', attrs={'class': 'author-list'}).text
| |
| papper_year = paper.find(
| |
| 'li', attrs={'data-selenium-selector': "paper-year"}).text
| |
| corpus_ID = paper.find(
| |
| 'li', attrs={'data-selenium-selector': "corpus-id"}).text
| |
| corpus_ID = corpus_ID.replace(" ", "_")
| |
| c_id = corpus_ID.replace("Corpus_ID:_", "")
| |
|
| |
|
| article = URIRef(ex + c_id) | | # Print out a sorted list of all the presidents represented in your graph. |
| | task2 = g.query(""" |
| | SELECT DISTINCT ?president WHERE{ |
| | ?s :president ?president . |
| | } |
| | ORDER BY ?president |
| | """, initNs=NS) |
|
| |
|
| # Adding tripels
| | print(list(task2)) |
| g.add((article, RDF.type, ex.paper))
| |
| g.add((article, ex.HasID, Literal(c_id, datatype=XSD.int)))
| |
| g.add((article, ex.HasTitle, Literal(title, datatype=XSD.string)))
| |
| g.add((article, ex.Publisher_year, Literal(papper_year, datatype=XSD.year)))
| |
|
| |
|
| author = author.split(", ")
| | # Create dictionary (Python dict) with all the represented presidents as keys. For each key, the value is a list of names of people indicted under that president. |
| for x in author:
| | task3_dic = {} |
| name = x.replace(" ", "_")
| |
| name = URIRef(ex + name)
| |
|
| |
|
| g.add((article, ex.hasAuthor, name))
| | task3 = g.query(""" |
| | SELECT ?president ?person WHERE{ |
| | ?s :president ?president; |
| | :name ?person; |
| | :outcome :indictment. |
| | } |
| | """, initNs=NS) |
|
| |
|
| # Iterate through the info box to make triples:
| | for president, person in task3: |
| for items in topic: | | if president not in task3_dic: |
| main_topic = items.find('h1', attrs={'class': 'entity-name'}).text
| | task3_dic[president] = [person] |
| related_topic = items.find( | | else: |
| 'div', attrs={'class': 'entity-aliases'}).text
| | task3_dic[president].append(person) |
| related_topic = related_topic.replace("Known as: ", "")
| |
| related_topic = related_topic.replace(f'\xa0Expand', "")
| |
| related_topic = related_topic.replace(" ", "")
| |
| main_topic = main_topic.replace(" ", "_") | |
|
| |
|
| main_topic = URIRef(ex + main_topic)
| | print(task3_dic) |
|
| |
|
| g.add((article, RDF.type, SKOS.Concept))
| | # Use an ASK query to investigate whether Donald Trump has pardoned more than 5 people. |
| g.add((article, SKOS.hasTopConcept, main_topic))
| |
|
| |
|
| related_topic = related_topic.split(',')
| | # This task is a lot trickier than it needs to be. As far as I'm aware RDFLib has no HAVING support, so a query like this: |
| | task4 = g.query(""" |
| | ASK { |
| | SELECT (COUNT(?s) as ?count) WHERE{ |
| | ?s :pardoned :true; |
| | :president :Bill_Clinton . |
| | } |
| | HAVING (?count > 5) |
| | } |
| | """, initNs=NS) |
|
| |
|
| for related_labels in related_topic:
| | print(task4.askAnswer) |
| related_topic = URIRef(ex + related_labels)
| |
| g.add((article, SKOS.broader, related_topic))
| |
|
| |
|
| | # This works fine in Blazegraph and is a valid SPARQL query, but it will always return false in RDFLib because it uses HAVING. |
| | # Instead you have to use a nested SELECT query like below, where you use FILTER instead of HAVING. Donald Trump has no pardons, |
| | # so I have instead chosen Bill Clinton with 13 to check if the query works. |
|
| |
|
| print(g.serialize(format='turtle'))
| | task4 = g.query(""" |
| </syntaxhighlight>
| | ASK{ |
| | SELECT ?count WHERE{{ |
| | SELECT (COUNT(?s) as ?count) WHERE{ |
| | ?s :pardoned :true; |
| | :president :Bill_Clinton . |
| | }} |
| | FILTER (?count > 5) |
| | } |
| | } |
| | """, initNs=NS) |
|
| |
|
| ==Owlready2==
| | print(task4.askAnswer) |
Martin's solution. NOTE: instead of using "is_a" to define classes like I have mostly done, use "equivalent_to" to make the reasoner more powerful (and work at all in this case).
| |
| <syntaxhighlight>
| |
| from owlready2 import *
| |
| from rdflib import Graph, Namespace
| |
|
| |
|
| BASE = 'http://info216.uib.no/owlready2-lab/'
| | # Use a DESCRIBE query to create a new graph with information about Donald Trump. Print out the graph in Turtle format. |
| onto = get_ontology(BASE)
| |
|
| |
|
| def clean_onto(onto):
| | # By all accounts, it seems DESCRIBE queries are yet to be implemented in RDFLib, but they are attempting to implement it: |
| with onto:
| | # https://github.com/RDFLib/rdflib/pull/2221 <--- Issue and proposed solution raised |
| for ind in onto.individuals():
| | # https://github.com/RDFLib/rdflib/commit/2325b4a81724c1ccee3a131067db4fbf9b4e2629 <--- Solution committed to RDFLib |
| destroy_entity(ind)
| | # This solution does not work. However, this proposed solution should work if DESCRIBE is implemented in RDFLib |
| for prop in onto.properties():
| |
| destroy_entity(prop)
| |
| for cls in onto.classes():
| |
| destroy_entity(cls)
| |
|
| |
|
| def onto2graph(onto):
| | # task5 = g.query(""" |
| graph = Graph()
| | # DESCRIBE :Donald_Trump |
| onto.save('temp_owlready2.nt', format='ntriples')
| | # """, initNs=NS) |
| graph.parse('temp_owlready2.nt', format='ntriples')
| |
| return graph
| |
|
| |
|
| def print_onto(onto):
| | # print(task5.serialize()) |
| g = onto2graph(onto)
| |
| g.bind('', Namespace(BASE))
| |
| print(g.serialize(format='ttl'))
| |
|
| |
|
| clean_onto(onto)
| | # ----- SPARQLWrapper ----- |
|
| |
|
| # anyone who is a graduate has at least one degree
| | SERVER = 'http://localhost:7200' #Might need to replace this |
| with onto:
| | REPOSITORY = 'Labs' #Replace with your repository name |
| class Student(Thing): pass
| |
| class Degree(Thing): pass
| |
| class hasDegree(Student >> Degree): pass
| |
| class Graduate(Student):
| |
| is_a = [hasDegree.some(Degree)]
| |
|
| |
|
| #anyone who is a university graduate has at least one degree from a university | | # Query Endpoint |
| with onto:
| | sparql = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}') |
| class UniversityDegree(Degree): pass
| | # Update Endpoint |
| class UniversityGraduate(Graduate):
| | sparqlUpdate = SPARQLWrapper(f'{SERVER}/repositories/{REPOSITORY}/statements') |
| is_a = [hasDegree.some(UniversityDegree)]
| |
|
| |
|
| | # Ask whether there was an ongoing indictment on the date 1990-01-01. |
| | sparql.setQuery(""" |
| | PREFIX ns1: <http://example.org#> |
| | ASK { |
| | SELECT ?end ?start |
| | WHERE{ |
| | ?s ns1:investigation_end ?end; |
| | ns1:investigation_start ?start; |
| | ns1:outcome ns1:indictment. |
| | FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date) |
| | } |
| | } |
| | """) |
| | sparql.setReturnFormat(JSON) |
| | results = sparql.query().convert() |
| | print(f"Are there any investigation on the 1990-01-01: {results['boolean']}") |
|
| |
|
| #a grade is either an A, B, C, D, E or F | | # List ongoing indictments on that date 1990-01-01. |
| with onto:
| | sparql.setQuery(""" |
| class Grade(Thing): pass | | PREFIX ns1: <http://example.org#> |
| class A(Grade): pass | | SELECT ?s |
| class B(Grade): pass | | WHERE{ |
| class C(Grade): pass
| | ?s ns1:investigation_end ?end; |
| class D(Grade): pass
| | ns1:investigation_start ?start; |
| class E(Grade): pass
| | ns1:outcome ns1:indictment. |
| class F(Grade): pass | | FILTER(?start <= "1990-01-01"^^xsd:date && ?end >= "1990-01-01"^^xsd:date) |
| | } |
| | """) |
|
| |
|
| Grade.is_a.append(OneOf([A, B, C, D, E, F]))
| | sparql.setReturnFormat(JSON) |
| | results = sparql.query().convert() |
|
| |
|
| #a straight A student is a student that has only A grades
| | print("The ongoing investigations on the 1990-01-01 are:") |
| with onto:
| | for result in results["results"]["bindings"]: |
| class hasGrade(Student >> Grade): pass
| | print(result["s"]["value"]) |
| class StraightAStudent(Student): | |
| is_a = [hasGrade.only(A)]
| |
|
| |
|
| | # Describe investigation number 100 (muellerkg:investigation_100). |
| | sparql.setQuery(""" |
| | PREFIX ns1: <http://example.org#> |
| | DESCRIBE ns1:investigation_100 |
| | """) |
|
| |
|
| #a graduate has no F grades
| | sparql.setReturnFormat(TURTLE) |
| #Graduate.is_a.append(hasGrade.only(OneOf[A,B,C,D,E]))
| | results = sparql.query().convert() |
|
| |
|
| #a student has a unique student number
| | print(results) |
| with onto:
| |
| class StudentNumber(Thing):pass
| |
| class hasStudentNumber(Student >> StudentNumber, FunctionalProperty, InverseFunctionalProperty):pass
| |
|
| |
|
| #each student has exactly one average grade | | # Print out a list of all the types used in your graph. |
| with onto:
| | sparql.setQuery(""" |
| class AverageGrade(Grade):pass | | PREFIX ns1: <http://example.org#> |
| class hasAverageGrade(Student >> AverageGrade):pass | | PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> |
| Student.is_a.append(hasAverageGrade.exactly(1,AverageGrade))
| |
| Student.is_a.append(hasStudentNumber.exactly(1,StudentNumber))
| |
|
| |
|
| |
|
| #a course is either a bachelor, a master or a Ph.D course
| | SELECT DISTINCT ?types |
| with onto:
| | WHERE{ |
| class Course(Thing):pass | | ?s rdf:type ?types . |
| class BachelorCourse(Course):pass | | } |
| class MasterCourse(Course):pass
| | """) |
| class PhDCourse(Course):pass
| |
| | |
| Course.is_a.append(OneOf([BachelorCourse, MasterCourse, PhDCourse]))
| |
|
| |
|
| #a bachelor student takes only bachelor courses
| | sparql.setReturnFormat(JSON) |
| with onto:
| | results = sparql.query().convert() |
| class takesCourse(Student>>Course):pass
| |
| class BachelorStudent(Student):
| |
| is_a = [
| |
| takesCourse.only(BachelorCourse) &
| |
| takesCourse.some(Course)
| |
| ]
| |
|
| |
|
| |
|
| #a masters student takes only master courses and at most one bachelor course
| | rdf_Types = [] |
| with onto:
| |
| class MasterOrBachelorCourse(Course):pass
| |
| class MasterStudent(Student):
| |
| is_a = [
| |
| takesCourse.only(Not(PhDCourse)) &
| |
| takesCourse.max(1,BachelorCourse) &
| |
| takesCourse.some(MasterCourse)
| |
| ]
| |
|
| |
|
| | for result in results["results"]["bindings"]: |
| | rdf_Types.append(result["types"]["value"]) |
|
| |
|
| #a Ph.D student takes only Ph.D and at most two masters courses
| | print(rdf_Types) |
| with onto:
| |
| class PhDStudent(Student):
| |
| is_a = [
| |
| takesCourse.only(Not(BachelorCourse))&
| |
| takesCourse.max(2,MasterCourse)&
| |
| takesCourse.some(PhDCourse)
| |
| ]
| |
|
| |
|
| # In comparison to lab 10.. | | # Update the graph to that every resource that is an object in a muellerkg:investigation triple has the rdf:type muellerkg:Investigation. |
| """ | | update_str = """ |
| b1 = BNode()
| | PREFIX ns1: <http://example.org#> |
| g.add((b1, RDF.type, OWL.Restriction))
| | PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> |
| g.add((b1, OWL.onProperty, ex.hasCourse))
| |
| g.add((b1, OWL.maxQualifiedCardinality, Literal(2)))
| |
| g.add((b1, OWL.onClass, ex.Master_course))
| |
|
| |
|
| b2 = BNode()
| | INSERT{ |
| g.add((b2, RDF.type, OWL.Restriction))
| | ?invest rdf:type ns1:Investigation . |
| g.add((b2, OWL.onProperty, ex.hasCourse))
| | } |
| g.add((b2, OWL.someValuesFrom, ex["Ph.D_course"]))
| | WHERE{ |
| | ?s ns1:investigation ?invest . |
| | }""" |
|
| |
|
| b3 = BNode()
| | sparqlUpdate.setQuery(update_str) |
| Collection(g, b3, [ex.Master_course, ex["Ph.D_course"]])
| | sparqlUpdate.setMethod(POST) |
| | sparqlUpdate.query() |
|
| |
|
| b5 = BNode()
| | #To Test |
| g.add((b5, RDF.type, OWL.Restriction))
| | sparql.setQuery(""" |
| g.add((b5, OWL.onProperty, ex.hasCourse))
| | prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> |
| g.add((b5, OWL.allValuesFrom, b3))
| | PREFIX ns1: <http://example.org#> |
|
| |
|
| b6 = BNode()
| | ASK{ |
| Collection(g, b6, [ex.Student, b1, b2, b5])
| | ns1:watergate rdf:type ns1:Investigation. |
| g.add((ex["Ph.D_student"], OWL.intersectionOf, b6))
| | } |
| """ | | """) |
| #a Ph.D. student cannot take a bachelor course
| |
| #NA, it's already true
| |
|
| |
|
| #print(onto2graph(onto).serialize(format="turtle"))
| | sparql.setReturnFormat(JSON) |
| | results = sparql.query().convert() |
| | print(results['boolean']) |
|
| |
|
| clean_onto(onto)
| | # Update the graph to that every resource that is an object in a muellerkg:person triple has the rdf:type muellerkg:IndictedPerson. |
| | update_str = """ |
| | PREFIX ns1: <http://example.org#> |
| | PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> |
|
| |
|
| # anyone who is a graduate has at least one degree
| | INSERT{ |
| # a graduate is a student with at least one degree
| | ?person rdf:type ns1:IndictedPerson . |
| with onto:
| | } |
| class Student(Thing): pass
| | WHERE{ |
| class Degree(Thing): pass | | ?s ns1:name ?person . |
| class hasDegree(Student >> Degree): pass | | }""" |
| class Graduate(Student):
| |
| equivalent_to = [hasDegree.some(Degree)]
| |
|
| |
|
| # test with individual
| | sparqlUpdate.setQuery(update_str) |
| with onto:
| | sparqlUpdate.setMethod(POST) |
| cade = Student()
| | sparqlUpdate.query() |
| infosci = Degree()
| |
| cade.hasDegree.append(infosci)
| |
| from owlready2 import sync_reasoner
| |
|
| |
|
| print(onto.Graduate in cade.is_a)
| | #To test, run the query in the above task, replacing the ask query with e.g. ns1:Deborah_Gore_Dean rdf:type ns1:IndictedPerson |
| sync_reasoner()
| |
| print(onto.Graduate in cade.is_a)
| |
|
| |
|
| | # Update the graph so all the investigation nodes (such as muellerkg:watergate) become the subject in a dc:title triple with the corresponding string (watergate) as the literal. |
| | update_str = """ |
| | PREFIX ns1: <http://example.org#> |
| | PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> |
| | PREFIX dc: <http://purl.org/dc/elements/1.1/> |
|
| |
|
| print("graduate is: ", Graduate.is_a)
| | INSERT{ |
| print("cade is: ", cade.is_a)
| | ?invest dc:title ?investString. |
| | } |
| | WHERE{ |
| | ?s ns1:investigation ?invest . |
| | BIND (replace(str(?invest), str(ns1:), "") AS ?investString) |
| | }""" |
|
| |
|
| </syntaxhighlight>
| | sparqlUpdate.setQuery(update_str) |
| | sparqlUpdate.setMethod(POST) |
| | sparqlUpdate.query() |
|
| |
|
| Alternative solution. More pro from Andreas, but it's a not so thoroughly tested draft for us teacher assistents he stresses (it's new material for us), so you might need to make some changes (like the one recommended above: equivalent_to instead of is_a).
| | #Same test as above, replace it with e.g. ns1:watergate dc:title "watergate" |
|
| |
|
| <syntaxhighlight> | | # Print out a sorted list of all the indicted persons represented in your graph. |
| from owlready2 import get_ontology, Thing, ObjectProperty
| | sparql.setQuery(""" |
| from rdflib import Graph, Namespace
| | PREFIX ns1: <http://example.org#> |
| | PREFIX foaf: <http://xmlns.com/foaf/0.1/> |
|
| |
|
| BASE = 'http://info216.uib.no/owlready2-lab/'
| | SELECT ?name |
| onto = get_ontology(BASE)
| | WHERE{ |
| | ?s ns1:name ?name; |
| | ns1:outcome ns1:indictment. |
| | } |
| | ORDER BY ?name |
| | """) |
|
| |
|
| def onto2graph(onto):
| | sparql.setReturnFormat(JSON) |
| graph = Graph()
| | results = sparql.query().convert() |
| onto.save('temp.nt', format='ntriples')
| |
| graph.parse('temp.nt', format='ntriples')
| |
| return graph
| |
|
| |
|
| def print_onto(onto):
| | names = [] |
| g = onto2graph(onto)
| |
| g.bind('', Namespace(BASE))
| |
| print(g.serialize(format='ttl'))
| |
|
| |
|
| from owlready2 import destroy_entity
| | for result in results["results"]["bindings"]: |
| def clean_onto(onto):
| | names.append(result["name"]["value"]) |
| with onto:
| |
| for ind in onto.individuals():
| |
| destroy_entity(ind)
| |
| for prop in onto.properties():
| |
| destroy_entity(prop)
| |
| for cls in onto.classes():
| |
| destroy_entity(cls)
| |
|
| |
|
| # anyone who is a graduate has at least one degree
| | print(names) |
| with onto:
| |
| class Student(Thing): pass
| |
| class Degree(Thing): pass
| |
| class hasDegree(Student >> Degree): pass
| |
| class Graduate(Student):
| |
| is_a = [hasDegree.some(Degree)]
| |
|
| |
|
| # anyone who is a university graduate has at least one degree from a university | | # Print out the minimum, average and maximum indictment days for all the indictments in the graph. |
| with onto:
| |
| class hasDegree(ObjectProperty): pass
| |
| class degreeFrom(ObjectProperty): pass
| |
| class Degree(Thing): pass
| |
| class University(Thing): pass
| |
| class UniversityGraduate(Thing):
| |
| hasDegree: Degree
| |
| is_a = [hasDegree.some(Degree & degreeFrom.some(University))]
| |
| print_onto(onto)
| |
|
| |
|
| from owlready2 import declare_datatype
| | sparql.setQuery(""" |
| class XSDString(object):
| | prefix xsd: <http://www.w3.org/2001/XMLSchema#> |
| def __init__(self, value): self.value = value | | PREFIX ns1: <http://example.org#> |
| def str_parser(s): return s
| |
| def str_unparser(s): return s
| |
| declare_datatype(XSDString, 'http://www.w3.org/2001/XMLSchema#string', str_parser, str_unparser)
| |
|
| |
|
| # a grade is either an A, B, C, D, E or F
| | SELECT (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min) WHERE{ |
| from owlready2 import OneOf
| | ?s ns1:indictment_days ?days; |
| with onto:
| | ns1:outcome ns1:indictment. |
| class Grade(Thing): pass | | |
| class charGrade(Grade >> XSDString): pass
| | BIND (replace(str(?days), str(ns1:), "") AS ?daysR) |
| grade_A = Grade()
| | BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved) |
| grade_A.charGrade = ['A']
| | } |
| grade_B = Grade()
| | """) |
| grade_B.charGrade = ['B'] | |
| grade_C = Grade() | |
| grade_C.charGrade = ['C']
| |
| grade_D = Grade()
| |
| grade_D.charGrade = ['D']
| |
| grade_E = Grade()
| |
| grade_E.charGrade = ['E'] | |
| grade_F = Grade()
| |
| grade_F.charGrade = ['F']
| |
| Grade.equivalent_to.append(OneOf([
| |
| grade_A, grade_B, grade_C, grade_D, grade_E, grade_F
| |
| ]))
| |
| print_onto(onto)
| |
|
| |
|
| # a straight A student is a student that has only A grades
| | sparql.setReturnFormat(JSON) |
| with onto:
| | results = sparql.query().convert() |
| class Grade(Thing): pass
| |
| class charGrade(Grade >> XSDString): pass
| |
| grade_A = Grade()
| |
| grade_A.charGrade = ['A']
| |
| grade_B = Grade()
| |
| grade_B.charGrade = ['B']
| |
| # ...
| |
| Grade.equivalent_to.append(OneOf([
| |
| grade_A, grade_B, # ...
| |
| ]))
| |
|
| |
|
| class Student(Thing): pass
| | for result in results["results"]["bindings"]: |
| class hasGrade(Student >> Grade): pass | | print(f'The longest an investigation lasted was: {result["max"]["value"]}') |
| class GradeA(Grade): | | print(f'The shortest an investigation lasted was: {result["min"]["value"]}') |
| equivalent_to = [OneOf([grade_A])]
| | print(f'The average investigation lasted: {result["avg"]["value"]}') |
| class StraightAStudent(Student): | |
| equivalent_to = [
| |
| hasGrade.some(GradeA) & hasGrade.only(GradeA)
| |
| ]
| |
| print_onto(onto)
| |
|
| |
|
| # a graduate has no F grades
| | # Print out the minimum, average and maximum indictment days for all the indictments in the graph per investigation. |
| with onto:
| |
| class Grade(Thing): pass
| |
| class charGrade(Grade >> XSDString): pass
| |
| grade_A = Grade()
| |
| grade_A.charGrade = ['A']
| |
| # ...
| |
| grade_F.charGrade = ['F']
| |
| Grade.equivalent_to.append(OneOf([
| |
| grade_A, # ...
| |
| grade_F
| |
| ]))
| |
|
| |
|
| class Student(Thing): pass
| | sparql.setQuery(""" |
| class hasGrade(Student >> Grade): pass | | prefix xsd: <http://www.w3.org/2001/XMLSchema#> |
| class GradeF(Grade): | | PREFIX ns1: <http://example.org#> |
| equivalent_to = [OneOf([grade_F])]
| |
| class Graduate(Student):
| |
| equivalent_to = [Student & ~ hasGrade.some(GradeF)]
| |
| print_onto(onto)
| |
|
| |
|
| # a student has a single unique student number
| | SELECT ?investigation (AVG(?daysRemoved) as ?avg) (MAX(?daysRemoved) as ?max) (MIN(?daysRemoved) as ?min) WHERE{ |
| class XSDInt(object):
| | ?s ns1:indictment_days ?days; |
| def __init__(self, value): self.value = value
| | ns1:outcome ns1:indictment; |
| def int_parser(s): return int(s)
| | ns1:investigation ?investigation. |
| def int_unparser(i): return str(i)
| | |
| declare_datatype(XSDInt, 'http://www.w3.org/2001/XMLSchema#int', int_parser, int_unparser)
| | BIND (replace(str(?days), str(ns1:), "") AS ?daysR) |
| | BIND (STRDT(STR(?daysR), xsd:float) AS ?daysRemoved) |
| | } |
| | GROUP BY ?investigation |
| | """) |
|
| |
|
| from owlready2 import FunctionalProperty, InverseFunctionalProperty
| | sparql.setReturnFormat(JSON) |
| with onto:
| | results = sparql.query().convert() |
| class Student(Thing): pass
| |
| class hasStudentNumber(Student >> XSDInt):
| |
| is_a = [FunctionalProperty, InverseFunctionalProperty]
| |
| print_onto(onto)
| |
|
| |
|
| # each student has exactly one average grade
| | for result in results["results"]["bindings"]: |
| class XSDFloat(object):
| | print(f'{result["investigation"]["value"]} - min: {result["min"]["value"]}, max: {result["max"]["value"]}, avg: {result["avg"]["value"]}') |
| def __init__(self, value): self.value = value | |
| def int_parser(s): return float(s)
| |
| def int_unparser(f): return str(f)
| |
| declare_datatype(XSDFloat, 'http://www.w3.org/2001/XMLSchema#float', int_parser, int_unparser)
| |
|
| |
|
| with onto:
| | </syntaxhighlight> |
| class Student(Thing): pass
| |
| class hasAverageGrade(Grade >> XSDFloat): pass
| |
| Student.is_a.append(hasAverageGrade.exactly(1, XSDFloat))
| |
| print_onto(onto)
| |
|
| |
|
| # a course is either a bachelor, a master or a Ph.D course
| | =Wikidata SPARQL (Lab 6)= |
| from owlready2 import AllDisjoint
| | ===Use a DESCRIBE query to retrieve some triples about your entity=== |
| with onto:
| |
| class Course(Thing): pass
| |
| class BachelorCourse(Course): pass
| |
| class MasterCourse(Course): pass
| |
| class PhDCourse(Course): pass
| |
| AllDisjoint([BachelorCourse, MasterCourse, PhDCourse])
| |
| print_onto(onto)
| |
|
| |
|
| # a bachelor student takes only bachelor courses
| | <syntaxhighlight lang="SPARQL"> |
| from owlready2 import AllDisjoint
| | DESCRIBE wd:Q42 LIMIT 100 |
| with onto:
| | </syntaxhighlight> |
| class Course(Thing): pass
| |
| class BachelorCourse(Course): pass
| |
| class MasterCourse(Course): pass
| |
| class PhDCourse(Course): pass
| |
| AllDisjoint([BachelorCourse, MasterCourse, PhDCourse])
| |
| print_onto(onto)
| |
|
| |
|
| # a masters student takes only master courses, except for at most one bachelor course
| | ===Use a SELECT query to retrieve the first 100 triples about your entity=== |
| with onto:
| |
| class Student(Thing): pass
| |
| class Course(Thing): pass
| |
| class takesCourse(Student >> Course): pass
| |
| class BachelorCourse(Course): pass
| |
| class MasterCourse(Course): pass
| |
| class MasterStudent(Student):
| |
| is_a = [
| |
| takesCourse.some(MasterCourse) &
| |
| takesCourse.only(MasterCourse | BachelorCourse) &
| |
| takesCourse.max(1, BachelorCourse)
| |
| ]
| |
| print_onto(onto)
| |
|
| |
|
| # a Ph.D student takes only Ph.D courses, except for at most two masters courses
| | <syntaxhighlight lang="SPARQL"> |
| with onto:
| | SELECT * WHERE { |
| class Student(Thing): pass
| | wd:Q42 ?p ?o . |
| class Course(Thing): pass
| | } LIMIT 100 |
| class takesCourse(Student >> Course): pass
| | </syntaxhighlight> |
| class MasterCourse(Course): pass
| |
| class PhDCourse(Course): pass
| |
| class PhDStudent(Student):
| |
| is_a = [
| |
| takesCourse.some(PhDCourse) &
| |
| takesCourse.only(PhDCourse | MasterCourse) &
| |
| takesCourse.max(2, MasterCourse)
| |
| ]
| |
| print_onto(onto)
| |
|
| |
|
| # a Ph.D. student cannot take a bachelor course
| | ===Write a local SELECT query that embeds a SERVICE query to retrieve the first 100 triples about your entity to your local machine=== |
| with onto:
| |
| class Student(Thing): pass
| |
| class Course(Thing): pass
| |
| class takesCourse(Student >> Course): pass
| |
| class BachelorCourse(Course): pass
| |
| class PhDStudent(Student):
| |
| is_a = [
| |
| takesCourse.max(0, BachelorCourse)
| |
| ]
| |
| print_onto(onto)
| |
|
| |
|
| # ...alternative solution
| | <syntaxhighlight lang="SPARQL"> |
| clean_onto(onto)
| | PREFIX wd: <http://www.wikidata.org/entity/> |
| with onto:
| |
| class Student(Thing): pass
| |
| class Course(Thing): pass
| |
| class takesCourse(Student >> Course): pass
| |
| class BachelorCourse(Course): pass
| |
| class PhDStudent(Student):
| |
| is_a = [Student & ~ takesCourse.some(BachelorCourse)]
| |
| print_onto(onto)
| |
|
| |
|
| # a graduate is a student with at least one degree
| | SELECT * WHERE { |
| with onto:
| | SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { |
| class Student(Thing): pass
| | SELECT * WHERE { |
| class Degree(Thing): pass
| | wd:Q42 ?p ?o . |
| class hasDegree(Student >> Degree): pass | | } LIMIT 100 |
| class Graduate(Student):
| | } |
| equivalent_to = [Student & hasDegree.some(Degree)]
| | } |
| | </syntaxhighlight> |
|
| |
|
| # test with individual
| | ===Change the SELECT query to an INSERT query that adds the Wikidata triples your local repository=== |
| with onto:
| |
| cade = Student()
| |
| infosci = Degree()
| |
| cade.hasDegree.append(infosci)
| |
|
| |
|
| from owlready2 import sync_reasoner
| | <syntaxhighlight lang="SPARQL"> |
| | PREFIX wd: <http://www.wikidata.org/entity/> |
|
| |
|
| print(onto.Graduate in cade.is_a)
| | INSERT { |
| sync_reasoner()
| | wd:Q42 ?p ?o . |
| print(onto.Graduate in cade.is_a)
| | } WHERE { |
| | SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { |
| | SELECT * WHERE { |
| | wd:Q42 ?p ?o . |
| | } LIMIT 100 |
| | } |
| | } |
| | </syntaxhighlight> |
|
| |
|
| # if you have more time:
| | ===Use a FILTER statement to only SELECT primary triples in this sense.=== |
| # populate the ontology with individuals
| |
| # a straight A student is a student that has only A grades
| |
| clean_onto(onto)
| |
| with onto:
| |
| class Grade(Thing): pass
| |
| class charGrade(Grade >> XSDString): pass
| |
| grade_A = Grade()
| |
| grade_A.charGrade = ['A']
| |
| grade_B = Grade()
| |
| grade_B.charGrade = ['B']
| |
| # ...
| |
| Grade.equivalent_to.append(OneOf([
| |
| grade_A, grade_B, # ...
| |
| ]))
| |
|
| |
|
| class Student(Thing): pass
| | <syntaxhighlight lang="SPARQL"> |
| class hasGrade(Student >> Grade): pass
| | PREFIX wd: <http://www.wikidata.org/entity/> |
| class GradeA(Grade):
| |
| equivalent_to = [OneOf([grade_A])]
| |
| class StraightAStudent(Student):
| |
| equivalent_to = [
| |
| Student &
| |
| hasGrade.some(GradeA) & hasGrade.only(GradeA)
| |
| ]
| |
| # add individual
| |
| cade = Student()
| |
| cade.hasGrade.append(grade_A)
| |
| print_onto(onto)
| |
|
| |
|
| from owlready2 import sync_reasoner
| | SELECT * WHERE { |
| print(onto.StraightAStudent in cade.is_a)
| | wd:Q42 ?p ?o . |
| sync_reasoner()
| | |
| print(onto.StraightAStudent in cade.is_a)
| | FILTER (STRSTARTS(STR(?p), STR(wdt:))) |
| | | FILTER (STRSTARTS(STR(?o), STR(wd:))) |
| from owlready2 import close_world
| | } LIMIT 100 |
| close_world(onto) # because of the "only"-restriction
| |
| sync_reasoner()
| |
| print(onto.StraightAStudent in cade.is_a)
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==Knowledge graph embeddings== | | ===Use Wikidata's in-built SERVICE wikibase:label to get labels for all the object resources=== |
| https://colab.research.google.com/drive/1sHusTjvmHtV6PkzIatLTMPzHuseAF6N1?usp=sharing
| |
|
| |
|
| | <syntaxhighlight lang="SPARQL"> |
| | PREFIX wd: <http://www.wikidata.org/entity/> |
|
| |
|
| =More miscellaneous examples=
| | SELECT ?p ?oLabel WHERE { |
| | | wd:Q42 ?p ?o . |
| | | |
| ===Printing the triples of the Graph in a readable way===
| | FILTER (STRSTARTS(STR(?p), STR(wdt:))) |
| <syntaxhighlight>
| | FILTER (STRSTARTS(STR(?o), STR(wd:))) |
| # The turtle format has the purpose of being more readable for humans.
| | |
| print(g.serialize(format="turtle"))
| | SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } |
| | |
| | } LIMIT 100 |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ===Coding Tasks Lab 1=== | | ===Edit your query (by relaxing the FILTER expression) so it also returns triples where the object has DATATYPE xsd:string.=== |
| <syntaxhighlight>
| |
| from rdflib import Graph, Namespace, URIRef, BNode, Literal
| |
| from rdflib.namespace import RDF, FOAF, XSD
| |
|
| |
|
| g = Graph()
| | <syntaxhighlight lang="SPARQL"> |
| ex = Namespace("http://example.org/")
| | PREFIX wd: <http://www.wikidata.org/entity/> |
|
| |
|
| g.add((ex.Cade, ex.married, ex.Mary))
| | SELECT ?p ?oLabel ?o WHERE { |
| g.add((ex.France, ex.capital, ex.Paris))
| | wd:Q42 ?p ?o . |
| g.add((ex.Cade, ex.age, Literal("27", datatype=XSD.integer)))
| | |
| g.add((ex.Mary, ex.age, Literal("26", datatype=XSD.integer)))
| | FILTER (STRSTARTS(STR(?p), STR(wdt:))) |
| g.add((ex.Mary, ex.interest, ex.Hiking))
| | FILTER ( |
| g.add((ex.Mary, ex.interest, ex.Chocolate))
| | STRSTARTS(STR(?o), STR(wd:)) || # comment out this whole line to see only string literals! |
| g.add((ex.Mary, ex.interest, ex.Biology))
| | DATATYPE(?o) = xsd:string |
| g.add((ex.Mary, RDF.type, ex.Student))
| | ) |
| g.add((ex.Paris, RDF.type, ex.City))
| | |
| g.add((ex.Paris, ex.locatedIn, ex.France))
| | SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } |
| g.add((ex.Cade, ex.characteristic, ex.Kind))
| | |
| g.add((ex.Mary, ex.characteristic, ex.Kind))
| | } LIMIT 100 |
| g.add((ex.Mary, RDF.type, FOAF.Person))
| | </syntaxhighlight> |
| g.add((ex.Cade, RDF.type, FOAF.Person))
| |
|
| |
|
| # OR
| | ===Relax the FILTER expression again so it also returns triples with these three predicates (rdfs:label, skos:altLabel and schema:description) === |
|
| |
|
| g = Graph()
| | <syntaxhighlight lang="SPARQL"> |
| | | PREFIX wd: <http://www.wikidata.org/entity/> |
| ex = Namespace('http://example.org/')
| |
| | |
| g.add((ex.Cade, FOAF.name, Literal("Cade", datatype=XSD.string)))
| |
| g.add((ex.Mary, FOAF.name, Literal("Mary", datatype=XSD.string)))
| |
| g.add((ex.Cade, RDF.type, FOAF.Person))
| |
| g.add((ex.Mary, RDF.type, FOAF.Person))
| |
| g.add((ex.Mary, RDF.type, ex.Student))
| |
| g.add((ex.Cade, ex.Married, ex.Mary))
| |
| g.add((ex.Cade, FOAF.age, Literal('27', datatype=XSD.int)))
| |
| g.add((ex.Mary, FOAF.age, Literal('26', datatype=XSD.int)))
| |
| g.add((ex.Paris, RDF.type, ex.City))
| |
| g.add((ex.France, ex.Capital, ex.Paris))
| |
| g.add((ex.Mary, FOAF.interest, ex.hiking))
| |
| g.add((ex.Mary, FOAF.interest, ex.Chocolate))
| |
| g.add((ex.Mary, FOAF.interest, ex.biology))
| |
| g.add((ex.France, ex.City, ex.Paris))
| |
| g.add((ex.Mary, ex.characteristic, ex.kind))
| |
| g.add((ex.Cade, ex.characteristic, ex.kind))
| |
| g.add((ex.France, RDF.type, ex.Country))
| |
| | |
| | |
| print(g.serialize(format="turtle"))
| |
|
| |
|
| | SELECT ?p ?oLabel ?o WHERE { |
| | wd:Q42 ?p ?o . |
| | |
| | FILTER ( |
| | (STRSTARTS(STR(?p), STR(wdt:)) && # comment out these three lines to see only fingerprint literals! |
| | STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string) |
| | || |
| | (?p IN (rdfs:label, skos:altLabel, schema:description) && |
| | DATATYPE(?o) = rdf:langString && LANG(?o) = "en") |
| | ) |
| | |
| | SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } |
| | |
| | } LIMIT 100 |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==Basic RDF programming== | | ===Try to restrict the FILTER expression again so that, when the predicate is rdfs:label, skos:altLabel and schema:description, the object must have LANG "en" === |
|
| |
|
| ===Different ways to create an address=== | | <syntaxhighlight lang="SPARQL"> |
| | PREFIX wikibase: <http://wikiba.se/ontology#> |
| | PREFIX bd: <http://www.bigdata.com/rdf#> |
| | PREFIX wd: <http://www.wikidata.org/entity/> |
| | PREFIX wdt: <http://www.wikidata.org/prop/direct/> |
| | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> |
| | PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> |
| | PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> |
| | PREFIX skos: <http://www.w3.org/2004/02/skos/core#> |
| | PREFIX schema: <http://schema.org/> |
|
| |
|
| <syntaxhighlight> | | SELECT * WHERE { |
| | SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { |
| | SELECT ?p ?oLabel ?o WHERE { |
| | wd:Q42 ?p ?o . |
|
| |
|
| from rdflib import Graph, Namespace, URIRef, BNode, Literal
| | FILTER ( |
| from rdflib.namespace import RDF, FOAF, XSD
| | (STRSTARTS(STR(?p), STR(wdt:)) && |
| | STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string) |
| | || |
| | (?p IN (rdfs:label, skos:altLabel, schema:description) && |
| | DATATYPE(?o) = rdf:langString && LANG(?o) = "en") |
| | ) |
|
| |
|
| g = Graph()
| | SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } |
| ex = Namespace("http://example.org/")
| |
|
| |
|
| | } LIMIT 100 |
| | } |
| | } |
| | </syntaxhighlight> |
|
| |
|
| # How to represent the address of Cade Tracey. From probably the worst solution to the best.
| | ===Change the SELECT query to an INSERT query that adds the Wikidata triples your local repository === |
|
| |
|
| # Solution 1 - | | <syntaxhighlight lang="SPARQL"> |
| # Make the entire address into one Literal. However, Generally we want to separate each part of an address into their own triples. This is useful for instance if we want to find only the streets where people live. | | PREFIX wikibase: <http://wikiba.se/ontology#> |
| | PREFIX bd: <http://www.bigdata.com/rdf#> |
| | PREFIX wd: <http://www.wikidata.org/entity/> |
| | PREFIX wdt: <http://www.wikidata.org/prop/direct/> |
| | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> |
| | PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> |
| | PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> |
| | PREFIX skos: <http://www.w3.org/2004/02/skos/core#> |
| | PREFIX schema: <http://schema.org/> |
|
| |
|
| g.add((ex.Cade_Tracey, ex.livesIn, Literal("1516_Henry_Street, Berkeley, California 94709, USA")))
| | INSERT { |
| | wd:Q42 ?p ?o . |
| | ?o rdfs:label ?oLabel . |
| | } WHERE { |
| | SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { |
| | SELECT ?p ?oLabel ?o WHERE { |
| | wd:Q42 ?p ?o . |
|
| |
|
| | FILTER ( |
| | (STRSTARTS(STR(?p), STR(wdt:)) && |
| | STRSTARTS(STR(?o), STR(wd:)) || DATATYPE(?o) = xsd:string) |
| | || |
| | (?p IN (rdfs:label, skos:altLabel, schema:description) && |
| | DATATYPE(?o) = rdf:langString && LANG(?o) = "en") |
| | ) |
|
| |
|
| # Solution 2 -
| | SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } |
| # Seperate the different pieces information into their own triples
| |
|
| |
|
| g.add((ex.Cade_tracey, ex.street, Literal("1516_Henry_Street")))
| | } LIMIT 500 |
| g.add((ex.Cade_tracey, ex.city, Literal("Berkeley")))
| | } |
| g.add((ex.Cade_tracey, ex.state, Literal("California")))
| | } |
| g.add((ex.Cade_tracey, ex.zipcode, Literal("94709")))
| | </syntaxhighlight> |
| g.add((ex.Cade_tracey, ex.country, Literal("USA")))
| |
|
| |
|
| | ==If you have more time == |
| | ===You must therefore REPLACE all wdt: prefixes of properties with wd: prefixes and BIND the new URI AS a new variable, for example ?pw. === |
|
| |
|
| # Solution 3 - Some parts of the addresses can make more sense to be resources than Literals.
| | <syntaxhighlight lang="SPARQL"> |
| # Larger concepts like a city or state are typically represented as resources rather than Literals, but this is not necesarilly a requirement in the case that you don't intend to say more about them.
| | PREFIX wd: <http://www.wikidata.org/entity/> |
|
| |
|
| g.add((ex.Cade_tracey, ex.street, Literal("1516_Henry_Street")))
| | SELECT ?pwLabel ?oLabel WHERE { |
| g.add((ex.Cade_tracey, ex.city, ex.Berkeley))
| | wd:Q42 ?p ?o . |
| g.add((ex.Cade_tracey, ex.state, ex.California))
| | |
| g.add((ex.Cade_tracey, ex.zipcode, Literal("94709")))
| | FILTER (STRSTARTS(STR(?p), STR(wdt:))) |
| g.add((ex.Cade_tracey, ex.country, ex.USA))
| | FILTER (STRSTARTS(STR(?o), STR(wd:))) |
| | |
| | BIND (IRI(REPLACE(STR(?p), STR(wdt:), STR(wd:))) AS ?pw) |
|
| |
|
| | SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } |
| | |
| | } LIMIT 100 |
| | </syntaxhighlight> |
|
| |
|
| # Solution 4
| | ===Now you can go back to the SELECT statement that returned primary triples with only resource objects (not literal objects or fingerprints). Extend it so it also includes primary triples "one step out", i.e., triples where the subjects are objects of triples involving your reference entity. === |
| # Grouping of the information into an Address. We can Represent the address concept with its own URI OR with a Blank Node.
| |
| # One advantage of this is that we can easily remove the entire address, instead of removing each individual part of the address.
| |
| # Solution 4 or 5 is how I would recommend to make addresses. Here, ex.CadeAddress could also be called something like ex.address1 or so on, if you want to give each address a unique ID.
| |
|
| |
|
| # Address URI - CadeAdress | | <syntaxhighlight lang="SPARQL"> |
| | PREFIX wikibase: <http://wikiba.se/ontology#> |
| | PREFIX bd: <http://www.bigdata.com/rdf#> |
| | PREFIX wd: <http://www.wikidata.org/entity/> |
| | PREFIX wdt: <http://www.wikidata.org/prop/direct/> |
| | PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> |
| | PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> |
| | PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> |
| | PREFIX skos: <http://www.w3.org/2004/02/skos/core#> |
| | PREFIX schema: <http://schema.org/> |
|
| |
|
| g.add((ex.Cade_Tracey, ex.address, ex.CadeAddress))
| | INSERT { |
| g.add((ex.CadeAddress, RDF.type, ex.Address))
| | wd:Q42 ?p1 ?o1 . |
| g.add((ex.CadeAddress, ex.street, Literal("1516 Henry Street")))
| | ?o1 rdfs:label ?o1Label . |
| g.add((ex.CadeAddress, ex.city, ex.Berkeley))
| | ?o1 ?p2 ?o2 . |
| g.add((ex.CadeAddress, ex.state, ex.California))
| | ?o2 rdfs:label ?o2Label . |
| g.add((ex.CadeAddress, ex.postalCode, Literal("94709")))
| | } WHERE { |
| g.add((ex.CadeAddress, ex.country, ex.USA))
| | SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> { |
| | SELECT ?p1 ?o1Label ?o1 ?p2 ?o2Label ?o2 WHERE { |
| | wd:Q42 ?p1 ?o1 . |
| | ?o1 ?p2 ?o2 . |
|
| |
|
| # OR
| | FILTER ( |
| | STRSTARTS(STR(?p1), STR(wdt:)) && |
| | STRSTARTS(STR(?o1), STR(wd:)) && |
| | STRSTARTS(STR(?p2), STR(wdt:)) && |
| | STRSTARTS(STR(?o2), STR(wd:)) |
| | ) |
|
| |
|
| # Blank node for Address.
| | SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". } |
| address = BNode()
| |
| g.add((ex.Cade_Tracey, ex.address, address))
| |
| g.add((address, RDF.type, ex.Address))
| |
| g.add((address, ex.street, Literal("1516 Henry Street", datatype=XSD.string)))
| |
| g.add((address, ex.city, ex.Berkeley))
| |
| g.add((address, ex.state, ex.California))
| |
| g.add((address, ex.postalCode, Literal("94709", datatype=XSD.string)))
| |
| g.add((address, ex.country, ex.USA))
| |
|
| |
|
| | } LIMIT 500 |
| | } |
| | } |
| | </syntaxhighlight> |
|
| |
|
| # Solution 5 using existing vocabularies for address
| | =CSV to RDF (Lab 7)= |
|
| |
|
| # (in this case https://schema.org/PostalAddress from schema.org).
| | <syntaxhighlight lang="Python"> |
| # Also using existing ontology for places like California. (like http://dbpedia.org/resource/California from dbpedia.org)
| |
|
| |
|
| schema = Namespace("https://schema.org/")
| | #Imports |
| dbp = Namespace("https://dpbedia.org/resource/")
| | import re |
| | from pandas import * |
| | from numpy import nan |
| | from rdflib import Graph, Namespace, URIRef, Literal, RDF, XSD, FOAF |
| | from spotlight import SpotlightException, annotate |
|
| |
|
| g.add((ex.Cade_Tracey, schema.address, ex.CadeAddress))
| | SERVER = "https://api.dbpedia-spotlight.org/en/annotate" |
| g.add((ex.CadeAddress, RDF.type, schema.PostalAddress))
| | # Test around with the confidence, and see how many names changes depending on the confidence. |
| g.add((ex.CadeAddress, schema.streetAddress, Literal("1516 Henry Street")))
| | # However, be aware that anything lower than this (0.83) it will replace James W. McCord and other names that includes James with LeBron James |
| g.add((ex.CadeAddress, schema.addresCity, dbp.Berkeley))
| | CONFIDENCE = 0.83 |
| g.add((ex.CadeAddress, schema.addressRegion, dbp.California))
| |
| g.add((ex.CadeAddress, schema.postalCode, Literal("94709")))
| |
| g.add((ex.CadeAddress, schema.addressCountry, dbp.United_States))
| |
|
| |
|
| </syntaxhighlight>
| | # This function uses DBpedia Spotlight, which was not a part of the CSV lab this year. |
| | def annotate_entity(entity, filters={'types': 'DBpedia:Person'}): |
| | annotations = [] |
| | try: |
| | annotations = annotate(address=SERVER, text=entity, confidence=CONFIDENCE, filters=filters) |
| | except SpotlightException as e: |
| | print(e) |
| | return annotations |
|
| |
|
| ===Typed Literals===
| |
| <syntaxhighlight>
| |
| from rdflib import Graph, Literal, Namespace
| |
| from rdflib.namespace import XSD
| |
| g = Graph() | | g = Graph() |
| ex = Namespace("http://example.org/") | | ex = Namespace("http://example.org/") |
| | g.bind("ex", ex) |
|
| |
|
| g.add((ex.Cade, ex.age, Literal(27, datatype=XSD.integer)))
| | #Pandas' read_csv function to load russia-investigation.csv |
| g.add((ex.Cade, ex.gpa, Literal(3.3, datatype=XSD.float)))
| | df = read_csv("russia-investigation.csv") |
| g.add((ex.Cade, FOAF.name, Literal("Cade Tracey", datatype=XSD.string)))
| | #Replaces all instances of nan to None type with numpy's nan |
| g.add((ex.Cade, ex.birthday, Literal("2006-01-01", datatype=XSD.date)))
| | df = df.replace(nan, None) |
| </syntaxhighlight>
| |
|
| |
|
| | #Function that prepares the values to be added to the graph as a URI (ex infront) or Literal |
| | def prepareValue(row): |
| | if row == None: #none type |
| | value = Literal(row) |
| | elif isinstance(row, str) and re.match(r'\d{4}-\d{2}-\d{2}', row): #date |
| | value = Literal(row, datatype=XSD.date) |
| | elif isinstance(row, bool): #boolean value (true / false) |
| | value = Literal(row, datatype=XSD.boolean) |
| | elif isinstance(row, int): #integer |
| | value = Literal(row, datatype=XSD.integer) |
| | elif isinstance(row, str): #string |
| | value = URIRef(ex + row.replace('"', '').replace(" ", "_").replace(",","").replace("-", "_")) |
| | elif isinstance(row, float): #float |
| | value = Literal(row, datatype=XSD.float) |
|
| |
|
| ===Writing and reading graphs/files===
| | return value |
|
| |
|
| <syntaxhighlight>
| | #Convert the non-semantic CSV dataset into a semantic RDF |
| # Writing the graph to a file on your system. Possible formats = turtle, n3, xml, nt.
| | def csv_to_rdf(df): |
| g.serialize(destination="triples.txt", format="turtle")
| | for index, row in df.iterrows(): |
| | id = URIRef(ex + "Investigation_" + str(index)) |
| | investigation = prepareValue(row["investigation"]) |
| | investigation_start = prepareValue(row["investigation-start"]) |
| | investigation_end = prepareValue(row["investigation-end"]) |
| | investigation_days = prepareValue(row["investigation-days"]) |
| | indictment_days = prepareValue(row["indictment-days "]) |
| | cp_date = prepareValue(row["cp-date"]) |
| | cp_days = prepareValue(row["cp-days"]) |
| | overturned = prepareValue(row["overturned"]) |
| | pardoned = prepareValue(row["pardoned"]) |
| | american = prepareValue(row["american"]) |
| | outcome = prepareValue(row["type"]) |
| | name_ex = prepareValue(row["name"]) |
| | president_ex = prepareValue(row["president"]) |
|
| |
|
| # Parsing a local file
| | #Spotlight Search |
| parsed_graph = g.parse(location="triples.txt", format="turtle")
| | name = annotate_entity(str(row['name'])) |
| | president = annotate_entity(str(row['president']).replace(".", "")) |
| | |
| #Adds the triples to the graph |
| | g.add((id, RDF.type, ex.Investigation)) |
| | g.add((id, ex.investigation, investigation)) |
| | g.add((id, ex.investigation_start, investigation_start)) |
| | g.add((id, ex.investigation_end, investigation_end)) |
| | g.add((id, ex.investigation_days, investigation_days)) |
| | g.add((id, ex.indictment_days, indictment_days)) |
| | g.add((id, ex.cp_date, cp_date)) |
| | g.add((id, ex.cp_days, cp_days)) |
| | g.add((id, ex.overturned, overturned)) |
| | g.add((id, ex.pardoned, pardoned)) |
| | g.add((id, ex.american, american)) |
| | g.add((id, ex.outcome, outcome)) |
|
| |
|
| # Parsing a remote endpoint like Dbpedia
| | #Spotlight search |
| dbpedia_graph = g.parse("http://dbpedia.org/resource/Pluto")
| | #Name |
| </syntaxhighlight>
| | try: |
| | g.add((id, ex.person, URIRef(name[0]["URI"]))) |
| | except: |
| | g.add((id, ex.person, name_ex)) |
|
| |
|
| ===Graph Binding===
| | #President |
| <syntaxhighlight>
| | try: |
| #Graph Binding is useful for at least two reasons:
| | g.add((id, ex.president, URIRef(president[0]["URI"]))) |
#(1) We no longer need to specify prefixes with SPARQL queries if they are already bound to the graph.
| | except: |
| #(2) When serializing the graph, the serialization will show the correct expected prefix
| | g.add((id, ex.president, president_ex)) |
| # instead of default namespace names ns1, ns2 etc.
| |
|
| |
|
| g = Graph()
| | csv_to_rdf(df) |
| | | print(g.serialize()) |
| ex = Namespace("http://example.org/")
| | g.serialize("lab7.ttl", format="ttl") |
| dbp = Namespace("http://dbpedia.org/resource/")
| |
| schema = Namespace("https://schema.org/")
| |
|
| |
|
| g.bind("ex", ex)
| |
| g.bind("dbp", dbp)
| |
| g.bind("schema", schema)
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ===Collection Example=== | | =JSON-LD (Lab 8)= |
| | == Task 1) Basic JSON-LD == |
|
| |
|
| <syntaxhighlight> | | <syntaxhighlight lang="JSON-LD"> |
| from rdflib import Graph, Namespace
| |
| from rdflib.collection import Collection
| |
|
| |
|
| | { |
| | "@context": { |
| | "@base": "http://example.org/", |
| | "edges": "http://example.org/triple", |
| | "start": "http://example.org/source", |
| | "rel": "http://exaxmple.org/predicate", |
| | "end": "http://example.org/object", |
| | "Person" : "http://example.org/Person", |
| | "birthday" : { |
| | "@id" : "http://example.org/birthday", |
| | "@type" : "xsd:date" |
| | }, |
| | "nameEng" : { |
| | "@id" : "http://example.org/en/name", |
| | "@language" : "en" |
| | }, |
| | "nameFr" : { |
| | "@id" : "http://example.org/fr/name", |
| | "@language" : "fr" |
| | }, |
| | "nameCh" : { |
| | "@id" : "http://example.org/ch/name", |
| | "@language" : "ch" |
| | }, |
| | "age" : { |
| | "@id" : "http://example.org/age", |
| | "@type" : "xsd:int" |
| | }, |
| | "likes" : "http://example.org/games/likes", |
| | "haircolor" : "http://example.org/games/haircolor" |
| | }, |
| | "@graph": [ |
| | { |
| | "@id": "people/Jeremy", |
| | "@type": "Person", |
| | "birthday" : "1987.1.1", |
| | "nameEng" : "Jeremy", |
| | "age" : 26 |
| | }, |
| | { |
| | "@id": "people/Tom", |
| | "@type": "Person" |
| | }, |
| | { |
| | "@id": "people/Ju", |
| | "@type": "Person", |
| | "birthday" : "2001.1.1", |
| | "nameCh" : "Ju", |
| | "age" : 22, |
| | "likes" : "bastketball" |
| | }, |
| | { |
| | "@id": "people/Louis", |
| | "@type": "Person", |
| | "birthday" : "1978.1.1", |
| | "haircolor" : "Black", |
| | "nameFr" : "Louis", |
| | "age" : 45 |
| | }, |
| | {"edges" : [ |
| | { |
| | "start" : "people/Jeremy", |
| | "rel" : "knows", |
| | "end" : "people/Tom" |
| | }, |
| | { |
| | "start" : "people/Tom", |
| | "rel" : "knows", |
| | "end" : "people/Louis" |
| | }, |
| | { |
| | "start" : "people/Louis", |
| | "rel" : "teaches", |
| | "end" : "people/Ju" |
| | }, |
| | { |
| | "start" : "people/Ju", |
| | "rel" : "plays", |
| | "end" : "people/Jeremy" |
| | }, |
| | { |
| | "start" : "people/Ju", |
| | "rel" : "plays", |
| | "end" : "people/Tom" |
| | } |
| | ]} |
| | ] |
| | } |
|
| |
|
| # Sometimes we want to add many objects or subjects for the same predicate at once.
| |
| # In these cases we can use Collection() to save some time.
| |
| # In this case I want to add all countries that Emma has visited at once.
| |
|
| |
| b = BNode()
| |
| g.add((ex.Emma, ex.visit, b))
| |
| Collection(g, b,
| |
| [ex.Portugal, ex.Italy, ex.France, ex.Germany, ex.Denmark, ex.Sweden])
| |
|
| |
| # OR
| |
|
| |
| g.add((ex.Emma, ex.visit, ex.EmmaVisits))
| |
| Collection(g, ex.EmmaVisits,
| |
| [ex.Portugal, ex.Italy, ex.France, ex.Germany, ex.Denmark, ex.Sweden])
| |
|
| |
|
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==SPARQL== | | == Task 2 & 3) Retrieving JSON-LD from ConceptNet / Programming JSON-LD in Python == |
|
| |
|
| Also see the [[SPARQL Examples]] page!
| | <syntaxhighlight lang="Python"> |
|
| |
|
| ===Querying a local ("in memory") graph===
| | import rdflib |
|
| |
|
| Example contents of the file family.ttl:
| | CN_BASE = 'http://api.conceptnet.io/c/en/' |
| @prefix rex: <http://example.org/royal#> .
| |
| @prefix fam: <http://example.org/family#> .
| |
|
| |
| rex:IngridAlexandra fam:hasParent rex:HaakonMagnus .
| |
| rex:SverreMagnus fam:hasParent rex:HaakonMagnus .
| |
| rex:HaakonMagnus fam:hasParent rex:Harald .
| |
| rex:MarthaLouise fam:hasParent rex:Harald .
| |
| rex:HaakonMagnus fam:hasSister rex:MarthaLouise .
| |
|
| |
|
| import rdflib
| | g = rdflib.Graph() |
|
| | g.parse(CN_BASE+'indictment', format='json-ld') |
| g = rdflib.Graph()
| |
| g.parse("family.ttl", format='ttl')
| |
|
| |
| qres = g.query("""
| |
| PREFIX fam: <http://example.org/family#>
| |
| SELECT ?child ?sister WHERE {
| |
| ?child fam:hasParent ?parent .
| |
| ?parent fam:hasSister ?sister .
| |
| }""")
| |
| for row in qres:
| |
| print("%s has aunt %s" % row)
| |
|
| |
|
| With a prepared query, you can write the query once, and then bind some of the variables each time you use it:
| | # To download JSON object: |
| import rdflib
| |
|
| |
| g = rdflib.Graph()
| |
| g.parse("family.ttl", format='ttl')
| |
|
| |
| q = rdflib.plugins.sparql.prepareQuery(
| |
| """SELECT ?child ?sister WHERE {
| |
| ?child fam:hasParent ?parent .
| |
| ?parent fam:hasSister ?sister .
| |
| }""",
| |
| initNs = { "fam": "http://example.org/family#"})
| |
|
| |
| sm = rdflib.URIRef("http://example.org/royal#SverreMagnus")
| |
|
| |
| for row in g.query(q, initBindings={'child': sm}):
| |
| print(row)
| |
|
| |
|
| ===Select all contents of lists (rdfllib.Collection)===
| | import json |
| <syntaxhighlight>
| | import requests |
|
| |
|
# rdflib.Collection has a different internal structure so it requires a slightly more advanced query. Here I am selecting all places that Emma has visited. | | json_obj = requests.get(CN_BASE+'indictment').json() |
| | json_obj = requests.get(CN_BASE+'indictment').json() |
|
| |
|
| PREFIX ex: <http://example.org/>
| | # To change the @context: |
| PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
| |
|
| |
|
| SELECT ?visit
| | context = { |
| WHERE {
| | "@base": "http://ex.org/", |
| ex:Emma ex:visit/rdf:rest*/rdf:first ?visit
| | "edges": "http://ex.org/triple/", |
| | "start": "http://ex.org/s/", |
| | "rel": "http://ex.org/p/", |
| | "end": "http://ex.org/o/", |
| | "label": "http://ex.org/label" |
| } | | } |
| </syntaxhighlight>
| | json_obj['@context'] = context |
| | json_str = json.dumps(json_obj) |
|
| |
|
| | g = rdflib.Graph() |
| | g.parse(data=json_str, format='json-ld') |
|
| |
|
| ===Using parameters/variables in rdflib queries===
| | # To extract triples (here with labels): |
|
| |
|
| <syntaxhighlight> | | r = g.query(""" |
| from rdflib import Graph, Namespace, URIRef
| | SELECT ?s ?sLabel ?p ?o ?oLabel WHERE { |
| from rdflib.plugins.sparql import prepareQuery
| | ?edge |
| | <http://ex.org/s/> ?s ; |
| | <http://ex.org/p/> ?p ; |
| | <http://ex.org/o/> ?o . |
| | ?s <http://ex.org/label> ?sLabel . |
| | ?o <http://ex.org/label> ?oLabel . |
| | } |
| | """, initNs={'cn': CN_BASE}) |
| | print(r.serialize(format='txt').decode()) |
|
| |
|
| g = Graph()
| | # Construct a new graph: |
| ex = Namespace("http://example.org/")
| |
| g.bind("ex", ex)
| |
|
| |
|
| g.add((ex.Cade, ex.livesIn, ex.France)) | | r = g.query(""" |
| g.add((ex.Anne, ex.livesIn, ex.Norway))
| | CONSTRUCT { |
| g.add((ex.Sofie, ex.livesIn, ex.Sweden))
| | ?s ?p ?o . |
| g.add((ex.Per, ex.livesIn, ex.Norway))
| | ?s <http://ex.org/label> ?sLabel . |
| g.add((ex.John, ex.livesIn, ex.USA))
| | ?o <http://ex.org/label> ?oLabel . |
| | | } WHERE { |
| | | ?edge <http://ex.org/s/> ?s ; |
| def find_people_from_country(country):
| | <http://ex.org/p/> ?p ; |
| country = URIRef(ex + country)
| | <http://ex.org/o/> ?o . |
| q = prepareQuery(
| | ?s <http://ex.org/label> ?sLabel . |
| """
| | ?o <http://ex.org/label> ?oLabel . |
| PREFIX ex: <http://example.org/>
| | } |
| SELECT ?person WHERE {
| | """, initNs={'cn': CN_BASE}) |
| ?person ex:livesIn ?country.
| |
| }
| |
| """) | |
|
| |
|
| capital_result = g.query(q, initBindings={'country': country})
| | print(r.graph.serialize(format='ttl')) |
|
| |
|
| for row in capital_result:
| |
| print(row)
| |
|
| |
| find_people_from_country("Norway")
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ===SELECTING data from Blazegraph via Python=== | | =SHACL (Lab 9)= |
| <syntaxhighlight>
| |
|
| |
|
| from SPARQLWrapper import SPARQLWrapper, JSON
| | <syntaxhighlight lang="Python"> |
|
| |
|
| # This creates a server connection to the same URL that contains the graphic interface for Blazegraph.
| | from pyshacl import validate |
| # You also need to add "sparql" to end of the URL like below.
| | from rdflib import Graph |
|
| |
|
| sparql = SPARQLWrapper("http://localhost:9999/blazegraph/sparql")
| | data_graph = Graph() |
| | # parses the Turtle example from the task |
| | data_graph.parse("data_graph.ttl") |
|
| |
|
| # SELECT all triples in the database. | | prefixes = """ |
| | @prefix ex: <http://example.org/> . |
| | @prefix foaf: <http://xmlns.com/foaf/0.1/> . |
| | @prefix sh: <http://www.w3.org/ns/shacl#> . |
| | @prefix xsd: <http://www.w3.org/2001/XMLSchema#> . |
| | @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . |
| | """ |
|
| |
|
| sparql.setQuery("""
| | shape_graph = """ |
| SELECT DISTINCT ?p WHERE { | | ex:PUI_Shape |
| ?s ?p ?o. | | a sh:NodeShape ; |
| } | | sh:targetClass ex:PersonUnderInvestigation ; |
| """)
| | sh:property [ |
| sparql.setReturnFormat(JSON)
| | sh:path foaf:name ; |
| results = sparql.query().convert()
| | sh:minCount 1 ; #Every person under investigation has exactly one name. |
| | sh:maxCount 1 ; #Every person under investigation has exactly one name. |
| | sh:datatype rdf:langString ; #All person names must be language-tagged |
| | ] ; |
| | sh:property [ |
| | sh:path ex:chargedWith ; |
| | sh:nodeKind sh:IRI ; #The object of a charged with property must be a URI. |
| | sh:class ex:Offense ; #The object of a charged with property must be an offense. |
| | ] . |
|
| |
|
| for result in results["results"]["bindings"]: | | # --- If you have more time tasks --- |
| print(result["p"]["value"]) | | ex:User_Shape rdf:type sh:NodeShape; |
| | sh:targetClass ex:Indictment; |
| | # The only allowed values for ex:american are true, false or unknown. |
| | sh:property [ |
| | sh:path ex:american; |
| | sh:pattern "(true|false|unknown)" ; |
| | ]; |
| | |
| | # The value of a property that counts days must be an integer. |
| | sh:property [ |
| | sh:path ex:indictment_days; |
| | sh:datatype xsd:integer; |
| | ]; |
| | sh:property [ |
| | sh:path ex:investigation_days; |
| | sh:datatype xsd:integer; |
| | ]; |
| | |
| | # The value of a property that indicates a start date must be xsd:date. |
| | sh:property [ |
| | sh:path ex:investigation_start; |
| | sh:datatype xsd:date; |
| | ]; |
|
| |
|
| # SELECT all interests of Cade | | # The value of a property that indicates an end date must be xsd:date or unknown (tip: you can use sh:or (...) ). |
| | sh:property [ |
| | sh:path ex:investigation_end; |
| | sh:or ( |
| | [ sh:datatype xsd:date ] |
| | [ sh:hasValue "unknown" ] |
| | )]; |
| | |
| | # Every indictment must have exactly one FOAF name for the investigated person. |
| | sh:property [ |
| | sh:path foaf:name; |
| | sh:minCount 1; |
| | sh:maxCount 1; |
| | ]; |
| | |
| | # Every indictment must have exactly one investigated person property, and that person must have the type ex:PersonUnderInvestigation. |
| | sh:property [ |
| | sh:path ex:investigatedPerson ; |
| | sh:minCount 1 ; |
| | sh:maxCount 1 ; |
| | sh:class ex:PersonUnderInvestigation ; |
| | sh:nodeKind sh:IRI ; |
| | ] ; |
|
| |
|
| sparql.setQuery("""
| | # No URI-s can contain hyphens ('-'). |
| PREFIX ex: <http://example.org/> | | sh:property [ |
| SELECT DISTINCT ?interest WHERE {
| | sh:path ex:outcome ; |
| ex:Cade ex:interest ?interest.
| | sh:nodeKind sh:IRI ; |
| }
| | sh:pattern "^[^-]*$" ; |
| """) | | ] ; |
| sparql.setReturnFormat(JSON)
| |
| results = sparql.query().convert()
| |
|
| |
|
| for result in results["results"]["bindings"]:
| | # Presidents must be identified with URIs. |
| print(result["interest"]["value"]) | | sh:property [ |
| </syntaxhighlight>
| | sh:path ex:president ; |
| | sh:minCount 1 ; |
| | sh:class ex:President ; |
| | sh:nodeKind sh:IRI ; |
| | ] . |
| | """ |
|
| |
|
| ===Updating data from Blazegraph via Python=== | | shacl_graph = Graph() |
| <syntaxhighlight>
| | # parses the contents of a shape_graph you made in the previous task |
| from SPARQLWrapper import SPARQLWrapper, POST, DIGEST
| | shacl_graph.parse(data=prefixes+shape_graph) |
|
| |
|
| namespace = "kb"
| | # uses pySHACL's validate method to apply the shape_graph constraints to the data_graph |
| sparql = SPARQLWrapper("http://localhost:9999/blazegraph/namespace/"+ namespace + "/sparql")
| | results = validate( |
| | data_graph, |
| | shacl_graph=shacl_graph, |
| | inference='both' |
| | ) |
|
| |
|
| sparql.setMethod(POST)
| | # prints out the validation result |
| sparql.setQuery("""
| | boolean_value, results_graph, results_text = results |
| PREFIX ex: <http://example.org/>
| |
| INSERT DATA{
| |
| ex:Cade ex:interest ex:Mathematics.
| |
| }
| |
| """)
| |
|
| |
|
| results = sparql.query()
| | # print(boolean_value) |
| print(results.response.read()) | | print(results_graph.serialize(format='ttl')) |
| | # print(results_text) |
|
| |
|
| | #Write a SPARQL query to print out each distinct sh:resultMessage in the results_graph |
| | distinct_messages = """ |
| | PREFIX sh: <http://www.w3.org/ns/shacl#> |
|
| |
|
| </syntaxhighlight>
| | SELECT DISTINCT ?message WHERE { |
| ===Retrieving data from Wikidata with SparqlWrapper=== | | [] sh:result / sh:resultMessage ?message . |
| <syntaxhighlight>
| | } |
| from SPARQLWrapper import SPARQLWrapper, JSON
| | """ |
| | messages = results_graph.query(distinct_messages) |
| | for row in messages: |
| | print(row.message) |
|
| |
|
| sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
| | #each sh:resultMessage in the results_graph once, along with the number of times that message has been repeated in the results |
| # In the query I want to select all the Vitamins in wikidata. | | count_messages = """ |
| | PREFIX sh: <http://www.w3.org/ns/shacl#> |
|
| |
|
| sparql.setQuery("""
| | SELECT ?message (COUNT(?node) AS ?num_messages) WHERE { |
| SELECT ?nutrient ?nutrientLabel WHERE
| | [] sh:result ?result . |
| { | | ?result sh:resultMessage ?message ; |
| ?nutrient wdt:P279 wd:Q34956.
| | sh:focusNode ?node . |
| SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
| |
| } | | } |
| """)
| | GROUP BY ?message |
| | | ORDER BY DESC(?count) ?message |
| sparql.setReturnFormat(JSON)
| |
| results = sparql.query().convert()
| |
| | |
| for result in results["results"]["bindings"]:
| |
| print(result["nutrient"]["value"], " ", result["nutrientLabel"]["value"])
| |
| </syntaxhighlight>
| |
| | |
| | |
| More examples can be found in the example section on the official query service here: https://query.wikidata.org/.
| |
| | |
| ===Download from BlazeGraph===
| |
| | |
| <syntaxhighlight>
| |
| """
| |
| Dumps a database to a local RDF file.
| |
| You need to install the SPARQLWrapper package first...
| |
| """ | | """ |
|
| |
|
| import datetime
| | messages = results_graph.query(count_messages) |
| from SPARQLWrapper import SPARQLWrapper, RDFXML
| | for row in messages: |
| | print("COUNT MESSAGE") |
| | print(row.num_messages, " ", row.message) |
|
| |
|
| # your namespace, the default is 'kb'
| |
| ns = 'kb'
| |
|
| |
|
| # the SPARQL endpoint
| |
| endpoint = 'http://info216.i2s.uib.no/bigdata/namespace/' + ns + '/sparql'
| |
|
| |
| # - the endpoint just moved, the old one was:
| |
| # endpoint = 'http://i2s.uib.no:8888/bigdata/namespace/' + ns + '/sparql'
| |
|
| |
| # create wrapper
| |
| wrapper = SPARQLWrapper(endpoint)
| |
|
| |
| # prepare the SPARQL update
| |
| wrapper.setQuery('CONSTRUCT { ?s ?p ?o } WHERE { ?s ?p ?o }')
| |
| wrapper.setReturnFormat(RDFXML)
| |
|
| |
| # execute the SPARQL update and convert the result to an rdflib.Graph
| |
| graph = wrapper.query().convert()
| |
|
| |
| # the destination file, with code to make it timestamped
| |
| destfile = 'rdf_dumps/slr-kg4news-' + datetime.datetime.now().strftime('%Y%m%d-%H%M') + '.rdf'
| |
|
| |
| # serialize the result to file
| |
| graph.serialize(destination=destfile, format='ttl')
| |
|
| |
| # report and quit
| |
| print('Wrote %u triples to file %s .' %
| |
| (len(res), destfile))
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ===Query Dbpedia with SparqlWrapper=== | | =RDFS (Lab 10)= |
|
| |
|
| <syntaxhighlight> | | <syntaxhighlight lang="Python"> |
| from SPARQLWrapper import SPARQLWrapper, JSON
| |
|
| |
|
| sparql = SPARQLWrapper("http://dbpedia.org/sparql")
| | import owlrl |
| | from rdflib import Graph, RDF, Namespace, Literal, XSD, FOAF, RDFS |
| | from rdflib.collection import Collection |
|
| |
|
| sparql.setQuery("""
| | g = Graph() |
| PREFIX dbr: <http://dbpedia.org/resource/>
| | ex = Namespace('http://example.org/') |
| PREFIX dbo: <http://dbpedia.org/ontology/>
| |
| PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
| |
| SELECT ?comment
| |
| WHERE {
| |
| dbr:Barack_Obama rdfs:comment ?comment.
| |
| FILTER (langMatches(lang(?comment),"en"))
| |
| }
| |
| """)
| |
|
| |
|
| sparql.setReturnFormat(JSON)
| | g.bind("ex", ex) |
| results = sparql.query().convert()
| | g.bind("foaf", FOAF) |
|
| |
|
| for result in results["results"]["bindings"]:
| |
| print(result["comment"]["value"])
| |
| </syntaxhighlight>
| |
|
| |
|
| ==Lifting CSV to RDF== | | NS = { |
| | 'ex': ex, |
| | 'rdf': RDF, |
| | 'rdfs': RDFS, |
| | 'foaf': FOAF, |
| | } |
|
| |
|
| <syntaxhighlight>
| | #Write a small function that computes the RDFS closure on your graph. |
| from rdflib import Graph, Literal, Namespace, URIRef
| | def flush(): |
| from rdflib.namespace import RDF, FOAF, RDFS, OWL
| | engine = owlrl.RDFSClosure.RDFS_Semantics(g, False, False, False) |
| import pandas as pd
| | engine.closure() |
| | engine.flush_stored_triples() |
|
| |
|
| g = Graph() | | #Rick Gates was charged with money laundering and tax evasion. |
| ex = Namespace("http://example.org/") | | g.add((ex.Rick_Gates, ex.chargedWith, ex.MoneyLaundering)) |
| g.bind("ex", ex) | | g.add((ex.Rick_Gates, ex.chargedWith, ex.TaxEvasion)) |
|
| |
|
| # Load the CSV data as a pandas Dataframe. | | #When one thing that is charged with another thing, |
| csv_data = pd.read_csv("task1.csv")
| | g.add((ex.chargedWith, RDFS.domain, ex.PersonUnderInvestigation)) #the first thing (subject) is a person under investigation and |
| | g.add((ex.chargedWith, RDFS.range, ex.Offense)) #the second thing (object) is an offense. |
|
| |
|
| # Here I deal with spaces (" ") in the data. I replace them with "_" so that URI's become valid. | | #Write a SPARQL query that checks the RDF type(s) of Rick Gates and money laundering in your RDF graph. |
| csv_data = csv_data.replace(to_replace=" ", value="_", regex=True)
| | print(g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer) |
| | print(g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer) |
| | flush() |
| | print(g.query('ASK {ex:Rick_Gates rdf:type ex:PersonUnderInvestigation}', initNs=NS).askAnswer) |
| | print(g.query('ASK {ex:MoneyLaundering rdf:type ex:Offense}', initNs=NS).askAnswer) |
|
| |
|
| # Here I mark all missing/empty data as "unknown". This makes it easy to delete triples containing this later. | | #A person under investigation is a FOAF person |
| csv_data = csv_data.fillna("unknown")
| | g.add((ex.PersonUnderInvestigation, RDFS.subClassOf, FOAF.Person)) |
| | print(g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer) |
| | flush() |
| | print(g.query('ASK {ex:Rick_Gates rdf:type foaf:Person}', initNs=NS).askAnswer) |
|
| |
|
| # Loop through the CSV data, and then make RDF triples. | | #Paul Manafort was convicted for tax evasion. |
| for index, row in csv_data.iterrows(): | | g.add((ex.Paul_Manafort, ex.convictedFor, ex.TaxEvasion)) |
| # The names of the people act as subjects.
| | #the first thing is also charged with the second thing |
| subject = row['Name']
| | g.add((ex.convictedFor, RDFS.subPropertyOf, ex.chargedWith)) |
| # Create triples: e.g. "Cade_Tracey - age - 27"
| | flush() |
| g.add((URIRef(ex + subject), URIRef(ex + "age"), Literal(row["Age"])))
| | print(g.query('ASK {ex:Paul_Manafort ex:chargedWith ex:TaxEvasion}', initNs=NS).askAnswer) |
| g.add((URIRef(ex + subject), URIRef(ex + "married"), URIRef(ex + row["Spouse"])))
| |
| g.add((URIRef(ex + subject), URIRef(ex + "country"), URIRef(ex + row["Country"])))
| |
|
| |
|
| # If We want can add additional RDF/RDFS/OWL information e.g
| | print(g.serialize()) |
| g.add((URIRef(ex + subject), RDF.type, FOAF.Person))
| |
|
| |
|
| # I remove triples that I marked as unknown earlier.
| |
| g.remove((None, None, URIRef("http://example.org/unknown")))
| |
|
| |
| # Clean printing of the graph.
| |
| print(g.serialize(format="turtle").decode())
| |
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ===CSV file for above example=== | | =OWL 1 (Lab 11)= |
| | <syntaxhighlight lang="Python"> |
|
| |
|
| <syntaxhighlight>
| | from rdflib import Graph, RDFS, Namespace, RDF, FOAF, BNode, OWL, URIRef, Literal, XSD |
| "Name","Age","Spouse","Country"
| | from rdflib.collection import Collection |
| "Cade Tracey","26","Mary Jackson","US"
| | import owlrl |
| "Bob Johnson","21","","Canada"
| |
| "Mary Jackson","25","","France"
| |
| "Phil Philips","32","Catherine Smith","Japan"
| |
| </syntaxhighlight>
| |
| | |
| | |
| =Coding Tasks Lab 6=
| |
| <syntaxhighlight>
| |
| import pandas as pd | |
|
| |
|
| | g = Graph() |
| | ex = Namespace('http://example.org/') |
| | schema = Namespace('http://schema.org/') |
| | dbr = Namespace('https://dbpedia.org/page/') |
|
| |
|
| from rdflib import Graph, Namespace, URIRef, Literal, BNode
| |
| from rdflib.namespace import RDF, XSD
| |
|
| |
|
| |
| ex = Namespace("http://example.org/")
| |
| sem = Namespace("http://semanticweb.cs.vu.nl/2009/11/sem/")
| |
|
| |
| g = Graph()
| |
| g.bind("ex", ex) | | g.bind("ex", ex) |
| g.bind("sem", sem) | | # g.bind("schema", schema) |
| | g.bind("foaf", FOAF) |
|
| |
|
| | # Donald Trump and Robert Mueller are two different persons. |
| | g.add((ex.Donald_Trump, OWL.differentFrom, ex.Robert_Mueller)) |
|
| |
|
| # Removing unwanted characters | | # Actually, all the names mentioned in connection with the Mueller investigation refer to different people. |
| df = pd.read_csv('russia-investigation.csv')
| | b1 = BNode() |
| # Here I deal with spaces (" ") in the data. I replace them with "_" so that URI's become valid.
| | b2 = BNode() |
| df = df.replace(to_replace=" ", value="_", regex=True)
| | Collection(g, b2, [ex.Robert_Mueller, ex.Paul_Manafort, ex.Rick_Gates, ex.George_Papadopoulos, ex.Michael_Flynn, ex.Michael_Cohen, ex.Roger_Stone, ex.Donald_Trump]) |
| # This may seem odd, but in the data set we have a name like this:("Scooter"). So we have to remove quotation marks
| | g.add((b1, RDF.type, OWL.AllDifferent)) |
| df = df.replace(to_replace=f'"', value="", regex=True)
| | g.add((b1, OWL.distinctMembers, b2)) |
| # # Here I mark all missing/empty data as "unknown". This makes it easy to delete triples containing this later.
| |
| df = df.fillna("unknown")
| |
|
| |
|
| # Loop through the CSV data, and then make RDF triples. | | # All these people are foaf:Persons as well as schema:Persons |
| for index, row in df.iterrows():
| | g.add((FOAF.Person, OWL.equivalentClass, schema.Person)) |
| name = row['investigation']
| |
| investigation = URIRef(ex + name)
| |
| g.add((investigation, RDF.type, sem.Event))
| |
| investigation_start = row["investigation-start"]
| |
| g.add((investigation, sem.hasBeginTimeStamp, Literal(
| |
| investigation_start, datatype=XSD.datetime)))
| |
| investigation_end = row["investigation-end"]
| |
| g.add((investigation, sem.hasEndTimeStamp, Literal(
| |
| investigation_end, datatype=XSD.datetime)))
| |
| investigation_end = row["investigation-days"]
| |
| g.add((investigation, sem.hasXSDDuration, Literal(
| |
| investigation_end, datatype=XSD.Days)))
| |
| person = row["name"]
| |
| person = URIRef(ex + person)
| |
| g.add((investigation, sem.Actor, person))
| |
| result = row['type']
| |
| g.add((investigation, sem.hasSubEvent, Literal(result, datatype=XSD.string)))
| |
| overturned = row["overturned"]
| |
| g.add((investigation, ex.overtuned, Literal(overturned, datatype=XSD.boolean)))
| |
| pardoned = row["pardoned"]
| |
| g.add((investigation, ex.pardon, Literal(pardoned, datatype=XSD.boolean)))
| |
|
| |
|
g.serialize("output.ttl", format="ttl") | | # Tax evasion is a kind of bank and tax fraud. |
| print(g.serialize(format="turtle"))
| | g.add((ex.TaxEvation, RDFS.subClassOf, ex.BankFraud)) |
| | g.add((ex.TaxEvation, RDFS.subClassOf, ex.TaxFraud)) |
|
| |
|
| | # The Donald Trump involved in the Mueller investigation is dbpedia:Donald_Trump and not dbpedia:Donald_Trump_Jr. |
| | g.add((ex.Donald_Trump, OWL.sameAs, dbr.Donald_Trump)) |
| | g.add((ex.Donald_Trump, OWL.differentFrom, URIRef(dbr + "Donald_Trump_Jr."))) |
|
| |
|
| </syntaxhighlight>
| | # Congress, FBI and the Mueller investigation are foaf:Organizations. |
| | g.add((ex.Congress, RDF.type, FOAF.Organization)) |
| | g.add((ex.FBI, RDF.type, FOAF.Organization)) |
| | g.add((ex.Mueller_Investigation, RDF.type, FOAF.Organization)) |
|
| |
|
| ==RDFS==
| | # Nothing can be both a person and an organization. |
| | g.add((FOAF.Person, OWL.disjointWith, FOAF.Organization)) |
|
| |
|
| ===RDFS-plus (OWL) Properties===
| | # Leading an organization is a way of being involved in an organization. |
| <syntaxhighlight>
| | g.add((ex.leading, RDFS.subPropertyOf, ex.involved)) |
| g.add((ex.married, RDF.type, OWL.SymmetricProperty))
| |
| g.add((ex.married, RDF.type, OWL.IrreflexiveProperty)) | |
| g.add((ex.livesWith, RDF.type, OWL.ReflexiveProperty))
| |
| g.add((ex.livesWith, RDF.type, OWL.SymmetricProperty))
| |
| g.add((ex.sibling, RDF.type, OWL.TransitiveProperty))
| |
| g.add((ex.sibling, RDF.type, OWL.SymmetricProperty))
| |
| g.add((ex.sibling, RDF.type, OWL.IrreflexiveProperty))
| |
| g.add((ex.hasFather, RDF.type, OWL.FunctionalProperty))
| |
| g.add((ex.hasFather, RDF.type, OWL.AsymmetricProperty))
| |
| g.add((ex.hasFather, RDF.type, OWL.IrreflexiveProperty))
| |
| g.add((ex.fatherOf, RDF.type, OWL.AsymmetricProperty))
| |
| g.add((ex.fatherOf, RDF.type, OWL.IrreflexiveProperty))
| |
| | |
| # Sometimes there is no definite answer, and it comes down to how we want to model our properties
| |
# e.g. is livesWith a transitive property? Usually yes, but we may also want to specify that a child lives with both of her divorced parents.
| |
# which means that: (mother livesWith child & child livesWith father) != mother livesWith father. Which makes it non-transitive.
| |
| </syntaxhighlight>
| |
|
| |
|
| <!--
| | # Being a campaign manager or an advisor for is a way of supporting someone. |
| ==Lifting XML to RDF==
| | g.add((ex.campaignManagerTo, RDFS.subPropertyOf, ex.supports)) |
| <syntaxhighlight>
| | g.add((ex.advisorTo, RDFS.subPropertyOf, ex.supports)) |
| from rdflib import Graph, Literal, Namespace, URIRef
| |
| from rdflib.namespace import RDF, XSD, RDFS
| |
| import xml.etree.ElementTree as ET
| |
|
| |
|
| g = Graph() | | # Donald Trump is a politician and a Republican. |
| ex = Namespace("http://example.org/TV/") | | g.add((ex.Donald_Trump, RDF.type, ex.Politician)) |
| prov = Namespace("http://www.w3.org/ns/prov#")
| | g.add((ex.Donald_Trump, RDF.type, ex.Republican)) |
| g.bind("ex", ex) | |
| g.bind("prov", prov)
| |
|
| |
|
| tree = ET.parse("tv_shows.xml")
| | # A Republican politician is both a politician and a Republican. |
| root = tree.getroot()
| | g.add((ex.RepublicanPolitician, RDFS.subClassOf, ex.Politician)) |
| | g.add((ex.RepublicanPolitician, RDFS.subClassOf, ex.Republican)) |
|
| |
|
| for tv_show in root.findall('tv_show'):
| | #hasBusinessPartner |
| show_id = tv_show.attrib["id"]
| | g.add((ex.Paul_Manafort, ex.hasBusinessPartner, ex.Rick_Gates)) |
| title = tv_show.find("title").text
| | g.add((ex.hasBusinessPartner, RDF.type, OWL.SymmetricProperty)) |
| | g.add((ex.hasBusinessPartner, RDF.type, OWL.IrreflexiveProperty)) |
|
| |
|
| g.add((URIRef(ex + show_id), ex.title, Literal(title, datatype=XSD.string)))
| | #adviserTo |
| g.add((URIRef(ex + show_id), RDF.type, ex.TV_Show))
| | g.add((ex.Michael_Flynn, ex.adviserTo, ex.Donald_Trump)) |
| | g.add((ex.adviserTo, RDF.type, OWL.IrreflexiveProperty)) |
| | # Not necessarily asymmetric as it's not a given that they couldn't be advisors to each other |
|
| |
|
| for actor in tv_show.findall("actor"):
| | #wasLyingTo |
| first_name = actor.find("firstname").text
| | g.add((ex.Rick_Gates_Lying, ex.wasLyingTo, ex.FBI)) |
| last_name = actor.find("lastname").text
| | g.add((ex.wasLyingTo, RDF.type, OWL.IrreflexiveProperty)) |
| full_name = first_name + "_" + last_name
| | # Not asymmetric as the subject and object could lie to each other; also in this context, the FBI can lie to you |
|
| |
| g.add((URIRef(ex + show_id), ex.stars, URIRef(ex + full_name)))
| |
| g.add((URIRef(ex + full_name), ex.starsIn, URIRef(title)))
| |
| g.add((URIRef(ex + full_name), RDF.type, ex.Actor))
| |
|
| |
|
| print(g.serialize(format="turtle").decode())
| | #presidentOf |
| </syntaxhighlight>
| | g.add((ex.Donald_Trump, ex.presidentOf, ex.USA)) |
| | g.add((ex.presidentOf, RDF.type, OWL.AsymmetricProperty)) |
| | g.add((ex.presidentOf, RDF.type, OWL.IrreflexiveProperty)) |
| | g.add((ex.presidentOf, RDF.type, OWL.FunctionalProperty)) #can only be president of one country |
| | #not inversefunctionalproperty as Bosnia has 3 presidents https://www.culturalworld.org/do-any-countries-have-more-than-one-president.htm |
|
| |
|
| | #hasPresident |
| | g.add((ex.USA, ex.hasPresident, ex.Donald_Trump)) |
| | g.add((ex.hasPresident, RDF.type, OWL.AsymmetricProperty)) |
| | g.add((ex.hasPresident, RDF.type, OWL.IrreflexiveProperty)) |
| | g.add((ex.hasPresident, RDF.type, OWL.InverseFunctionalProperty)) #countries do not share their president with another |
| | #not functionalproperty as a country (Bosnia) can have more than one president |
|
| |
|
| | #Closure |
| | owlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(g) |
|
| |
|
| | #Serialization |
| | print(g.serialize(format="ttl")) |
| | # g.serialize("lab8.xml", format="xml") #serializes to XML file |
|
| |
|
| ===RDFS inference with RDFLib===
| |
| You can use the OWL-RL package to add inference capabilities to RDFLib. It can be installed using the pip install command:
| |
| <syntaxhighlight>
| |
| pip install owlrl
| |
| </syntaxhighlight> | | </syntaxhighlight> |
| Or download it from [https://github.com/RDFLib/OWL-RL GitHub] and copy the ''owlrl'' subfolder into your project folder next to your Python files.
| |
|
| |
|
| [https://owl-rl.readthedocs.io/en/latest/owlrl.html OWL-RL documentation.]
| | =OWL 2 (Lab 12)= |
| | <syntaxhighlight lang="Python"> |
|
| |
|
| Example program to get you started. In this example we are creating the graph using sparql.update, but it is also possible to parse the data from a file.
| | @prefix : <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> . |
| <syntaxhighlight> | | @prefix dc: <http://purl.org/dc/terms#> . |
| import rdflib.plugins.sparql.update
| | @prefix io: <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> . |
| import owlrl.RDFSClosure
| | @prefix dbr: <http://dbpedia.org/resource/> . |
| | @prefix owl: <http://www.w3.org/2002/07/owl#> . |
| | @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . |
| | @prefix xml: <http://www.w3.org/XML/1998/namespace> . |
| | @prefix xsd: <http://www.w3.org/2001/XMLSchema#> . |
| | @prefix foaf: <http://xmlns.com/foaf/0.1/> . |
| | @prefix prov: <http://www.w3.org/ns/prov#> . |
| | @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . |
| | @base <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#> . |
|
| |
|
| g = rdflib.Graph()
| | <http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology> rdf:type owl:Ontology . |
|
| |
|
| ex = rdflib.Namespace('http://example.org#')
| | ################################################################# |
| g.bind('', ex)
| | # Object Properties |
| | ################################################################# |
|
| |
|
| g.update("""
| | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#indictedIn |
| PREFIX ex: <http://example.org#>
| | io:indictedIn rdf:type owl:ObjectProperty ; |
| PREFIX owl: <http://www.w3.org/2002/07/owl#>
| | rdfs:subPropertyOf io:involvedIn ; |
| INSERT DATA {
| | rdfs:domain io:InvestigatedPerson ; |
| ex:Socrates rdf:type ex:Man .
| | rdfs:range io:Investigation . |
| ex:Man rdfs:subClassOf ex:Mortal .
| |
| }""")
| |
|
| |
|
| rdfs = owlrl.RDFSClosure.RDFS_Semantics(g, False, False, False)
| |
| # RDF_Semantics parameters:
| |
| # - graph (rdflib.Graph) – The RDF graph to be extended.
| |
| # - axioms (bool) – Whether (non-datatype) axiomatic triples should be added or not.
| |
| # - daxioms (bool) – Whether datatype axiomatic triples should be added or not.
| |
| # - rdfs (bool) – Whether RDFS inference is also done (used in subclassed only).
| |
| # For now, you will in most cases use all False in RDFS_Semtantics.
| |
|
| |
|
| # Generates the closure of the graph - generates the new entailed triples, but does not add them to the graph. | | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#investigating |
| rdfs.closure() | | io:investigating rdf:type owl:ObjectProperty ; |
| # Adds the new triples to the graph and empties the RDFS triple-container.
| | rdfs:subPropertyOf io:involvedIn ; |
| rdfs.flush_stored_triples() | | rdfs:domain io:Investigator ; |
| | rdfs:range io:Investigation . |
|
| |
|
| # Ask-query to check whether a new triple has been generated from the entailment.
| |
| b = g.query("""
| |
| PREFIX ex: <http://example.org#>
| |
| ASK {
| |
| ex:Socrates rdf:type ex:Mortal .
| |
| }
| |
| """)
| |
| print('Result: ' + bool(b))
| |
| </syntaxhighlight>
| |
|
| |
|
| ===Language tagged RDFS labels===
| | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#involvedIn |
| <syntaxhighlight>
| | io:involvedIn rdf:type owl:ObjectProperty ; |
| from rdflib import Graph, Namespace, Literal
| | rdfs:domain foaf:Person ; |
| from rdflib.namespace import RDFS
| | rdfs:range io:Investigation . |
|
| |
|
| g = Graph()
| |
| ex = Namespace("http://example.org/")
| |
|
| |
|
| g.add((ex.France, RDFS.label, Literal("Frankrike", lang="no")))
| | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#leading |
| g.add((ex.France, RDFS.label, Literal("France", lang="en")))
| | io:leading rdf:type owl:ObjectProperty ; |
| g.add((ex.France, RDFS.label, Literal("Francia", lang="es")))
| | rdfs:subPropertyOf io:investigating ; |
| | rdfs:domain io:InvestigationLeader ; |
| | rdfs:range io:Investigation . |
|
| |
|
|
| |
|
| </syntaxhighlight>
| | ################################################################# |
| | # Data properties |
| | ################################################################# |
|
| |
|
| ==OWL==
| | ### http://purl.org/dc/elements/1.1/description |
| ===Basic inference with RDFLib===
| | <http://purl.org/dc/elements/1.1/description> rdf:type owl:DatatypeProperty ; |
| | rdfs:domain io:Investigation ; |
| | rdfs:range xsd:string . |
|
| |
|
| You can use the OWL-RL package again as for Lecture 5.
| |
|
| |
|
| Instead of:
| | ### http://www.w3.org/ns/prov#endedAtTime |
| <syntaxhighlight>
| | prov:endedAtTime rdf:type owl:DatatypeProperty , |
| # The next three lines add inferred triples to g.
| | owl:FunctionalProperty ; |
| rdfs = owlrl.RDFSClosure.RDFS_Semantics(g, False, False, False)
| | rdfs:domain io:Investigation ; |
| rdfs.closure()
| | rdfs:range xsd:dateTime . |
| rdfs.flush_stored_triples()
| |
| </syntaxhighlight>
| |
| you can write this to get both RDFS and basic RDFS Plus / OWL inference:
| |
| <syntaxhighlight>
| |
| # The next three lines add inferred triples to g. | |
| owl = owlrl.CombinedClosure.RDFS_OWLRL_Semantics(g, False, False, False) | |
| owl.closure() | |
| owl.flush_stored_triples()
| |
| </syntaxhighlight>
| |
|
| |
|
| Example updates and queries:
| |
| <syntaxhighlight>
| |
| PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
| |
| PREFIX owl: <http://www.w3.org/2002/07/owl#>
| |
| PREFIX ex: <http://example.org#>
| |
|
| |
|
| INSERT DATA {
| | ### http://www.w3.org/ns/prov#startedAtTime |
| ex:Socrates ex:hasWife ex:Xanthippe .
| | prov:startedAtTime rdf:type owl:DatatypeProperty , |
| ex:hasHusband owl:inverseOf ex:hasWife .
| | owl:FunctionalProperty ; |
| }
| | rdfs:domain io:Investigation ; |
| </syntaxhighlight>
| | rdfs:range xsd:dateTime . |
|
| |
|
| <syntaxhighlight>
| |
| ASK {
| |
| ex:Xanthippe ex:hasHusband ex:Socrates .
| |
| }
| |
| </syntaxhighlight>
| |
|
| |
|
| <syntaxhighlight>
| | ### http://xmlns.com/foaf/0.1/name |
| ASK {
| | foaf:name rdf:type owl:DatatypeProperty ; |
| ex:Socrates ^ex:hasHusband ex:Xanthippe .
| | rdfs:domain foaf:Person ; |
| }
| | rdfs:range xsd:string . |
| </syntaxhighlight>
| |
|
| |
|
| <syntaxhighlight>
| |
| INSERT DATA {
| |
| ex:hasWife rdfs:subPropertyOf ex:hasSpouse .
| |
| ex:hasSpouse rdf:type owl:SymmetricProperty .
| |
| }
| |
| </syntaxhighlight>
| |
|
| |
|
| <syntaxhighlight>
| | ### http://xmlns.com/foaf/0.1/title |
| ASK {
| | foaf:title rdf:type owl:DatatypeProperty ; |
| ex:Socrates ex:hasSpouse ex:Xanthippe .
| | rdfs:domain io:Investigation ; |
| }
| | rdfs:range xsd:string . |
| </syntaxhighlight>
| |
|
| |
|
| <syntaxhighlight>
| |
| ASK {
| |
| ex:Socrates ^ex:hasSpouse ex:Xanthippe .
| |
| }
| |
| </syntaxhighlight>
| |
|
| |
|
| | ################################################################# |
| | # Classes |
| | ################################################################# |
|
| |
|
| | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#InvestigatedPerson |
| | io:InvestigatedPerson rdf:type owl:Class ; |
| | rdfs:subClassOf io:Person ; |
| | owl:disjointWith io:Investigator . |
|
| |
|
|
| |
|
| | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Investigation |
| | io:Investigation rdf:type owl:Class . |
|
| |
|
|
| |
|
| ===XML Data for above example===
| | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#InvestigationLeader |
| <syntaxhighlight>
| | io:InvestigationLeader rdf:type owl:Class ; |
| <data>
| | rdfs:subClassOf io:Investigator . |
| <tv_show id="1050">
| |
| <title>The_Sopranos</title>
| |
| <actor>
| |
| <firstname>James</firstname>
| |
| <lastname>Gandolfini</lastname>
| |
| </actor>
| |
| </tv_show>
| |
| <tv_show id="1066">
| |
| <title>Seinfeld</title>
| |
| <actor>
| |
| <firstname>Jerry</firstname>
| |
| <lastname>Seinfeld</lastname>
| |
| </actor>
| |
| <actor>
| |
| <firstname>Julia</firstname>
| |
| <lastname>Louis-dreyfus</lastname>
| |
| </actor>
| |
| <actor>
| |
| <firstname>Jason</firstname>
| |
| <lastname>Alexander</lastname>
| |
| </actor>
| |
| </tv_show>
| |
| </data>
| |
| </syntaxhighlight>
| |
|
| |
|
| ==Lifting HTML to RDF==
| |
| <syntaxhighlight>
| |
| from bs4 import BeautifulSoup as bs, NavigableString
| |
| from rdflib import Graph, URIRef, Namespace
| |
| from rdflib.namespace import RDF
| |
|
| |
|
| g = Graph()
| | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Investigator |
| ex = Namespace("http://example.org/")
| | io:Investigator rdf:type owl:Class ; |
| g.bind("ex", ex)
| | rdfs:subClassOf io:Person . |
| | |
| html = open("tv_shows.html").read()
| |
| html = bs(html, features="html.parser")
| |
|
| |
|
| shows = html.find_all('li', attrs={'class': 'show'})
| |
| for show in shows:
| |
| title = show.find("h3").text
| |
| actors = show.find('ul', attrs={'class': 'actor_list'})
| |
| for actor in actors:
| |
| if isinstance(actor, NavigableString):
| |
| continue
| |
| else:
| |
| actor = actor.text.replace(" ", "_")
| |
| g.add((URIRef(ex + title), ex.stars, URIRef(ex + actor)))
| |
| g.add((URIRef(ex + actor), RDF.type, ex.Actor))
| |
|
| |
|
| g.add((URIRef(ex + title), RDF.type, ex.TV_Show))
| | ### http://www.semanticweb.org/bruker/ontologies/2023/2/InvestigationOntology#Person |
| | io:Person rdf:type owl:Class ; |
| | rdfs:subClassOf foaf:Person . |
|
| |
|
|
| |
|
| print(g.serialize(format="turtle").decode())
| | ### http://xmlns.com/foaf/0.1/Person |
| </syntaxhighlight>
| | foaf:Person rdf:type owl:Class . |
|
| |
|
| ===HTML code for the example above===
| |
| <syntaxhighlight>
| |
| <!DOCTYPE html>
| |
| <html>
| |
| <head>
| |
| <meta charset="utf-8">
| |
| <title></title>
| |
| </head>
| |
| <body>
| |
| <div class="tv_shows">
| |
| <ul>
| |
| <li class="show">
| |
| <h3>The_Sopranos</h3>
| |
| <div class="irrelevant_data"></div>
| |
| <ul class="actor_list">
| |
| <li>James Gandolfini</li>
| |
| </ul>
| |
| </li>
| |
| <li class="show">
| |
| <h3>Seinfeld</h3>
| |
| <div class="irrelevant_data"></div>
| |
| <ul class="actor_list">
| |
| <li >Jerry Seinfeld</li>
| |
| <li>Jason Alexander</li>
| |
| <li>Julia Louis-Dreyfus</li>
| |
| </ul>
| |
| </li>
| |
| </ul>
| |
| </div>
| |
| </body>
| |
| </html>
| |
| </syntaxhighlight>
| |
|
| |
| ==Web APIs with JSON==
| |
| <syntaxhighlight>
| |
| import requests
| |
| import json
| |
| import pprint
| |
|
| |
| # Retrieve JSON data from API service URL. Then load it with the json library as a json object.
| |
| url = "http://api.geonames.org/postalCodeLookupJSON?postalcode=46020&#country=ES&username=demo"
| |
| data = requests.get(url).content.decode("utf-8")
| |
| data = json.loads(data)
| |
| pprint.pprint(data)
| |
| </syntaxhighlight>
| |
|
| |
|
| | ################################################################# |
| | # Individuals |
| | ################################################################# |
|
| |
|
| ==JSON-LD==
| | ### http://dbpedia.org/resource/Donald_Trump |
| | dbr:Donald_Trump rdf:type owl:NamedIndividual ; |
| | foaf:name "Donald Trump" . |
|
| |
|
| <syntaxhighlight>
| |
| import rdflib
| |
|
| |
|
| g = rdflib.Graph()
| | ### http://dbpedia.org/resource/Elizabeth_Prelogar |
| | dbr:Elizabeth_Prelogar rdf:type owl:NamedIndividual ; |
| | io:investigating <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ; |
| | foaf:name "Elizabeth Prelogar" . |
|
| |
|
| example = """
| |
| {
| |
| "@context": {
| |
| "name": "http://xmlns.com/foaf/0.1/name",
| |
| "homepage": {
| |
| "@id": "http://xmlns.com/foaf/0.1/homepage",
| |
| "@type": "@id"
| |
| }
| |
| },
| |
| "@id": "http://me.markus-lanthaler.com/",
| |
| "name": "Markus Lanthaler",
| |
| "homepage": "http://www.markus-lanthaler.com/"
| |
| }
| |
| """
| |
|
| |
|
| # json-ld parsing automatically deals with @contexts | | ### http://dbpedia.org/resource/Michael_Flynn |
| g.parse(data=example, format='json-ld')
| | dbr:Michael_Flynn rdf:type owl:NamedIndividual ; |
| | foaf:name "Michael Flynn" . |
|
| |
|
| # serialisation does expansion by default
| |
| for line in g.serialize(format='json-ld').decode().splitlines():
| |
| print(line)
| |
|
| |
|
| # by supplying a context object, serialisation can do compaction | | ### http://dbpedia.org/resource/Paul_Manafort |
| context = {
| | dbr:Paul_Manafort rdf:type owl:NamedIndividual ; |
| "foaf": "http://xmlns.com/foaf/0.1/"
| | io:indictedIn <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ; |
| }
| | foaf:name "Paul Manafort" . |
| for line in g.serialize(format='json-ld', context=context).decode().splitlines():
| |
| print(line)
| |
| </syntaxhighlight>
| |
|
| |
|
|
| |
|
| <div class="credits" style="text-align: right; direction: ltr; margin-left: 1em;">''INFO216, UiB, 2017-2020. All code examples are [https://creativecommons.org/choose/zero/ CC0].'' </div>
| | ### http://dbpedia.org/resource/Robert_Mueller |
| | dbr:Robert_Mueller rdf:type owl:NamedIndividual ; |
| | io:leading <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> ; |
| | foaf:name "Robert Mueller" . |
|
| |
|
| ==OWL - Complex Classes and Restrictions==
| |
| <syntaxhighlight>
| |
| import owlrl
| |
| from rdflib import Graph, Literal, Namespace, BNode
| |
| from rdflib.namespace import RDF, OWL, RDFS
| |
| from rdflib.collection import Collection
| |
|
| |
|
| g = Graph()
| | ### http://dbpedia.org/resource/Roger_Stone |
| ex = Namespace("http://example.org/")
| | dbr:Roger_Stone rdf:type owl:NamedIndividual ; |
| g.bind("ex", ex)
| | foaf:name "Roger Stone" . |
| g.bind("owl", OWL)
| |
|
| |
|
| # a Season is either Autumn, Winter, Spring, Summer
| |
| seasons = BNode()
| |
| Collection(g, seasons, [ex.Winter, ex.Autumn, ex.Spring, ex.Summer])
| |
| g.add((ex.Season, OWL.oneOf, seasons))
| |
|
| |
|
| # A Parent is a Father or Mother | | ### http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019) |
| b = BNode()
| | <http://dbpedia.org/resource/Special_Counsel_investigation_(2017–2019)> rdf:type owl:NamedIndividual ; |
| Collection(g, b, [ex.Father, ex.Mother])
| | foaf:title "Mueller Investigation" . |
| g.add((ex.Parent, OWL.unionOf, b))
| |
|
| |
|
| # A Woman is a person who has the "female" gender
| |
| br = BNode()
| |
| g.add((br, RDF.type, OWL.Restriction))
| |
| g.add((br, OWL.onProperty, ex.gender))
| |
| g.add((br, OWL.hasValue, ex.Female))
| |
| bi = BNode()
| |
| Collection(g, bi, [ex.Person, br])
| |
| g.add((ex.Woman, OWL.intersectionOf, bi))
| |
|
| |
|
| # A vegetarian is a Person who only eats vegetarian food | | ################################################################# |
| br = BNode()
| | # General axioms |
| g.add((br, RDF.type, OWL.Restriction))
| | ################################################################# |
| g.add((br, OWL.onProperty, ex.eats))
| |
| g.add((br, OWL.allValuesFrom, ex.VeganFood))
| |
| bi = BNode()
| |
| Collection(g, bi, [ex.Person, br])
| |
| g.add((ex.Vegetarian, OWL.intersectionOf, bi))
| |
|
| |
|
| # A vegetarian is a Person who can not eat meat.
| | [ rdf:type owl:AllDifferent ; |
| br = BNode()
| | owl:distinctMembers ( dbr:Donald_Trump |
| g.add((br, RDF.type, OWL.Restriction))
| | dbr:Elizabeth_Prelogar |
| g.add((br, OWL.onProperty, ex.eats))
| | dbr:Michael_Flynn |
| g.add((br, OWL.QualifiedCardinality, Literal(0)))
| | dbr:Paul_Manafort |
| g.add((br, OWL.onClass, ex.Meat))
| | dbr:Robert_Mueller |
| bi = BNode()
| | dbr:Roger_Stone |
| Collection(g, bi, [ex.Person, br])
| | ) |
| g.add((ex.Vegetarian, OWL.intersectionOf, bi))
| | ] . |
|
| |
|
| # A Worried Parent is a parent who has at least one sick child
| |
| br = BNode()
| |
| g.add((br, RDF.type, OWL.Restriction))
| |
| g.add((br, OWL.onProperty, ex.hasChild))
| |
| g.add((br, OWL.QualifiedMinCardinality, Literal(1)))
| |
| g.add((br, OWL.onClass, ex.Sick))
| |
| bi = BNode()
| |
| Collection(g, bi, [ex.Parent, br])
| |
| g.add((ex.WorriedParent, OWL.intersectionOf, bi))
| |
|
| |
|
| # using the restriction above, If we now write...: | | ### Generated by the OWL API (version 4.5.25.2023-02-15T19:15:49Z) https://github.com/owlcs/owlapi |
| g.add((ex.Bob, RDF.type, ex.Parent))
| |
| g.add((ex.Bob, ex.hasChild, ex.John))
| |
| g.add((ex.John, RDF.type, ex.Sick))
| |
| # ...we can infer with owl reasoning that Bob is a worried Parent even though we didn't specify it ourselves because Bob fullfills the restriction and Parent requirements.
| |
|
| |
|
| </syntaxhighlight> | | </syntaxhighlight> |
|
| |
|
| ==Protege-OWL reasoning with HermiT== | | =Using Graph Embeddings (Lab 13)= |
| | |
| | https://colab.research.google.com/drive/1WkRJUeUBVF5yVv7o0pOKfsd4pqG6369k |
|
| |
|
| [[:File:DL-reasoning-RoyalFamily-final.owl.txt | Example file]] from Lecture 13 about OWL-DL, rules and reasoning.
| | =Training Graph Embeddings (Lab 14)= |
|
| |
|
| | https://colab.research.google.com/drive/1jKpzlQ7gYTVzgphJsrK5iuMpFhkrY96q |
| --> | | --> |