Examples from the lectures

 
''This page currently shows the examples used in the Spring of 2023. It will be updated with examples from 2024 as the course progresses.''
 
==Lecture 1: Introduction to KGs==
Turtle example:
<syntaxhighlight lang="turtle">
@prefix ex: <http://example.org/> .
ex:Roger_Stone
    ex:name "Roger Stone" ;
    ex:occupation ex:lobbyist ;
    ex:significant_person ex:Donald_Trump .
ex:Donald_Trump
    ex:name "Donald Trump" .
</syntaxhighlight>
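
The same statements can also be built programmatically with rdflib, the Python library used in the later lectures. This is a minimal sketch: it only parses the Turtle above into a graph and prints it back out.
<syntaxhighlight lang="Python">
from rdflib import Graph

# the Turtle example from above, as a Python string
turtle_data = '''
@prefix ex: <http://example.org/> .
ex:Roger_Stone
    ex:name "Roger Stone" ;
    ex:occupation ex:lobbyist ;
    ex:significant_person ex:Donald_Trump .
ex:Donald_Trump
    ex:name "Donald Trump" .
'''

g = Graph()
g.parse(data=turtle_data, format='turtle')

print(len(g))  # number of triples (4)
print(g.serialize(format='turtle'))
</syntaxhighlight>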
 
==Lecture 2: RDF==
Blank nodes for anonymity, or when we have not decided on a URI:
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
 
EX = Namespace('http://example.org/')
 
g = Graph()
g.bind('ex', EX)  # this is why the line '@prefix ex: <http://example.org/> .'
                  # and the 'ex:' prefix are used when we print out Turtle later
 
robertMueller = BNode()
g.add((robertMueller, RDF.type, EX.Human))
g.add((robertMueller, FOAF.name, Literal('Robert Mueller', lang='en')))
g.add((robertMueller, EX.position_held, Literal('Director of the Federal Bureau of Investigation', lang='en')))
 
print(g.serialize(format='turtle'))
</syntaxhighlight>
 
Blank nodes used to group related properties:
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
 
EX = Namespace('http://example.org/')
 
g = Graph()
g.bind('ex', EX)
 
# This is a task in Exercise 2
 
print(g.serialize(format='turtle'))
</syntaxhighlight>
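
To illustrate the idea without giving away the exercise, here is a sketch that groups two made-up address properties under one blank node; the property names ''ex:address'', ''ex:street'' and ''ex:city'' are invented for this example.
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, Literal, BNode

EX = Namespace('http://example.org/')

g = Graph()
g.bind('ex', EX)

# hypothetical example: group street and city under one blank "address" node
address = BNode()
g.add((EX.Robert_Mueller, EX.address, address))
g.add((address, EX.street, Literal('935 Pennsylvania Avenue NW')))
g.add((address, EX.city, Literal('Washington DC')))

print(g.serialize(format='turtle'))
</syntaxhighlight>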
 
Literals:
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
 
EX = Namespace('http://example.org/')
 
g = Graph()
g.bind('ex', EX)
 
g.add((EX.Robert_Mueller, RDF.type, EX.Human))
g.add((EX.Robert_Mueller, FOAF.name, Literal('Robert Mueller', lang='en')))
g.add((EX.Robert_Mueller, FOAF.name, Literal('رابرت مولر', lang='fa')))
g.add((EX.Robert_Mueller, DC.description, Literal('sixth director of the FBI', datatype=XSD.string)))
g.add((EX.Robert_Mueller, EX.start_time, Literal(2001, datatype=XSD.integer)))
 
print(g.serialize(format='turtle'))
</syntaxhighlight>
 
Alternative container (open):
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
 
EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')
 
g = Graph()
g.bind('ex', EX)
g.bind('schema', SCHEMA)
 
muellerReportArchives = BNode()
g.add((muellerReportArchives, RDF.type, RDF.Alt))
 
archive1 = 'https://archive.org/details/MuellerReportVolume1Searchable/' \
                    'Mueller%20Report%20Volume%201%20Searchable/'
archive2 = 'https://edition.cnn.com/2019/04/18/politics/full-mueller-report-pdf/index.html'
archive3 = 'https://www.politico.com/story/2019/04/18/mueller-report-pdf-download-text-file-1280891'
 
g.add((muellerReportArchives, RDFS.member, Literal(archive1, datatype=XSD.anyURI)))
g.add((muellerReportArchives, RDFS.member, Literal(archive2, datatype=XSD.anyURI)))
g.add((muellerReportArchives, RDFS.member, Literal(archive3, datatype=XSD.anyURI)))
 
g.add((EX.Mueller_Report, RDF.type, FOAF.Document))
g.add((EX.Mueller_Report, DC.contributor, EX.Robert_Mueller))
g.add((EX.Mueller_Report, SCHEMA.archivedAt, muellerReportArchives))
 
print(g.serialize(format='turtle'))
</syntaxhighlight>
 
Sequence container (open):
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
 
EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')
 
g = Graph()
g.bind('ex', EX)
g.bind('schema', SCHEMA)
 
donaldTrumpSpouses = BNode()
g.add((donaldTrumpSpouses, RDF.type, RDF.Seq))
g.add((donaldTrumpSpouses, RDF._1, EX.IvanaTrump))
g.add((donaldTrumpSpouses, RDF._2, EX.MarlaMaples))
g.add((donaldTrumpSpouses, RDF._3, EX.MelaniaTrump))
 
g.add((EX.Donald_Trump, SCHEMA.spouse, donaldTrumpSpouses))
 
print(g.serialize(format='turtle'))
</syntaxhighlight>
 
Collection (closed list):
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, Literal, BNode, RDF, RDFS, DC, FOAF, XSD
from rdflib.collection import Collection

EX = Namespace('http://example.org/')
SCHEMA = Namespace('http://schema.org/')

g = Graph()
g.bind('ex', EX)
g.bind('schema', SCHEMA)

donaldTrumpSpouses = BNode()
Collection(g, donaldTrumpSpouses, [
    EX.IvanaTrump, EX.MarlaMaples, EX.MelaniaTrump
])
g.add((EX.Donald_Trump, SCHEMA.spouse, donaldTrumpSpouses))

print(g.serialize(format='turtle'))
g.serialize(destination='s02_Donald_Trump_spouses_list.ttl', format='turtle')
</syntaxhighlight>
 
==Lecture 3: SPARQL==
 
The KG4News knowledge graph can be accessed online [http://bg.newsangler.uib.no here (namespace ''kb'')] (read-only), or you can load the Turtle file into your own GraphDB repository.
(Remember to save the file with the ''.ttl'' extension. You can use ''http://i2s.uib.no/kg4news/'' as the base URI.)
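
If you want to experiment outside Blazegraph or GraphDB, you can also load a local copy of the Turtle file into rdflib and run the queries there. This is a sketch; the filename ''kg4news.ttl'' is only an assumption about what you saved the downloaded file as.
<syntaxhighlight lang="Python">
from rdflib import Graph

# load a local copy of the KG4News Turtle file (the filename is an assumption)
g = Graph()
g.parse('kg4news.ttl', format='turtle')

# run one of the SPARQL queries below against the local graph
results = g.query('''
    SELECT DISTINCT ?p WHERE {
        ?s ?p ?o .
    }
    LIMIT 10
''')
for row in results:
    print(row.p)
</syntaxhighlight>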
 
===Limit===
<syntaxhighlight lang="SPARQL">
SELECT ?p WHERE {
    ?s ?p ?o .
}
LIMIT 10
</syntaxhighlight>
 
===List distinct properties only (with limit)===
<syntaxhighlight lang="SPARQL">
SELECT DISTINCT ?p WHERE {
    ?s ?p ?o .
}
LIMIT 10
</syntaxhighlight>
 
===Limit with offset===
<syntaxhighlight lang="SPARQL">
SELECT DISTINCT ?p WHERE {
    ?s ?p ?o .
}
LIMIT 10 OFFSET 9
</syntaxhighlight>
 
===List rdf:types===
<syntaxhighlight lang="SPARQL">
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
 
SELECT DISTINCT ?t WHERE {
    ?s rdf:type ?t .
}
LIMIT 50
</syntaxhighlight>
 
===URI for Tim Berners-Lee===
<syntaxhighlight lang="SPARQL">
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
 
SELECT DISTINCT ?person WHERE {
    ?person foaf:name ?name . 
    FILTER(CONTAINS(?name, "Berners-Lee"))
}
LIMIT 10  # best to use a limit in case something goes wrong
</syntaxhighlight>
 
===Describe Tim Berners-Lee===
<syntaxhighlight lang="SPARQL">
DESCRIBE <http://i2s.uib.no/kg4news/author/1432678629>
</syntaxhighlight>
DESCRIBE returns a new RDF graph, whereas SELECT returns a table of rows.
 
===Papers that mention "Semantic Web" in the title===
<syntaxhighlight lang="SPARQL">
PREFIX ss: <http://semanticscholar.org/>
 
SELECT DISTINCT ?paper ?title WHERE {
    ?paper ss:title ?title . 
    FILTER(CONTAINS(STR(?title), "Semantic Web"))
}
LIMIT 50
</syntaxhighlight>
 
===Alternative filter that ignores capitalisation (lower/upper case)===
<syntaxhighlight lang="SPARQL">
    FILTER(REGEX(STR(?title), "Semantic Web", "i"))
</syntaxhighlight>
 
===Authors sorted by name===
<syntaxhighlight lang="SPARQL">
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
 
SELECT DISTINCT * WHERE {
    ?author foaf:name ?name . 
}
ORDER BY ?name
LIMIT 10
</syntaxhighlight>
 
===Sorted by descending name instead===
<syntaxhighlight lang="SPARQL">
ORDER BY DESC(?name)
</syntaxhighlight>
 
===Count papers by author===
<syntaxhighlight lang="SPARQL">
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX ss: <http://semanticscholar.org/>
 
SELECT DISTINCT ?author (COUNT(?paper) AS ?count) WHERE {
    ?author rdf:type ss:Author . 
    ?paper  rdf:type ss:Paper ;
            dct:contributor ?author .
}
GROUP BY ?author
LIMIT 10
</syntaxhighlight>
 
===Only list the most prolific authors===
<syntaxhighlight lang="SPARQL">
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX ss: <http://semanticscholar.org/>
 
SELECT DISTINCT ?author (COUNT(?paper) AS ?count) WHERE {
    ?author rdf:type ss:Author . 
    ?paper  rdf:type ss:Paper ;
            dct:contributor ?author .
}
GROUP BY ?author
HAVING (?count >= 10)  # similar to a filter expression
LIMIT 10              # include limit when you test
</syntaxhighlight>
 
===Order by descending paper count===
<syntaxhighlight lang="SPARQL">
SELECT ... {
    ...
}
GROUP BY ?person
HAVING (?count > 10)
ORDER BY DESC(?count)
LIMIT 10
</syntaxhighlight>
 
===Order by descending paper count and then by author name===
<syntaxhighlight lang="SPARQL">
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX ss: <http://semanticscholar.org/>
 
SELECT DISTINCT ?person (SAMPLE(?name) AS ?anyName) (COUNT(?paper) AS ?count) WHERE {
    ?person rdf:type ss:Author ;
            foaf:name ?name . 
    ?paper  rdf:type ss:Paper ;
            ss:title ?title ;
            dct:contributor ?person .
}
GROUP BY ?person
HAVING (?count > 10)
ORDER BY DESC(?count) ?anyName
LIMIT 10
</syntaxhighlight>
 
===Embedded Wikidata query===
This is a toy example only. Embedded queries like these are better suited for situations where the same URIs are used in more than one triple store and you want to combine data. But Wikidata and KG4News do not use the same URIs. So instead, the example searches for similar labels, and this is something graph databases may not be optimised for. Moreover, Wikidata uses language-tagged strings whereas KG4News uses plain strings, so the labels cannot even be directly compared.
<syntaxhighlight lang="SPARQL">
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
 
SELECT DISTINCT ?enname ?person ?wdperson WHERE {
    BIND("T. Berners-Lee"@en AS ?enname)  # language-tagger label
 
    SERVICE <https://query.wikidata.org/bigdata/namespace/wdq/sparql> {
        # return a Wikidata identifier (URI) with this label as alternative
        SELECT ?wdperson ?enname WHERE {
            ?wdperson skos:altLabel ?enname .
        }
        LIMIT 10  # we use limit in case the label does not match
    }
           
    BIND(STR(?enname) AS ?name)  # the same label, but with language tag removed
    # return a KG4News identifier (URI) with this label as name
    ?person foaf:name ?name . 
 
}
LIMIT 10
</syntaxhighlight>
 
===Add one or more triples===
From now on you need a Blazegraph that allows writing, for example the [http://sandbox.i2s.uib.no I2S sandbox]. Remember to ''create a new namespace'' first and make sure you ''use'' it afterwards.
<syntaxhighlight lang="SPARQL">
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX kg: <http://i2s.uib.no/kg4news/>
PREFIX ss: <http://semanticscholar.org/>
 
INSERT DATA {  # note the Turtle-like syntax
    kg:paper_123 rdf:type ss:Paper ;
            ss:title "Semantic Knowledge Graphs for the News: A Review"@en ;
            kg:year 2022 ;
            dct:contributor kg:auth_456, kg:auth_789 .
}
</syntaxhighlight>
 
===Remove one or more triples===
<syntaxhighlight lang="SPARQL">
PREFIX kg: <http://i2s.uib.no/kg4news/>
 
DELETE DATA
{
    kg:paper_123 kg:year 2022  .
}
</syntaxhighlight>
 
===Pattern-based addition and/or removal of triples===
<syntaxhighlight lang="SPARQL">
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dct: <http://purl.org/dc/terms/>
PREFIX kg: <http://i2s.uib.no/kg4news/>
PREFIX ss: <http://semanticscholar.org/>
 
DELETE {
    ?paper dct:contributor kg:auth_456
}
INSERT {
    ?paper dct:contributor kg:auth_654
}
WHERE {  # the patterns are similar to SELECT patterns
    ?paper dct:contributor kg:auth_456
}
</syntaxhighlight>
 
==Lecture 8: SHACL==
The examples are for use with the [https://shacl.org/playground/ interactive SHACL Playground].
==Lecture 8: RDFS==
Create two new GraphDB repositories, one with RDFS inference and one with no inference. Try the SPARQL statements below on both so that you can compare the results.
To test ''rdfs:subClassOf'':
<syntaxhighlight lang="ttl">
PREFIX kg: <http://i2s.uib.no/kg4news/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
INSERT DATA {
kg:LOD_paper rdf:type kg:MainPaper .
    kg:MainPaper rdfs:subClassOf kg:Paper .
}
</syntaxhighlight>
<syntaxhighlight lang="ttl">
PREFIX kg: <http://i2s.uib.no/kg4news/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
ASK {
kg:LOD_paper rdf:type kg:Paper .
}
</syntaxhighlight>
To test ''rdfs:domain'':
<syntaxhighlight lang="ttl">
PREFIX kg: <http://i2s.uib.no/kg4news/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
INSERT DATA {
kg:TBL kg:author kg:LOD_paper .
    kg:author rdfs:domain kg:Author .
}
</syntaxhighlight>
<syntaxhighlight lang="ttl">
PREFIX kg: <http://i2s.uib.no/kg4news/>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
ASK {
kg:TBL rdf:type kg:Author .
}
</syntaxhighlight>
To see all the triples in the KG:
<syntaxhighlight lang="ttl">
SELECT * WHERE {
  ?s ?p ?o
}
</syntaxhighlight>
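If you want to reproduce the inference step in Python instead of GraphDB, the ''owlrl'' package can materialise the RDFS closure of an rdflib graph. This is a sketch (not from the lecture) and assumes ''rdflib'' and ''owlrl'' are installed:
<syntaxhighlight lang="Python">
from rdflib import Graph, Namespace, RDF, RDFS
import owlrl

KG = Namespace('http://i2s.uib.no/kg4news/')

g = Graph()
g.add((KG.LOD_paper, RDF.type, KG.MainPaper))
g.add((KG.MainPaper, RDFS.subClassOf, KG.Paper))

# materialise the RDFS entailments directly in the graph
owlrl.DeductiveClosure(owlrl.RDFS_Semantics).expand(g)

# the inferred triple is now present, like the ASK query above in GraphDB
print((KG.LOD_paper, RDF.type, KG.Paper) in g)  # True
</syntaxhighlight>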
==Lecture 11: Graph embeddings==
Here is the example that uses graph embeddings from ConceptNet. The ''numberbatch-en-19.08.txt.gz'' file can be [https://github.com/commonsense/conceptnet-numberbatch downloaded from GitHub].
Precomputed pickle files are [https://universityofbergen-my.sharepoint.com/:f:/g/personal/andreas_opdahl_uib_no/ElJv6Kl6zfJNmQrXMiz5eDoBogFpktjx8xoWgxOgAI70tw?e=99cClh available here (requires UiB login)].
<syntaxhighlight lang="python">
import pickle
import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
NUMBERBATCH_FILE = './conceptnet/numberbatch-en-19.08.txt'
EMBEDDINGS_FILE = './conceptnet/numberbatch-embeddings.pickle'
N_NEIGHBOURS = 5
NUMBERBATCH_NEIGHBOURS_FILE = f'./conceptnet/numberbatch-neighbours-{N_NEIGHBOURS}.pickle'
# load numberbatch
reload = False  # set to True once the embeddings pickle file exists, to load it instead of rebuilding it
if not reload:
    embeddings = {}
    with open(NUMBERBATCH_FILE) as file:
        n_terms, n_dims = map(int, file.readline().strip().split(' '))
        for line in file:
            splits = line.strip().split(' ')
            embeddings[splits[0]] = np.array(list(map(float, splits[1:])))
    with open(EMBEDDINGS_FILE, 'wb') as file:
        pickle.dump(embeddings, file)
else:
    with open(EMBEDDINGS_FILE, 'rb') as file:
        embeddings = pickle.load(file)
# find nearest neighbours
reload = False  # set to True once the neighbours pickle file exists, to load it instead of refitting
if not reload:
    knn = NearestNeighbors(n_neighbors=N_NEIGHBOURS, algorithm='ball_tree')
    np_embeddings = np.array(list(embeddings.values()))
    knn.fit(np_embeddings)
    with open(NUMBERBATCH_NEIGHBOURS_FILE, 'wb') as file:
        pickle.dump(knn, file)
else:
    with open(NUMBERBATCH_NEIGHBOURS_FILE, 'rb') as file:
        knn = pickle.load(file)
   
# test
distances, indexes = knn.kneighbors([embeddings['bergen']])
for dist, idx in zip(distances[0], indexes[0]):
    print(f'{dist}:\t{list(embeddings.keys())[idx]}')
def vector_neighbours(vector):
    distances, indexes = knn.kneighbors([vector])
    for dist, idx in zip(distances[0], indexes[0]):
        print(f'{dist}:\t{list(embeddings.keys())[idx]}')
vector_neighbours(embeddings['oslo'] - embeddings['norway'] + embeddings['france'])  # analogy: Oslo is to Norway as ? is to France
</syntaxhighlight>
