migration

master
manetta 3 years ago
commit 09e3837795

Binary file not shown.

File diff suppressed because it is too large Load Diff

After

Width:  |  Height:  |  Size: 232 KiB

Binary file not shown.

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large Load Diff

After

Width:  |  Height:  |  Size: 165 KiB

Binary file not shown.

Binary file not shown.

@ -0,0 +1,44 @@
/**
 * Returns a uniformly distributed random integer between 0 and max,
 * both inclusive.
 *
 * The previous Math.round(Math.random() * max) was biased: the
 * endpoints 0 and max each got only half the probability of the
 * interior values. Flooring over (max + 1) keeps the same [0, max]
 * range but makes every value equally likely.
 *
 * @param {number} max - upper bound (inclusive).
 * @returns {number} integer in [0, max].
 */
function getRandom(max) {
    return Math.floor(Math.random() * (max + 1));
}
/**
 * Fetches every file in the wiki category "Special Issue 5" from the
 * MediaWiki API (JSONP) and scatters the thumbnails at random positions
 * inside div.background, each with a random stacking order.
 *
 * @param {{w: number, h: number}} window_size - viewport size in px.
 */
function place_imgs(window_size){
    $.ajax({
        dataType: 'jsonp',
        url : 'https://pzwiki.wdka.nl/mw-mediadesign/api.php?format=json&action=query&generator=categorymembers&gcmtype=file&gcmtitle=Category:Special%20Issue%205&prop=imageinfo&iiprop=url&iiurlheight=150',
        success : function(json) {
            var pages = json.query.pages;
            // `pages` is an object keyed by page id, not an array, so the
            // old `pages.length` was undefined and the z-index below was
            // always NaN (i.e. never applied). Count the keys instead.
            var page_count = Object.keys(pages).length;
            // `var` keeps the loop variable local; the original `key`
            // leaked into the global scope.
            for (var key in pages){
                var info = pages[key].imageinfo[0];
                // Largest offsets that keep the whole thumbnail inside
                // the viewport.
                var img_max_left = window_size.w - info.thumbwidth;
                var img_max_top = window_size.h - info.thumbheight;
                // 'class' is quoted: it is a reserved word in older
                // JavaScript engines.
                var img_el = jQuery('<img />', { src: info.thumburl, 'class': 'background' });
                $('div.background').append(img_el[0]);
                // jQuery's .css() takes (name, value) or an object map;
                // the old third '!important' argument was silently ignored
                // and is dropped here.
                img_el.css({
                    left: getRandom(img_max_left) + 'px',
                    top: getRandom(img_max_top) + 'px',
                    'z-index': getRandom(page_count).toString(),
                    display: 'block'
                });
            }
        }
    })
}
// Once the DOM is ready, fill the background with randomly placed
// thumbnails sized to the current viewport.
$(document).ready(function () {
    place_imgs({
        w: $(window).width(),
        h: $(window).height()
    });
})

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 43 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 400 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 140 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 616 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 777 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 64 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 155 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.0 MiB

@ -0,0 +1,302 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>Special Issue 5: OuNuPo</title>
<script type="text/javascript" src="jquery-3.3.1.min.js"></script>
<script type="text/javascript" src="images-mwapi.js"></script>
<meta charset="utf-8">
<meta name="description" content="" />
<meta name="keywords" content="Piet Zwart Institute, Experimental Publishing, XPUB, OuNuPo, book, scanning, software, algorithmic, Manetta Berends, Cristina Cochior, Varia, WORM Pirate Bay, DIY, book scanning, feminist, research, constraint writing, literature, text, digitisation, processing, OuLiPo" />
<meta name="author" content="Experimental Publishing " />
<meta name="application-name" content="Experimental Publishing - Special Issue #5 OuNuPo" />
<!-- for Facebook opengraph og: -->
<meta property="og:title" content="Special Issue 5 - OuNuPo - XPUB"/>
<meta property="og:type" content="website"/>
<meta property="og:locale" content="en_US"/>
<meta property="og:site_name" content="Experimental Publishing - Special Issue #5 OuNuPo" />
<meta content="https://issue.xpub.nl/img/special-issue-5.png" property="og:image">
<meta property="og:url" content="https://issue.xpub.nl/05/"/>
<meta property="og:description" content="In the Ouvroir de Numérisation Potentielle (the workshop of potential digitisation, or OuNuPo) the XPUB practitioners reflected on several topics: how culture is shaped by book scanning? Who has access and who is excluded from digital culture? How free software and open source hardware have bootstrapped a new culture of librarians? What happens to text when it becomes data that can be transformed, manipulated and analysed ad nauseam? To answer these questions, the XPUB practitioners have written software, built a bookscanner and assembled a unique printed reader." />
<link rel="stylesheet" href="style.css" type="text/css" media="screen" />
<script type="application/json" class="js-hypothesis-config">
{"showHighlights": false}
</script>
<script src="https://hypothes.is/embed.js" async></script>
</head>
<body>
<div class="background" >
</div>
<div class="content" >
<h1>Special Issue 5 - OuNuPo</h1>
<div><video width="100%" controls>
<source src="https://media.xpub.nl/special_issue_05-web.mp4" type="video/mp4">
Your browser does not support the video tag :(
Try a recent version of Firefox or Chromium!
</video></div>
<br><br><br>
<div class="image">
</div>
<p>XPUB, Varia and WORM invite you for an evening of book scanning, short presentations, discussions and software experiments in the context of text digitisation and processing.
28/03/18 - 19:00 at WORM</p>
<h2>OuNuPo, Ouvroir de Numérisation Potentielle, the workshop of potential digitisation</h2>
<img src="images/try_scanning_loop.gif" alt="scanning" style="width: 50%;float: right;">
<p>From January until the end of March 2018 the practitioners of the Media Design Experimental Publishing Master course (XPUB) of the Piet Zwart Institute, in collaboration with Manetta Berends &amp; Cristina Cochior (Varia) and the WORM Pirate Bay, have set sail on the vast sea of DIY book scanning, feminist research methodologies, constraint writing, algorithmic literature and the cultures of text digitisation and processing.</p>
<p>The term OuNuPo is derived from OuLiPo (Ouvroir de littérature potentielle), founded in 1960. OuLiPo is a mostly French speaking gathering of writers and mathematicians interested in constrained writing techniques. A famous technique is for instance the lipogram that generates texts in which one or more letters have been excluded. OuLiPo eventually led to OuXPo to expand these creative constraints to other practices (OuCiPo for film making, OuPeinPo for painting, etc). Following this expansion, XPUB launches OuNuPo, Ouvroir de Numérisation Potentielle, the workshop of potential digitisation, turning the book scanner as a platform for media design and publishing experiments.</p>
<p>In the past three months, the XPUB practitioners have used OuNuPo as a means to reflect on several topics: how culture is shaped by book scanning? Who has access and who is excluded from digital culture? How free software and open source hardware have bootstrapped a new culture of librarians? What happens to text when it becomes data that can be transformed, manipulated and analysed ad nauseam?</p>
<p>To answer these questions, the XPUB practitioners have written software and assembled a unique printed reader, informed by critical and feminist research methodologies. The text selection explores the themes of the digital transfer of cultural biases, Techno/Cyber/Xeno-Feminism, oral culture in the context of knowledge sharing, shadow libraries, database narratives, gender and future librarians. The content of the reader will be scanned by a DIY book scanner built in the past months, and processed by different software processes and performances written by the XPUB practitioners, from chat bots to concrete poetry generators and speech recognition feedback loops.</p>
<h2>Inside the workshop of potential digitisation:</h2>
<p>To approach the workshop of potential digitisation, the following strategy was adopted: two book scanners were built using a variation of the Archivist Book Scanner, a 2014 public domain (CC0 licensed) hardware design developed within the DIY Book Scanner community. Next to that, a unique reader was put together in the form of 6 books on scanning cultures, edited, designed and produced by the XPUB practitioners. Each book is a compilation of 5 to 10 annotated texts addressing a specific question, or topic, relevant to the practitioners. The 6 books are gathered inside a cloth, folded according to the Japanese Furoshiki art of wrapping. Finally, instead of using the book scanner as a mere text scanning and PDF creating apparatus, each XPUB practitioners wrote their own text processing software to echo, reflect upon, or explore further their reading material as a means to articulate through code the two levels of textual interpretation and dissemination: the human and the machine. Using the book scanner and the software they wrote, they will scan and make public the reader, not as one-to-one digital copy like a downloadable PDF file, but as the output of a series of software experiments.</p>
<h3>Chapter 1 - Alice Strete</h3>
<em>Techno/Cyber/Xeno-Feminism + carlandre & overunder</em>
<pre>
output/carlandre.txt: ocr/output.txt
cat $< | python3 src/carlandre.py > $(@)
output/overunder: ocr/output.txt
python3 src/overunder.py
</pre>
<img src="images/Xeno.jpg" width="80%" />
<p>The Intimate and Possibly Subversive Relationship Between Women and Machines Reader explores topics from women's introduction into the technological workforce, the connection between weaving and programming, and using technology in favour of the feminist movement. One major concept that appears throughout the reader is an almost mystical connection between women and software writing, embedded deep in women's tradition of weaving not just threads, but networks. Does software have a gender?</p>
<img src="images/22.png" width="80%" />
<p>Echoing her selection of texts, Alice proposes two software-based transformations of her reader: carlandre and overunder. carlandre is a program that generates a pattern inspired by the concrete poetry of Carl Andre, it creates a vertical wave of words whose lengths go from ascending to descending and so on. overunder is inspired by the relationship between weaving and programming, this interpreted language written in Python translates simple weaving instructions into a digital interpretation of weaving on text.</p>
<h3>Chapter 2 - Joca van der Horst</h3>
<em>Who is the Librarian + Reading the Structure</em>
<pre>
reading_structure: ocr/output.txt
## Analyzes OCR'ed text using a Part of Speech (POS) tagger. Outputs a string of tags (e.g. nouns, verbs, adjectives, and adverbs). Dependencies: python3's nltk, jinja2, weasyprint
mkdir -p output/reading_structure
cp src/reading_structure/jquery.min.js output/reading_structure
cp src/reading_structure/script.js output/reading_structure
cp src/reading_structure/style.css output/reading_structure
cat $&lt; | python3 src/reading_structure/reading_structure.py
weasyprint -s src/reading_structure/print-noun.css output/reading_structure/index.html output/reading_structure/poster_noun.pdf
weasyprint -s src/reading_structure/print-adv.css output/reading_structure/index.html output/reading_structure/poster_adv.pdf
weasyprint -s src/reading_structure/print-dppt.css output/reading_structure/index.html output/reading_structure/poster_dppt.pdf
weasyprint -s src/reading_structure/print-stopword.css output/reading_structure/index.html output/reading_structure/poster_stopword.pdf
weasyprint -s src/reading_structure/print-neutral.css output/reading_structure/index.html output/reading_structure/poster_neutral.pdf
weasyprint -s src/reading_structure/print-entity.css output/reading_structure/index.html output/reading_structure/poster_named_entities.pdf
x-www-browser output/reading_structure/index.html
</pre>
<img src="images/800px-Reader_joca_inside.jpg" width="80%" />
<p>With Who is the Librarian: The gendered image of the librarian and the information scientist, Joca explores two frequent gender stereotypes: librarianship as a job for women and information science as a male-dominated field. The selection of texts in this reader elaborates on the origin of these stereotypes and the different social status of these professions. This could be the way to answer the question: Who do we want to be the librarian in the future?</p>
<img src="images/Reading_structure_screen_interface.png" width="80%" />
<p>Then moving from human interpretation to software interpretation, Joca presents a software, Reading the Structure, that attempts to make visible to human readers how machines, or to be more precise, specific software implementation of text analysis, interpret texts. Computers read a text differently than we do. One of the common methods for software to analyse a text, is to cut the sentences into loose words. Then each word can be labelled for importance, sentiment, or its function in the sentence. During this process of structuring the text, the relation with the original text fades away. Reading the Structure is a reading interface that brings the labels back in the original text. Does that makes us, mere humans, able to read like our machines do?</p>
<h3>Chapter 3 - Zalán Szakács</h3>
<em>From DIY Book Scanning to the Shadow Librarian + ACCP - Analogue Circular Communication Protocol</em>
<img src="images/Screen_Shot_2018-03-24_at_12.44.38.png" width="80%" />
<p>Zalán's reader, From DIY Book Scanning to the Shadow Librarian, traces back the beginnings of the shadow libraries starting from the Soviet era of Russia and explores its impact on contemporary academic publishing. Amongst other things, the text selection informs the reader about activists in this field such as Aaron Swartz, the writer of Guerilla Open Access Manifesto and Alexandra Elbakyan, the founder of Sci-Hub.</p>
<img src="images/Manifesto_a_1_small.gif" width="80%" />
<p>Where does the message start? Where does the message end? The user is challenged by the coding tool ACCP to discover the rules behind the circular decoding system and decipher the message. Through the programming language Python and the software DrawBot, words are processed and mapped into a spatial graphical system with the 26 characters of the alphabet and the 10 numbers are arranged around a circle. With a radial stencil placed in front of the graphics, it is possible to turn the images back into words.</p>
<h3>Chapter 4 - Natasha Berting</h3>
<em>How Bias Spreads from the Canon to the Web + Erase / Replace</em>
<pre>
erase: tiffs hocrs
python3 src/erase_leastcommon.py
rm $(input-hocr)
rm $(images-tiff)
replace:tiffs hocrs
python3 src/replace_leastcommon.py
rm $(input-hocr)
rm $(images-tiff)
</pre>
<img src="images/Reader-001.jpg" width="80%" />
<p>Natasha's contribution explores the politics of selection, transparency and as Johanna Drucker said, "calls attention to the made-ness of knowledge". Her selection of texts explores how human biases and cultural blind spots are transferred from the page to the screen, as companies like Google turn books into databases, bags of words into training sets, and use them in ways that are not always clearly communicated.</p>
<img src="images/Delete1.png" width="80%" />
<p>The texts will be processed by the Erase / Replace scripts, which are two experiments that question who and what is included or excluded in book scanning. In each script, what is first scanned affects what is visible and what is hidden in what is scanned in a second stage, and so on. The scripts learn each page's vocabulary and favour the most common words. The least common words recede further and further away from view, finally disappearing altogether or even being replaced by the more common words. Every scan session results in a different distortion, and outputs the original scanned image, but with the text manipulated.</p>
<p>Ultimately these texts and scripts are tools for thinking about how knowledge is mined and presented online, how bias spreads from the Canon to the web, finding opportunities to break open this process.</p>
<h3>Chapter 5 - Alexander Roidl</h3>
<em>Scanning the Database + chatbook</em>
<pre>
chatbook: ocr/output.txt
python3 src/chatbook.py
oulibot: ocr/output.txt #chatbot based on the knowledge of the scans Dependencies: nltk_rake, irc, nltk
python3 src/oulibot.py
</pre>
<img src="images/IMG_6781.JPG" width="80%" />
<p>In Scanning the database, Alexander offers to navigate in and out of database narratives. His reader looks at how databases are structured and formed, and how the data they hold are classified, and how such structuring and classification leads to bias. It shows how important it is to question the authoritative dimension of databases, by looking closely at what is being scanned, how it is stored, organized and selected. </p>
<img src="images/Screenshot_from_2018-03-25_00-19-24.png" width="80%" />
<p>In response to these questions, Alex proposes an alternative interface to such a database, by creating a chat bot that enables the user / viewer to explore the content of scanned material by chatting with the book scanner. By adding an explicit layer of software mediation, the experiment questions how knowledge is built and mediated in the age of machine learning.</p>
<h3>Chapter 6 - Angeliki Diakrousi</h3>
<em>From Tedious Tasks to Liberating Orality + ttssr-&gt;> Reading and speech recognition in loop</em>
<pre>
ttssr-human-only: ocr/output.txt
bash src/ttssr-loop-human-only.sh ocr/output.txt</pre>
<img src="images/DSC5797.jpg" width="80%" />
<img src="images/Ttssr-algologs.png" width="80%" />
<p>Angeliki's collection of texts From Tedious Tasks to Liberating Orality- Practices of the Excluded on Sharing Knowledge, refers to oral culture in relation to programming, as a way of sharing knowledge including our individually embodied position and voice. The emphasis on the role of personal positioning is often supported by feminist theorists. Similarly, and in contrast to scanning, reading out loud is a way of distributing knowledge in a shared space with other people, and this is the core principle behind the ttssr-&gt; Reading and speech recognition in loop software. Using speech recognition software and python scripts Angeliki proposes to the audience to participate in a system that highlights how each voice bears the personal story of an individual. In this case the involvement of a machine provides another layer of reflection of the reading process.</p>
<h2>Credits</h2>
<p>OuNuPo was produced as part of a collaboration between XPUB and WORM. The project was developed by the XPUB practitioners (Natasha Berting, Angeliki Diakrousi, Joca van der Horst, Alexander Roidl, Alice Strete and Zalán Szakács) with the support from Varia special guests (Manetta Berends and Cristina Cochior), the WORM Pirate Bay (Wojtek Szustak and Frederic Van de Velde), diybookscanner.eu (Mark Van den Borre) and XPUB staff and tutors (Delphine Bedel, André Castro, Aymeric Mansoux, Michael Murtaugh, Leslie Robbins and Steve Rushton).</p>
<div class="seperator">
<p>------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------</p>
</div>
<h2>OuNuPo-Make Code Repository</h2>
<h1 id="ounupo-make">OuNuPo Make</h1>
<p>Software experiments for the OuNuPo bookscanner, part of Special Issue 5</p>
<p><a href="https://git.xpub.nl/OuNuPo-make/" class="uri">https://git.xpub.nl/OuNuPo-make/</a></p>
<h2 id="authors">Authors</h2>
<p>Natasha Berting, Angeliki Diakrousi, Joca van der Horst, Alexander Roidl, Alice Strete and Zalán Szakács.</p>
<h2 id="clone-repository">Clone Repository</h2>
<p><code>git clone https://git.xpub.nl/repos/OuNuPo-make.git</code></p>
<h2 id="general-depencies">General depencies</h2>
<ul>
<li>Python3</li>
<li>GNU make</li>
<li>Python3 NLTK <code>pip3 install nltk</code></li>
</ul>
<h1 id="make-commands">Make commands</h1>
<h2 id="sitting-inside-a-pocketsphinx-angeliki">Sitting inside a pocket(sphinx): Angeliki</h2>
<p>Speech recognition feedback loops using the first sentence of a scanned text as input</p>
<p>run: <code>make ttssr-human-only</code></p>
<p>Specific Dependencies:</p>
<ul>
<li>PocketSphinx package <code>sudo aptitude install pocketsphinx pocketsphinx-en-us</code></li>
<li>PocketSphinx Python library: <code>sudo pip3 install PocketSphinx</code></li>
<li>Other software packages:<code>sudo apt-get install gcc automake autoconf libtool bison swig python-dev libpulse-dev</code></li>
<li>Speech Recognition Python library: <code>sudo pip3 install SpeechRecognition</code></li>
<li>TermColor Python library: <code>sudo pip3 install termcolor</code></li>
<li>PyAudio Python library: <code>sudo pip3 install pyaudio</code></li>
</ul>
<h3 id="licenses">Licenses:</h3>
<p>© 2018 WTFPL Do What the Fuck You Want to Public License. © 2018 BSD 3-Clause Berkeley Software Distribution</p>
<h2 id="reading-the-structure-joca">Reading the Structure: Joca</h2>
<p>Uses OCR'ed text as an input, labels each word for Part-of-Speech, stopwords and sentiment. Then it generates a reading interface where words with a specific label are hidden. Output can be saved as poster, or exported as json featuring the full data set.</p>
<p>Run: <code>make reading_structure</code></p>
<p>Specific Dependencies:</p>
<ul>
<li><a href="http://www.nltk.org/install.html">NLTK</a> packages: tokenize.punkt, pos_tag, word_tokenize, sentiment.vader, vader_lexicon (python3; import nltk; nltk.download() and select these models)</li>
<li><a href="https://spacy.io/usage/">spaCy</a> Python library</li>
<li>spacy: en_core_web_sm model (python3 -m spacy download en_core_web_sm)</li>
<li><a href="http://weasyprint.readthedocs.io/en/latest/install.html">weasyprint</a></li>
<li><a href="http://jinja.pocoo.org/docs/2.10/intro/#installation">jinja2</a></li>
<li>font: <a href="https://www.fontsquirrel.com/fonts/pt-serif">PT Sans</a></li>
<li>font: <a href="https://www.fontsquirrel.com/fonts/ubuntu-mono">Ubuntu Mono</a></li>
</ul>
<h3 id="license-gnu-agplv3">License: GNU AGPLv3</h3>
<p>Permissions of this license are conditioned on making available complete source code of licensed works and modifications, which include larger works using a licensed work, under the same license. Copyright and license notices must be preserved. Contributors provide an express grant of patent rights. When a modified version is used to provide a service over a network, the complete source code of the modified version must be made available. See src/reading_structure/license.txt for the full license.</p>
<h2 id="erase-replace-natasha">Erase / Replace: Natasha</h2>
<p>Receives your scanned pages in order, then analyzes each image and its vocabulary. Finds and crops the least common words, and either erases them, or replaces them with the most common words. Outputs a PDF of increasingly distorted scan images.</p>
<p>For erase script run: <code>make erase</code></p>
<p>For replace script run: <code>make replace</code></p>
<p>Specific Dependencies:</p>
<ul>
<li>NLTK English Corpus:
<ul>
<li>run NLTK downloader <code>python -m nltk.downloader</code></li>
<li>select menu &quot;Corpora&quot;</li>
<li>select &quot;stopwords&quot;</li>
<li>&quot;Download&quot;</li>
</ul></li>
<li>Python Image Library (PIL): <code>pip3 install Pillow</code></li>
<li>PDF generation for Python (FPDF): <code>pip3 install fpdf</code></li>
<li>HTML5lib Python Library: <code>pip3 install html5lib</code></li>
</ul>
<h3 id="notes-bugs">Notes &amp; Bugs:</h3>
<p>This script is very picky about the input images it can work with. For best results, please use high resolution images in RGB colorspace. Errors can occur when image modes do not match or tesseract cannot successfully make HOCR files.</p>
<h2 id="carlandre-overunder-alice-strete">carlandre &amp; over/under: Alice Strete</h2>
<p>Person who aspires to call herself a software artist sometime next year.</p>
<h3 id="license">License:</h3>
<p>Copyright © 2018 Alice Strete This work is free. You can redistribute it and/or modify it under the terms of the Do What The Fuck You Want To Public License, Version 2, as published by Sam Hocevar. See http://www.wtfpl.net/ for more details.</p>
<h3 id="dependencies">Dependencies:</h3>
<ul>
<li><a href="https://docs.pytest.org/en/latest/getting-started.html">pytest</a></li>
</ul>
<p>Programs:</p>
<h3 id="carlandre">carlandre</h3>
<p>Description: Generates concrete poetry from a text file. If you're connected to a printer located in /dev/usb/lp0 you can print the poem.</p>
<p>run: <code>make carlandre</code></p>
<h3 id="overunder">over/under</h3>
<p>Description: Interpreted programming language written in Python3 which translates basic weaving instructions into code and applies them to text.</p>
<p>run: <code>make overunder</code></p>
<h3 id="instructions">Instructions:</h3>
<ul>
<li>over/under works with specific commands which execute specific instructions.</li>
<li>When running, an interpreter will open: <code>&gt;</code></li>
<li>To load your text, type 'load'. This is necessary before any other instructions. Every time you load the text, the previous instructions will be discarded.</li>
<li>To see the line you are currently on, type 'show'.</li>
<li>To start your pattern, type 'over' or 'under', each followed by an integer, separated by a comma. e.g. over 5, under 5, over 6, under 10</li>
<li>To move on to the next line of text, press enter twice.</li>
<li>To see your pattern, type 'pattern'.</li>
<li>To save your pattern in a text file, type 'save'.</li>
<li>To leave the program, type 'quit'.</li>
</ul>
<h2 id="oulibot-alex">oulibot: Alex</h2>
<p>Description: Chatbot that will help you to write a poem based on the text you inserted by giving you constraints.</p>
<p>run: <code>make oulibot</code></p>
<h4 id="dependencies-1">Dependencies:</h4>
<p>Python libraries:</p>
<ul>
<li>irc : <code>pip3 install irc</code></li>
<li>rake_nltk Python library: <code>pip3 install rake_nltk</code></li>
<li>textblob: <code>pip3 install textblob</code></li>
<li>PIL: <code>pip3 install Pillow</code></li>
<li>numpy: <code>pip3 install numpy</code></li>
<li>tweepy: <code>pip3 install tweepy</code></li>
<li>NLTK stopwords:
<ul>
<li>run NLTK downloader <code>python -m nltk.downloader</code></li>
<li>select menu &quot;Corpora&quot;</li>
<li>select &quot;stopwords&quot;</li>
<li>&quot;Download&quot;</li>
</ul></li>
</ul>
</div>
</body>
</html>

File diff suppressed because one or more lines are too long

@ -0,0 +1,129 @@
/* Body/text face: NotCourierSans, self-hosted in eot/woff/ttf/svg
   variants for cross-browser coverage (old-IE through modern). */
@font-face {
font-family: 'NotCourierSans';
src: url('fonts/NotCourierSans.eot?#iefix') format('embedded-opentype'), url('fonts/NotCourierSans.woff') format('woff'), url('fonts/NotCourierSans.ttf') format('truetype'), url('fonts/NotCourierSans.svg#NotCourierSans') format('svg');
font-weight: normal;
font-style: normal;
}
/* Display/heading face: PinyonScript, same self-hosted format stack as
   NotCourierSans above. */
@font-face {
font-family: 'PinyonScript';
/* Fixed: the .ttf url was the only src in this file missing the fonts/
   directory prefix, which broke the truetype fallback.
   NOTE(review): assumes the file lives at fonts/PinyonScript.ttf like
   its siblings — confirm against the deployed assets. */
src: url('fonts/PinyonScript.eot?#iefix') format('embedded-opentype'), url('fonts/PinyonScript.woff') format('woff'), url('fonts/PinyonScript.ttf') format('truetype'), url('fonts/PinyonScript.svg#PinyonScript') format('svg');
font-weight: normal;
font-style: normal;
}
/* Page defaults: typewriter face on white. position:relative makes
   body a positioning context for the layered content/background divs. */
body{
font-family: NotCourierSans;
font-weight: normal;
background-color: white;
position: relative;
}
/* Page title: large script face in the site's blue.
   The two consecutive h1 rules were merged — same selector declared
   twice back-to-back, identical cascade result. */
h1 {
font-family: PinyonScript;
font-weight: normal;
color: #003cb3;
font-size: 80px;
margin-top: 0px;
}
/* Running text and links share the same blue as the headings. */
p {
font-family: NotCourierSans;
color: #003cb3;
font-size: 17px;
}
a {
color: #003cb3;
}
/* Fixed width for embedded audio players and inline image figures. */
audio{
width: 400px;
}
.image{
width: 400px;
}
.image img{
width: 400px;
}
/* Horizontal divider block; class name is spelled "seperator" in the
   HTML as well, so the misspelling must be kept in sync. */
.seperator{
width:100%;
overflow: hidden;
}
/* Foreground column: centered, semi-transparent white (#fffc) so the
   randomly placed background images show through around it. */
div.content{z-index:20;
width: 75%;
margin-left: auto;
margin-right: auto;
background: #fffc;
margin-bottom: 50%;
padding: 0 1%;
}
/* child elements of div.content */
div.content * { width:100%;
background: white;
}
div.content h2 {color: #FFFFFF;
background-color: #000000;
}
div.content audio { width:75%; height:30px; }
/*imgs.background z-index<10*/
/* Background layer sits behind the content; each img.background is
   positioned by JS (left/top set inline) and fades in on load. */
div.background{z-index:-1;
width:100%;}
img.background{ position: fixed;
z-index:-1;
animation: fadein 3s;
-moz-animation: fadein 3s;/* Firefox */
-webkit-animation: fadein 3s;/* Safari & Chrome */
-o-animation: fadein 3s; /* Opera */
}
/* Simple opacity fade used by img.background above. The vendor-prefixed
   copies duplicate the standard rule for older Firefox/WebKit/Opera. */
@keyframes fadein {
from {
opacity:0;
}
to {
opacity:1;
}
}
@-moz-keyframes fadein { /* Firefox */
from {
opacity:0;
}
to {
opacity:1;
}
}
@-webkit-keyframes fadein { /* Safari and Chrome */
from {
opacity:0;
}
to {
opacity:1;
}
}
@-o-keyframes fadein { /* Opera */
from {
opacity:0;
}
to {
opacity: 1;
}
}
Loading…
Cancel
Save