Framework CCCS page

This commit is contained in:
Stefan Schlott 2013-09-05 09:51:17 +02:00
parent 09aba964a9
commit 8f812ab935
319 changed files with 24165 additions and 50 deletions

101
scripts/convert_announcement.rb Executable file
View file

@ -0,0 +1,101 @@
#!/usr/bin/env ruby
# encoding: utf-8
#
# Convert a CCCS announcement mail ("Key: value" header block followed by a
# free-text body) into a page with YAML front matter, written to stdout.
# Usage: convert_announcement.rb <file>
# Convert to UTF8: iconv -f ISO-8859-15 -t UTF-8 infile > outfile

# German month-name prefixes (first three characters, lower case) -> month number.
GERMAN_MONTHS = {
  'jan' => 1, 'feb' => 2, 'mär' => 3, 'apr' => 4, 'mai' => 5, 'jun' => 6,
  'jul' => 7, 'aug' => 8, 'sep' => 9, 'okt' => 10, 'nov' => 11, 'dez' => 12
}.freeze

file = File.open(ARGV[0])
header = {}
body = ''
in_header = true
last_header = nil
file.each do |line|
  if in_header
    if line =~ /^\s*$/
      # Blank lines before the body are ignored.
    elsif line =~ /^(\w*):\s*(.*)$/
      # New "Key: value" header line; keys are lower-cased.
      last_header = $1.downcase
      header[last_header] = $2.strip
    elsif line =~ /^\s+(.*)$/ && last_header
      # Continuation line: append to the most recent header.
      # (Guarded: a continuation before any header used to crash on nil.)
      header[last_header] << " #{$1.strip}"
    else
      # First line that is neither header nor continuation starts the body.
      body << line
      in_header = false
    end
  else
    body << line
  end
end

puts "---"
puts "kind: event"

# Strip an optional leading weekday ("Dienstag, 5. Mai 2013" -> "5. Mai 2013").
date = header['datum'].to_s
if date =~ /^.*,\s*(.*)$/
  date = $1
end
if date =~ /^(\d+)\.(\d+)\.(\d+)$/
  # Numeric German date: dd.mm.yyyy
  year = $3.to_i
  month = $2.to_i
  day = $1.to_i
elsif date =~ /^(\d+)\.\s*([^\s]+)\s+(\d+)$/
  # Spelled-out month: "5. Mai 2013". The dot after the day is now escaped;
  # the original /./ matched any character and silently accepted malformed
  # dates. Unknown month names map to 0, as before.
  year = $3.to_i
  month = GERMAN_MONTHS.fetch($2.downcase[0..2], 0)
  day = $1.to_i
else
  year = 0
  month = 0
  day = 0
end
# Expand two-digit years: < 80 -> 20xx, otherwise 19xx.
if year < 100
  year = year < 80 ? year + 2000 : year + 1900
end

puts "startdate: %04d-%02d-%02dT19:30:00" % [year, month, day]
puts "duration: 2h"
puts "title: #{header['thema']}"
puts "speakers:"
puts " -"
puts " name: #{header['referent']}"
# Missing 'ort' header no longer crashes (.to_s guards against nil).
ort = header['ort'].to_s.downcase
if ort.start_with?('stadtbibliothek')
  puts "location:"
  puts " location: bib"
elsif ort.start_with?('filmhaus')
  puts "location:"
  puts " location: wand5"
end
puts "public: true"
puts "---"
puts body

30
scripts/download-wikipage.rb Executable file
View file

@ -0,0 +1,30 @@
#!/usr/bin/env ruby
# encoding: utf-8
require 'rubygems'
require 'bundler/setup'
require 'nokogiri'
require 'open-uri'
# True when +node+ is an empty <p> element (tag name 'p', no text content).
# The original body contained two expressions; only the last was returned,
# so the name check was computed and discarded and ANY empty node matched.
def empty_p?(node)
  node.name == 'p' && node.content.empty?
end
# Fetch one TWiki page from cccs.de and dump the topic content as an HTML
# fragment with front matter into <pagename>.html in the current directory.
abort('Need one parameter (page name)') if ARGV.length == 0
page_name = ARGV[0]
doc = Nokogiri::HTML(open("https://www.cccs.de/wiki/bin/view/Main/#{page_name}"))
content = doc.css('.twikiTopic').children
File.open("#{page_name}.html", 'w') do |out|
  out.write("---\n")
  out.write("title: #{page_name}\n")
  out.write("kind: page\n")
  out.write("---\n")
  out.write(content)
end

64
scripts/expandlinks.rb Normal file
View file

@ -0,0 +1,64 @@
require 'csv'
require 'net/http'
require 'net/https'
# Extract the target URL of an HTTP redirect response.
# Prefers the Location header; falls back to scraping the first <a href>
# out of the HTML body (some servers send only an HTML stub).
# Returns nil when neither is present — the original crashed with
# NoMethodError (`match(...)[1]` on nil) for bodies without a link.
def redirect_url(resp)
  return resp['location'] unless resp['location'].nil?
  match = resp.body.match(/<a href=\"([^>]+)\">/i)
  match && match[1]
end
# Resolve a (possibly chained) short-link by following HTTP redirects.
# Returns the final URL, or the last URL reached when the redirect chain
# exceeds +maxdepth+ hops or a network error occurs (best effort — never
# raises for network problems).
#
# NOTE(review): verify_mode is VERIFY_NONE, i.e. TLS certificates are NOT
# validated. Tolerable for link expansion; do not reuse this for anything
# security-sensitive.
def follow_url(url, maxdepth=5)
  if maxdepth>0
    begin
      uri = URI.parse(url)
      resp = if url.start_with?('https://')
        # https needs an explicit Net::HTTP object with use_ssl enabled.
        https = Net::HTTP.new(uri.host, uri.port)
        https.use_ssl = true
        https.verify_mode = OpenSSL::SSL::VERIFY_NONE
        request = Net::HTTP::Get.new(uri.request_uri)
        https.request(request)
      else
        Net::HTTP.get_response(uri)
      end
    rescue
      # Any network or parse error: report it and keep the original link.
      puts "Network error getting #{url}"
      return url
    end
    if resp.kind_of?(Net::HTTPRedirection)
      # Recurse on the redirect target, consuming one level of depth.
      follow_url(redirect_url(resp), maxdepth-1)
    else
      url
    end
  else
    url
  end
end
# Entry point: read the tweet CSV (ARGV[0]), expand every shortened link in
# the tweet text (column 3), and write the result to ARGV[1].
# File.exists? was deprecated and removed in Ruby 3.2 — File.exist? is the
# supported name; abort replaces the puts + bare `raise Exception`.
abort('File not found') unless File.exist?(ARGV[0].to_s)
CSV.open(ARGV[1], 'wb') do |out|
  CSV.foreach(ARGV[0]) do |row|
    tweet = row[3]
    # http(s) URLs, trimming trailing punctuation from the match.
    links = tweet.scan(/https?:\/\/[^ ]*[^ .);:!?]/)
    links.each do |link|
      newlink = follow_url(link)
      if newlink != link
        tweet.gsub!(link, newlink)
      end
    end
    row[3] = tweet
    out << row
  end
end

35
scripts/generate-stammtisch.rb Executable file
View file

@ -0,0 +1,35 @@
#!/usr/bin/env ruby
# encoding: utf-8
require 'date'
require 'yaml'
# Build one event hash for the generated YAML schedule.
#
# day       - Date anchor (the first Tuesday of a month).
# dayoffset - number of days to add to +day+. The original added the offset
#             to the day-of-month directly, so Time.local raised
#             ArgumentError whenever the sum ran past the end of the month;
#             Date arithmetic rolls over correctly and is identical for
#             in-range offsets.
# location  - location key string.
#
# Returns a Hash with 'startdate' (18:00 local time), 'duration' ('4h') and
# a nested 'location' hash.
def event(day, dayoffset, location)
  start = day + dayoffset
  {
    'startdate' => Time.local(start.year, start.month, start.day, 18, 0, 0),
    'duration' => '4h',
    'location' => {
      'location' => location
    }
  }
end
# Emit the yearly Stammtisch schedule (YAML on stdout) for the year given in
# ARGV[0]: the first Tuesday of each month at 'zadu', plus a second meeting
# 15 days later at 'shack'.
year = ARGV[0].to_i
events = (1..12).flat_map do |month|
  first_of_month = Date.new(year, month, 1)
  # Days until the next Tuesday (wday 2), staying within the first week.
  shift = first_of_month.wday <= 2 ? 2 - first_of_month.wday : 9 - first_of_month.wday
  first_tuesday = first_of_month + shift
  [event(first_tuesday, 0, 'zadu'), event(first_tuesday, 15, 'shack')]
end
puts({ 'events' => events }.to_yaml)

55
scripts/update-planetfeeds.rb Executable file
View file

@ -0,0 +1,55 @@
#!/usr/bin/env ruby
# encoding: utf-8
require 'yaml'
require 'feedzirra'
# Load the blogroll YAML file and return a Hash mapping feed URL -> user name.
# Expects the file to contain a top-level 'blogroll' list whose entries have
# 'feed' and 'user' keys.
def getBlogroll(blogroll_file)
  raw = YAML.load_file(blogroll_file)
  raw['blogroll'].each_with_object({}) do |blog, feeds|
    feeds[blog['feed']] = blog['user']
  end
end
# Entry point: merge new postings from all blogroll feeds (ARGV[0]) into the
# stored blogposts YAML file (ARGV[1]), keeping the newest 20, sorted
# newest-first.
blogroll_file = ARGV[0]
blogposts_file = ARGV[1]

# Read the already-published posts, if any.
# (File.exists? was deprecated and removed in Ruby 3.2 — use File.exist?.)
blogposts = if File.exist?(blogposts_file.to_s)
  YAML.load_file(blogposts_file)
else
  { }
end
blogposts['blogposts'] ||= []

blogroll = getBlogroll(blogroll_file)

# URLs already stored, used to skip duplicates.
posturls = blogposts['blogposts'].map { |post| post['url'] }

# Fetch and parse all feeds in one call.
feeds = Feedzirra::Feed.fetch_and_parse(blogroll.keys)

# Append every posting we have not seen yet.
feeds.each do |feed, data|
  data.entries.each do |posting|
    next if posturls.include?(posting.url)
    blogposts['blogposts'] << {
      'user'  => blogroll[feed],
      'date'  => posting.published,
      'title' => posting.title,
      'url'   => posting.url
    }
  end
end

# Newest first, keep at most 20 entries.
blogposts['blogposts'].sort! { |a, b| b['date'] <=> a['date'] }
blogposts['blogposts'] = blogposts['blogposts'][0..19]

File.open(blogposts_file, 'w+') { |f| f.write(blogposts.to_yaml) }

29
scripts/update-twitter.sh Executable file
View file

@ -0,0 +1,29 @@
#!/bin/bash
# Back up / incrementally update a Twitter timeline into twitter.csv,
# expanding all shortened links on the way (via expandlinks.rb).
#
# Usage: TWITTER_USER=<name> update-twitter.sh   or   update-twitter.sh <name>

if ! which t > /dev/null 2>&1 ; then
	echo "t not found. Please install from http://sferik.github.com/t/"
	exit 2
fi

# Take the user from the first argument when the env variable is not set.
# (The original tested -n, which overwrote an already-set TWITTER_USER with
# $1 and left it EMPTY when unset — exactly backwards.)
if [ -z "$TWITTER_USER" ] ; then
	TWITTER_USER=$1
fi

EXPANDLINKS="ruby `dirname $0`/expandlinks.rb"

if [ -f twitter.csv ] ; then
	# Incremental update: fetch only tweets newer than the newest stored id
	# (first field of the first line), prepend them to the existing backup.
	LASTID=`head -n 1 twitter.csv | cut -f 1 -d ","`
	t timeline @${TWITTER_USER} --number 3200 --csv -s $LASTID | sed '1d' > twitter-update.csv.unexpanded
	$EXPANDLINKS twitter-update.csv.unexpanded twitter-update.csv || exit 1
	rm twitter-update.csv.unexpanded
	mv twitter.csv twitter.csv.old
	cat twitter-update.csv twitter.csv.old > twitter.csv
	rm twitter.csv.old twitter-update.csv
else
	# Full fetch of the whole timeline (sed strips the CSV header row).
	# Now bails out on expansion failure, matching the incremental path.
	t timeline @${TWITTER_USER} --number 3200 --csv | sed '1d' > twitter.csv.unexpanded
	$EXPANDLINKS twitter.csv.unexpanded twitter.csv || exit 1
	rm twitter.csv.unexpanded
fi