X-Git-Url: http://git.hungrycats.org/cgi-bin/gitweb.cgi?p=xscreensaver;a=blobdiff_plain;f=driver%2Fxscreensaver-getimage-file;h=ee06a79a10407321113f68692351ceb14b499564;hp=bc7bcbd9e33abaf8bcf7772a8871408d5bf24329;hb=019de959b265701cd0c3fccbb61f2b69f06bf9ee;hpb=5f9c47ca98dd43d8f59b7c27d3fde6edfde4fe21

diff --git a/driver/xscreensaver-getimage-file b/driver/xscreensaver-getimage-file
index bc7bcbd9..ee06a79a 100755
--- a/driver/xscreensaver-getimage-file
+++ b/driver/xscreensaver-getimage-file
@@ -1,5 +1,5 @@
 #!/usr/bin/perl -w
-# Copyright © 2001-2011 Jamie Zawinski <jwz@jwz.org>.
+# Copyright © 2001-2013 Jamie Zawinski <jwz@jwz.org>.
 #
 # Permission to use, copy, modify, distribute, and sell this software and its
 # documentation for any purpose is hereby granted without fee, provided that
@@ -13,6 +13,10 @@
 # prints its name.  The file will be an image file whose dimensions are
 # larger than a certain minimum size.
 #
+# If the directory is a URL, it is assumed to be an RSS or Atom feed.
+# The images from that feed will be downloaded, cached, and selected from
+# at random.  The feed will be re-polled periodically, as needed.
+#
 # The various xscreensaver hacks that manipulate images ("jigsaw", etc.) get
 # the image to manipulate by running the "xscreensaver-getimage" program.
 #
@@ -45,8 +49,15 @@ use bytes;  # Larry can take Unicode and shove it up his ass sideways.
             # Perl 5.8.0 causes us to start getting incomprehensible
             # errors about UTF-8 all over the place without this.
 
+use Digest::MD5 qw(md5_base64);
+
+# Some Linux systems don't install LWP by default!
+# Only error out if we're actually loading a URL instead of local data.
+BEGIN { eval 'use LWP::Simple;' }
+
+
 my $progname = $0; $progname =~ s@.*/@@g;
-my $version = q{ $Revision: 1.29 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
+my $version = q{ $Revision: 1.37 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
 
 my $verbose = 0;
 
@@ -66,6 +77,10 @@ my $cache_p = 1;
 #
 my $cache_max_age = 60 * 60 * 3;   # 3 hours
 
+# Re-poll RSS/Atom feeds when local copy is older than this many seconds.
+#
+my $feed_max_age = $cache_max_age;
+
 
 # This matches files that we are allowed to use as images (case-insensitive.)
 # Anything not matching this is ignored.  This is so you can point your
@@ -235,7 +250,11 @@ sub read_cache($) {
     my $dd = "$ENV{HOME}/Library/Caches";    # MacOS location
     if (-d $dd) {
       $cache_file_name = "$dd/org.jwz.xscreensaver.getimage.cache";
-    } elsif (-d "$ENV{HOME}/tmp") {
+    } elsif (-d "$ENV{HOME}/.cache") {       # Gnome "FreeDesktop XDG" location
+      $dd = "$ENV{HOME}/.cache/xscreensaver";
+      if (! -d $dd) { mkdir ($dd) || error ("mkdir $dd: $!"); }
+      $cache_file_name = "$dd/xscreensaver-getimage.cache"
+    } elsif (-d "$ENV{HOME}/tmp") {          # If ~/.tmp/ exists, use it.
       $cache_file_name = "$ENV{HOME}/tmp/.xscreensaver-getimage.cache";
     } else {
       $cache_file_name = "$ENV{HOME}/.xscreensaver-getimage.cache";
@@ -297,7 +316,7 @@ sub write_cache($) {
       print $cache_fd "$dir\n";
       foreach (@all_files) {
         my $f = $_; # stupid Perl. do this to avoid modifying @all_files!
-        $f =~ s@^\Q$dir\L/@@so || die;  # remove $dir from front
+        $f =~ s@^\Q$dir/@@so || die;  # remove $dir from front
         print $cache_fd "$f\n";
       }
     }
@@ -313,6 +332,464 @@ sub write_cache($) {
 }
 
 
+sub html_unquote($) {
+  my ($h) = @_;
+
+  # This only needs to handle entities that occur in RSS, not full HTML.
+  my %ent = ( 'amp' => '&', 'lt' => '<', 'gt' => '>',
+              'quot' => '"', 'apos' => "'" );
+  $h =~ s/(&(\#)?([[:alpha:]\d]+);?)/
+    {
+     my ($o, $c) = ($1, $3);
+     if (! defined($2)) {
+       $c = $ent{$c};                  # for &lt;
+     } else {
+       if ($c =~ m@^x([\dA-F]+)$@si) { # for &#x41;
+         $c = chr(hex($1));
+       } elsif ($c =~ m@^\d+$@si) {    # for &#65;
+         $c = chr($c);
+       } else {
+         $c = undef;
+       }
+     }
+     ($c || $o);
+    }
+    /gexi;
+  return $h;
+}
+
+
+
+sub set_proxy($) {
+  my ($ua) = @_;
+
+  if (!defined($ENV{http_proxy}) && !defined($ENV{HTTP_PROXY})) {
+    my $proxy_data = `scutil --proxy 2>/dev/null`;
+    my ($server) = ($proxy_data =~ m/\bHTTPProxy\s*:\s*([^\s]+)/s);
+    my ($port)   = ($proxy_data =~ m/\bHTTPPort\s*:\s*([^\s]+)/s);
+    if ($server) {
+      # Note: this ignores the "ExceptionsList".
+      $ENV{http_proxy} = "http://" . $server . ($port ? ":$port" : "") . "/";
+      print STDERR "$progname: MacOS proxy: $ENV{http_proxy}\n"
+        if ($verbose > 2)
+    }
+  }
+
+  $ua->env_proxy();
+}
+
+
+sub init_lwp() {
+  if (! defined ($LWP::Simple::ua)) {
+    error ("\n\n\tPerl is broken. Do this to repair it:\n" .
+           "\n\tsudo cpan LWP::Simple\n");
+  }
+  set_proxy ($LWP::Simple::ua);
+}
+
+
+# Returns a list of the image enclosures in the RSS or Atom feed.
+# Elements of the list are references, [ "url", "guid" ].
+#
+sub parse_feed($);
+sub parse_feed($) {
+  my ($url) = @_;
+
+  init_lwp();
+  $LWP::Simple::ua->agent ("$progname/$version");
+  $LWP::Simple::ua->timeout (10);  # bail sooner than the default of 3 minutes
+
+  my $body = (LWP::Simple::get($url) || '');
+
+  if ($body !~ m@^<\?xml\s@si) {
+    # Not an RSS/Atom feed.  Try RSS autodiscovery.
+
+    # (Great news, everybody: Flickr no longer provides RSS for "Sets",
+    # only for "Photostreams", and only the first 20 images of those.
+    # Thanks, assholes.)
+
+    error ("not an RSS or Atom feed, or HTML: $url")
+      unless ($body =~ m@<(HEAD|BODY|A|IMG)\b@si);
+
+    # Find the first <LINK> with RSS or Atom in it, and use that instead.
+
+    $body =~ s@<LINK\s+([^<>]*)>@{
+      my $p = $1;
+      if ($p =~ m! \b REL  \s* = \s* ['"]?  alternate \b!six &&
+          $p =~ m! \b TYPE \s* = \s* ['"]? application/(atom|rss) !six &&
+          $p =~ m! \b HREF \s* = \s* ['"]  ( [^<>'"]+ ) !six
+         ) {
+        my $u2 = html_unquote ($1);
+        if ($u2 =~ m!^/!s) {
+          my ($h) = ($url =~ m!^([a-z]+://[^/]+)!si);
+          $u2 = "$h$u2";
+        }
+        print STDERR "$progname: found feed: $u2\n"
+          if ($verbose);
+        return parse_feed ($u2);
+      }
+      '';
+    }@gsexi;
+
+    error ("no RSS or Atom feed for HTML page: $url");
+  }
+
+
+  $body =~ s@(<ENTRY|<ITEM)@\001$1@gsi;
+  my @items = split(/\001/, $body);
+  shift @items;
+
+  my @imgs = ();
+  my %ids;
+
+  foreach my $item (@items) {
+    my $iurl = undef;
+    my $id = undef;
+
+    # First look for <link rel="enclosure" href="...">
+    #
+    if (! $iurl) {
+      $item =~ s!(<LINK[^<>]*>)!{
+        my $link = $1;
+        my ($rel)  = ($link =~ m/\bREL\s*=\s*[\"\']?([^<>\'\"]+)/si);
+        my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+        my ($href) = ($link =~ m/\bHREF\s*=\s*[\"\']([^<>\'\"]+)/si);
+
+        if ($rel && lc($rel) eq 'enclosure') {
+          if ($type) {
+            $href = undef unless ($type =~ m@^image/@si);  # omit videos
+          }
+          $iurl = html_unquote($href) if $href;
+        }
+        $link;
+      }!gsexi;
+    }
+
+    # Then look for <media:content url="...">
+    #
+    if (! $iurl) {
+      $item =~ s!(<MEDIA:CONTENT[^<>]*>)!{
+        my $link = $1;
+        my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+        $iurl = html_unquote($href) if $href;
+        $link;
+      }!gsexi;
+    }
+
+    # Then look for <enclosure url="..."/>
+    #
+    if (! $iurl) {
+      $item =~ s!(<ENCLOSURE[^<>]*>)!{
+        my $link = $1;
+        my ($type) = ($link =~ m/\bTYPE\s*=\s*[\"\']?([^<>\'\"]+)/si);
+        my ($href) = ($link =~ m/\bURL\s*=\s*[\"\']([^<>\'\"]+)/si);
+        $iurl = html_unquote($href)
+          if ($href && $type && $type =~ m@^image/@si);   # omit videos
+        $link;
+      }!gsexi;
+    }
+
+    # Ok, maybe there's an image in the <url> field?
+    #
+    if (! $iurl) {
+      $item =~ s!((<URL\b[^<>]*>)([^<>]*))!{
+        my ($all, $u2) = ($1, $3);
+        $iurl = html_unquote($u2) if ($u2 =~ m/$good_file_re/io);
+        $all;
+      }!gsexi;
+    }
+
+    # Then look for <description>...with an <img src="..."> inside.
+    #
+    if (! $iurl) {
+      $item =~ s!(<description[^<>]*>.*?</description>)!{
+        my $desc = $1;
+        $desc = html_unquote($desc);
+        my ($href) = ($desc =~ m@<IMG[^<>]*\bSRC=[\"\']?([^\"\'<>]+)@si);
+        $iurl = $href if ($href);
+        $desc;
+      }!gsexi;
+    }
+
+    # Could also do <content:encoded>, but the above probably covers all
+    # of the real-world possibilities.
+
+
+    # Find a unique ID for this image, to defeat image farms.
+    # First look for <id>...</id>
+    ($id) = ($item =~ m!<ID\b[^<>]*>\s*([^<>]+?)\s*</ID>!si) unless $id;
+
+    # Then look for <guid>...</guid>
+    ($id) = ($item =~ m!<GUID\b[^<>]*>\s*([^<>]+?)\s*</GUID>!si) unless $id;
+
+    # Then look for <link>...</link>
+    ($id) = ($item =~ m!<LINK\b[^<>]*>\s*([^<>]+?)\s*</LINK>!si) unless $id;
+
+
+    if ($iurl) {
+      $id = $iurl unless $id;
+      my $o = $ids{$id};
+      if (! $o) {
+        $ids{$id} = $iurl;
+        my @P = ($iurl, $id);
+        push @imgs, \@P;
+      } elsif ($iurl ne $o) {
+        print STDERR "$progname: WARNING: dup ID \"$id\"" .
+                     " for \"$o\" and \"$iurl\"\n";
+      }
+    }
+  }
+
+  return @imgs;
+}
+
+
+# Like md5_base64 but uses filename-safe characters.
+#
+sub md5_file($) {
+  my ($s) = @_;
+  $s = md5_base64($s);
+  $s =~ s@[/]@_@gs;
+  $s =~ s@[+]@-@gs;
+  return $s;
+}
+
+
+# Given the URL of an image, download it into the given directory
+# and return the file name.
+#
+sub download_image($$$) {
+  my ($url, $uid, $dir) = @_;
+
+  my $url2 = $url;
+  $url2 =~ s/\#.*$//s;          # Omit search terms after file extension
+  $url2 =~ s/\?.*$//s;
+  my ($ext) = ($url2 =~ m@\.([a-z\d]+)$@si);
+
+  # If the feed hasn't put a sane extension on their URLs, nothing's going
+  # to work. This code assumes that file names have extensions, even the
+  # ones in the cache directory.
+  #
+  if (! $ext) {
+    print STDERR "$progname: skipping extensionless URL: $url\n"
+      if ($verbose > 1);
+    return undef;
+  }
+
+  # Don't bother downloading files that we will reject anyway.
+  #
+  if (! ($url2 =~ m/$good_file_re/io)) {
+    print STDERR "$progname: skipping non-image URL: $url\n"
+      if ($verbose > 1);
+    return undef;
+  }
+
+  my $file = md5_file ($uid);
+  $file .= '.' . lc($ext) if $ext;
+
+  # Don't bother doing If-Modified-Since to see if the URL has changed.
+  # If we have already downloaded it, assume it's good.
+  if (-f "$dir/$file") {
+    print STDERR "$progname: exists: $dir/$file for $uid / $url\n"
+      if ($verbose > 1);
+    return $file;
+  }
+
+  # Special-case kludge for Flickr:
+  # Their RSS feeds sometimes include only the small versions of the images.
+  # So if the URL ends in one of the "small-size" letters, change it to "b".
+  #
+  #     _o  orig,  1600 +
+  #     _k  large, 2048 max
+  #     _h  large, 1600 max
+  #     _b  large, 1024 max
+  #     _c  medium,  800 max
+  #     _z  medium,  640 max
+  #     ""  medium,  500 max
+  #     _n  small,   320 max
+  #     _m  small,   240 max
+  #     _t  thumb,   100 max
+  #     _q  square,  150x150
+  #     _s  square,   75x75
+  #
+  $url =~ s@_[sqtmnzc](\.[a-z]+)$@_b$1@si
+    if ($url =~ m@^https?://[^/?#&]*?flickr\.com/@si);
+
+  print STDERR "$progname: downloading: $dir/$file for $uid / $url\n"
+    if ($verbose > 1);
+  init_lwp();
+  $LWP::Simple::ua->agent ("$progname/$version");
+  my $status = LWP::Simple::mirror ($url, "$dir/$file");
+  if (!LWP::Simple::is_success ($status)) {
+    print STDERR "$progname: error $status: $url\n";    # keep going
+  }
+
+  return $file;
+}
+
+
+sub mirror_feed($) {
+  my ($url) = @_;
+
+  if ($url !~ m/^https?:/si) {   # not a URL: local directory.
+    return (undef, $url);
+  }
+
+  my $dir = "$ENV{HOME}/Library/Caches";    # MacOS location
+  if (-d $dir) {
+    $dir = "$dir/org.jwz.xscreensaver.feeds";
+  } elsif (-d "$ENV{HOME}/.cache") {        # Gnome "FreeDesktop XDG" location
+    $dir = "$ENV{HOME}/.cache/xscreensaver";
+    if (! -d $dir) { mkdir ($dir) || error ("mkdir $dir: $!"); }
+    $dir .= "/feeds";
+    if (! -d $dir) { mkdir ($dir) || error ("mkdir $dir: $!"); }
+  } elsif (-d "$ENV{HOME}/tmp") {           # If ~/.tmp/ exists, use it.
+    $dir = "$ENV{HOME}/tmp/.xscreensaver-feeds";
+  } else {
+    $dir = "$ENV{HOME}/.xscreensaver-feeds";
+  }
+
+  if (! -d $dir) {
+    mkdir ($dir) || error ("mkdir $dir: $!");
+    print STDERR "$progname: mkdir $dir/\n" if ($verbose);
+  }
+
+  # MD5 for directory name to use for cache of a feed URL.
+  $dir .= '/' . md5_file ($url);
+
+  if (! -d $dir) {
+    mkdir ($dir) || error ("mkdir $dir: $!");
+    print STDERR "$progname: mkdir $dir/ for $url\n" if ($verbose);
+  }
+
+  # At this point, we have the directory corresponding to this URL.
+  # Now check to see if the files in it are up to date, and download
+  # them if not.
+
+  my $stamp = '.timestamp';
+  my $lock = "$dir/$stamp";
+
+  print STDERR "$progname: awaiting lock: $lock\n"
+    if ($verbose > 1);
+
+  my $mtime = ((stat($lock))[9]) || 0;
+
+  my $lock_fd;
+  open ($lock_fd, '+>>', $lock) || error ("unable to write $lock: $!");
+  flock ($lock_fd, LOCK_EX)     || error ("unable to lock $lock: $!");
+  seek ($lock_fd, 0, 0)         || error ("unable to rewind $lock: $!");
+
+  my $poll_p = ($mtime + $feed_max_age < time);
+
+  # --no-cache cmd line arg means poll again right now.
+  $poll_p = 1 unless ($cache_p);
+
+  # Even if the cache is young, make sure there is at least one file,
+  # and re-check if not.
+  #
+  if (! $poll_p) {
+    my $count = 0;
+    opendir (my $dirh, $dir) || error ("$dir: $!");
+    foreach my $f (readdir ($dirh)) {
+      next if ($f =~ m/^\./s);
+      $count++;
+      last;
+    }
+    closedir $dirh;
+
+    if ($count <= 0) {
+      print STDERR "$progname: no files in cache of $url\n" if ($verbose);
+      $poll_p = 1;
+    }
+  }
+
+  if ($poll_p) {
+
+    print STDERR "$progname: loading $url\n" if ($verbose);
+
+    my %files;
+    opendir (my $dirh, $dir) || error ("$dir: $!");
+    foreach my $f (readdir ($dirh)) {
+      next if ($f eq '.' || $f eq '..');
+      $files{$f} = 0;   # 0 means "file exists, should be deleted"
+    }
+    closedir $dirh;
+
+    $files{$stamp} = 1;
+
+    # Download each image currently in the feed.
+    #
+    my $count = 0;
+    my @urls = parse_feed ($url);
+    print STDERR "$progname: " . ($#urls + 1) . " images\n"
+      if ($verbose > 1);
+    foreach my $p (@urls) {
+      my ($furl, $id) = @$p;
+      my $f = download_image ($furl, $id, $dir);
+      next unless $f;
+      $files{$f} = 1;    # Got it, don't delete
+      $count++;
+    }
+
+    my $empty_p = ($count <= 0);
+
+    # Now delete any files that are no longer in the feed.
+    # But if there was nothing in the feed (network failure?)
+    # then don't blow away the old files.
+    #
+    my $kept = 0;
+    foreach my $f (keys(%files)) {
+      if ($count <= 0) {
+        $kept++;
+      } elsif ($files{$f}) {
+        $kept++;
+      } else {
+        if (unlink ("$dir/$f")) {
+          print STDERR "$progname: rm $dir/$f\n" if ($verbose > 1);
+        } else {
+          print STDERR "$progname: rm $dir/$f: $!\n";   # don't bail
+        }
+      }
+    }
+
+    # Both feed and cache are empty. No files at all. Bail.
+    error ("empty feed: $url") if ($kept <= 1);
+
+    # Feed is empty, but we have some files from last time. Warn.
+    print STDERR "$progname: empty feed: using cache: $url\n"
+      if ($empty_p);
+
+    $mtime = time();    # update the timestamp
+
+  } else {
+
+    # Not yet time to re-check the URL.
+    print STDERR "$progname: using cache: $url\n" if ($verbose);
+
+  }
+
+  # Unlock and update the write date on the .timestamp file.
+  #
+  truncate ($lock_fd, 0) || error ("unable to truncate $lock: $!");
+  seek ($lock_fd, 0, 0)  || error ("unable to rewind $lock: $!");
+  utime ($mtime, $mtime, $lock_fd) || error ("unable to touch $lock: $!");
+  flock ($lock_fd, LOCK_UN) || error ("unable to unlock $lock: $!");
+  close ($lock_fd);
+  $lock_fd = undef;
+  print STDERR "$progname: unlocked $lock\n" if ($verbose > 1);
+
+  # Don't bother using the imageDirectory cache. We know that this directory
+  # is flat, and we can assume that an RSS feed doesn't contain 100,000 images
+  # like ~/Pictures/ might.
+  #
+  $cache_p = 0;
+
+  # Return the URL and directory name of the files of that URL's local cache.
+  #
+  return ($url, $dir);
+}
+
+
 sub find_random_file($) {
   my ($dir) = @_;
 
@@ -323,6 +800,14 @@ sub find_random_file($) {
     }
   }
 
+  my $url;
+  ($url, $dir) = mirror_feed ($dir);
+
+  if ($url) {
+    $use_spotlight_p = 0;
+    print STDERR "$progname: $dir is cache for $url\n" if ($verbose > 1);
+  }
+
   @all_files = read_cache ($dir);
 
   if ($#all_files >= 0) {
@@ -350,8 +835,6 @@ sub find_random_file($) {
 
   write_cache ($dir);
 
-#  @all_files = sort(@all_files);
-
   if ($#all_files < 0) {
     print STDERR "$progname: no files in $dir\n";
     exit 1;
@@ -363,13 +846,19 @@ sub find_random_file($) {
     my $n = int (rand ($#all_files + 1));
     my $file = $all_files[$n];
     if (large_enough_p ($file)) {
-      $file =~ s@^\Q$dir\L/@@so || die;  # remove $dir from front
+      if (! $url) {
+        $file =~ s@^\Q$dir/@@so || die;  # remove $dir from front
+      }
       return $file;
     }
   }
 
   print STDERR "$progname: no suitable images in $dir " .
                "(after $max_tries tries)\n";
+
+  # If we got here, blow away the cache.  Maybe it's stale.
+  unlink $cache_file_name if $cache_file_name;
+
   exit 1;
 }
 
@@ -380,6 +869,11 @@ sub large_enough_p($) {
   my ($w, $h) = image_file_size ($file);
 
   if (!defined ($h)) {
+
+    # Nonexistent files are obviously too small!
+    # Already printed $verbose message about the file not existing.
+    return 0 unless -f $file;
+
     print STDERR "$progname: $file: unable to determine image size\n"
       if ($verbose);
     # Assume that unknown files are of good sizes: this will happen if
@@ -494,11 +988,10 @@ sub image_size($) {
 sub image_file_size($) {
   my ($file) = @_;
   my $in;
-  if (! open ($in, '<', $file)) {
+  if (! open ($in, '<:raw', $file)) {
     print STDERR "$progname: $file: $!\n" if ($verbose);
-    return undef;
+    return ();
   }
-  binmode ($in);  # Larry can take Unicode and shove it up his ass sideways.
   my $body = '';
   sysread ($in, $body, 1024 * 50);  # The first 50k should be enough.
   close $in;                        # (It's not for certain huge jpegs...
@@ -513,10 +1006,14 @@ sub error($) {
 }
 
 sub usage() {
-  print STDERR "usage: $progname [--verbose] directory\n" .
+  print STDERR "usage: $progname [--verbose] directory-or-feed-url\n\n" .
   "       Prints the name of a randomly-selected image file.  The directory\n" .
   "       is searched recursively.  Images smaller than " .
-         "${min_image_width}x${min_image_height} are excluded.\n";
+         "${min_image_width}x${min_image_height} are excluded.\n" .
+  "\n" .
+  "       The directory may also be the URL of an RSS/Atom feed.  Enclosed\n" .
+  "       images will be downloaded and cached locally.\n" .
+  "\n";
   exit 1;
 }
 
@@ -525,26 +1022,32 @@ sub main() {
   while ($_ = $ARGV[0]) {
     shift @ARGV;
-    if ($_ eq "--verbose") { $verbose++; }
-    elsif (m/^-v+$/) { $verbose += length($_)-1; }
-    elsif ($_ eq "--name") { }     # ignored, for compatibility
-    elsif ($_ eq "--spotlight") { $use_spotlight_p = 1; }
-    elsif ($_ eq "--no-spotlight") { $use_spotlight_p = 0; }
-    elsif ($_ eq "--cache")  { $cache_p = 1; }
-    elsif ($_ eq "--no-cache") { $cache_p = 0; }
-    elsif (m/^-./) { usage; }
-    elsif (!defined($dir)) { $dir = $_; }
-    else { usage; }
+    if (m/^--?verbose$/s)         { $verbose++; }
+    elsif (m/^-v+$/s)             { $verbose += length($_)-1; }
+    elsif (m/^--?name$/s)         { }   # ignored, for compatibility
+    elsif (m/^--?spotlight$/s)    { $use_spotlight_p = 1; }
+    elsif (m/^--?no-spotlight$/s) { $use_spotlight_p = 0; }
+    elsif (m/^--?cache$/s)        { $cache_p = 1; }
+    elsif (m/^--?no-?cache$/s)    { $cache_p = 0; }
+    elsif (m/^-./)                { usage; }
+    elsif (!defined($dir))        { $dir = $_; }
+    else                          { usage; }
   }
 
   usage unless (defined($dir));
 
-  $dir =~ s@^~/@$ENV{HOME}/@s;     # allow literal "~/"
-  $dir =~ s@/+$@@s;                # omit trailing /
+  $dir =~ s@^feed:@http:@si;
 
-  if (! -d $dir) {
-    print STDERR "$progname: $dir: not a directory\n";
-    usage;
+  if ($dir =~ m/^https?:/si) {
+    # ok
+  } else {
+    $dir =~ s@^~/@$ENV{HOME}/@s;     # allow literal "~/"
+    $dir =~ s@/+$@@s;                # omit trailing /
+
+    if (! -d $dir) {
+      print STDERR "$progname: $dir: not a directory or URL\n";
+      usage;
+    }
   }
 
   my $file = find_random_file ($dir);
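---

For reference, with this patch applied the script accepts either a local
directory or a feed URL (a "feed:" scheme is rewritten to "http:"), and both
single- and double-dash option spellings.  Example invocations (the feed URL
is hypothetical):

    xscreensaver-getimage-file --verbose ~/Pictures
    xscreensaver-getimage-file -vv --no-cache http://www.example.com/photos.rss

The second form re-polls the feed if the local mirror is older than
$feed_max_age (or immediately, given --no-cache), then prints the path of one
randomly chosen cached image.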
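The feed cache layout can also be previewed with a minimal sketch (not part
of the patch): mirror_feed() names each feed's flat cache directory after the
filename-safe MD5 digest that md5_file() computes from the URL.  This assumes
the Gnome/XDG branch of the location chain is the one taken, and the example
URL is again hypothetical.

    #!/usr/bin/perl -w
    # Sketch: compute the per-feed cache directory the same way the patched
    # script's md5_file() does: md5_base64, then '/' -> '_' and '+' -> '-'
    # so the digest is safe to use as a file name.
    use strict;
    use Digest::MD5 qw(md5_base64);

    my $url = $ARGV[0] || 'http://www.example.com/photos.rss';  # hypothetical
    my $s = md5_base64 ($url);
    $s =~ s@[/]@_@gs;
    $s =~ s@[+]@-@gs;
    print "$ENV{HOME}/.cache/xscreensaver/feeds/$s\n";

Each image downloaded into that directory is likewise named with md5_file()
of the item's unique ID plus the original file extension, which is why
download_image() skips extensionless URLs.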